Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
moab::WriteHDF5Parallel Class Reference

Write MOAB HDF5 file in parallel. More...

#include <WriteHDF5Parallel.hpp>


Classes

struct  DataSetCreator
 Argument to create_dataset. More...
 
struct  NoopDescCreator
 

Public Member Functions

 WriteHDF5Parallel (Interface *iface)
 
virtual ~WriteHDF5Parallel ()
 
- Public Member Functions inherited from moab::WriteHDF5
 WriteHDF5 (Interface *iface)
 
virtual ~WriteHDF5 ()
 
ErrorCode write_file (const char *filename, const bool overwrite, const FileOptions &opts, const EntityHandle *export_sets, const int export_set_count, const std::vector< std::string > &qa_records, const Tag *tag_list=NULL, int num_tags=0, int user_dimension=3)
 
mhdf_FileHandle file_ptr ()
 
WriteUtilIface * write_util ()
 
ErrorCode create_elem_table (const ExportSet &block, long num_ents, long &first_id_out)
 
ErrorCode create_set_meta (long num_sets, long &first_id_out)
 
- Public Member Functions inherited from moab::WriterIface
virtual ~WriterIface ()
 

Static Public Member Functions

static WriterIface * factory (Interface *)
 
- Static Public Member Functions inherited from moab::WriteHDF5
static WriterIface * factory (Interface *)
 

Protected Member Functions

virtual void debug_barrier_line (int lineno)
 
virtual void print_times (const double *times) const
 
virtual ErrorCode parallel_create_file (const char *filename, bool overwrite, const std::vector< std::string > &qa_records, const FileOptions &opts, const Tag *user_tag_list=0, int user_tag_count=0, int dimension=3, double *times=0)
 Called by normal (non-parallel) writer. Sets up necessary data for parallel write. More...
 
ErrorCode gather_interface_meshes (Range &non_local_ents)
 Figure out which local mesh is duplicated on remote processors and which processor will write that mesh. More...
 
ErrorCode exchange_file_ids (const Range &non_local_ents)
 For entities that will be written by another processor but are referenced by entities on this processor, get the file IDs that will be assigned to them so they can be referenced by the entities to be written on this processor. More...
 
ErrorCode communicate_shared_set_ids (const Range &owned, const Range &remote)
 Get remote ids for shared sets. More...
 
ErrorCode pack_set (Range::const_iterator set, unsigned long *set_data, size_t set_data_length)
 Pack set data for communication. More...
 
ErrorCode unpack_set (EntityHandle set, const unsigned long *set_data, size_t set_data_length)
 Unpack set data from communication. More...
 
ErrorCode communicate_shared_set_data (const Range &owned, const Range &remote)
 Communicate set contents between processors such that each owner knows the contents, parents, & child lists from all processors that have a copy of the set. More...
 
ErrorCode create_node_table (int dimension)
 Create the node table in the file. More...
 
ErrorCode negotiate_type_list ()
 Communicate with other processors to negotiate the types of elements that will be written (the union of the types defined on each proc.) More...
 
ErrorCode create_element_tables ()
 Create tables to hold element connectivity. More...
 
ErrorCode create_adjacency_tables ()
 Create tables to hold element adjacencies. More...
 
ErrorCode create_meshset_tables (double *times)
 Create tables for mesh sets. More...
 
ErrorCode create_tag_tables ()
 Write tag descriptions and create tables to hold tag data. More...
 
void remove_remote_entities (EntityHandle relative, Range &range)
 Remove any remote mesh entities from the passed range. More...
 
void remove_remote_entities (EntityHandle relative, std::vector< EntityHandle > &vect)
 
void remove_remote_sets (EntityHandle relative, Range &range)
 
void remove_remote_sets (EntityHandle relative, std::vector< EntityHandle > &vect)
 
ErrorCode get_sharedset_tags ()
 Get any existing tags which aren't excluded and add them to the shared set tags. More...
 
ErrorCode append_serial_tag_data (std::vector< unsigned char > &buffer, const WriteHDF5::TagDesc &tag)
 
ErrorCode check_serial_tag_data (const std::vector< unsigned char > &buffer, std::vector< TagDesc * > *missing=0, std::vector< TagDesc * > *newlist=0)
 Helper function for create_tag_tables. More...
 
ErrorCode create_dataset (int num_datasets, const long *num_owned_entities, long *offsets_out, long *max_proc_ents_out, long *total_ents_out, const DataSetCreator &creator=NoopDescCreator(), ExportSet *groups[]=0, wid_t *first_ids_out=NULL)
 Do typical communication for dataset creation. More...
 
void print_shared_sets ()
 
void print_set_sharing_data (const Range &range, const char *label, Tag idt)
 
- Protected Member Functions inherited from moab::WriteHDF5
virtual ErrorCode write_finished ()
 
ErrorCode gather_tags (const Tag *user_tag_list, int user_tag_list_length)
 Gather tags. More...
 
bool check_dense_format_tag (const ExportSet &ents, const Range &all_tagged, bool prefer_dense)
 
ErrorCode count_adjacencies (const Range &elements, wid_t &result)
 
ErrorCode count_set_size (const Range &sets, long &contents_length_out, long &children_length_out, long &parents_length_out)
 
ErrorCode get_set_info (EntityHandle set, long &num_entities, long &num_children, long &num_parents, unsigned long &flags)
 Get information about a meshset. More...
 
ErrorCode create_set_tables (long contents_length, long children_length, long parents_length)
 
ErrorCode write_qa (const std::vector< std::string > &list)
 Write exodus-type QA info. More...
 
ErrorCode get_num_sparse_tagged_entities (const TagDesc &tag, size_t &count)
 Get tagged entities for which to write tag values. More...
 
ErrorCode get_sparse_tagged_entities (const TagDesc &tag, Range &range)
 Get tagged entities for which to write tag values. More...
 
void get_write_entities (Range &range)
 Get entities that will be written to file. More...
 
const ExportSet * find (const ExportType &type) const
 
const SpecialSetData * find_set_data (EntityHandle h) const
 
SpecialSetData * find_set_data (EntityHandle h)
 
void print_id_map () const
 
void print_id_map (std::ostream &str, const char *prefix="") const
 
ErrorCode create_tag (const TagDesc &tag_data, unsigned long num_entities, unsigned long var_len_total)
 
ErrorCode assign_ids (const Range &entities, wid_t first_id)
 Add entities to idMap. More...
 
ErrorCode range_to_blocked_list (const Range &input_range, std::vector< wid_t > &output_id_list, bool &ranged_list)
 
ErrorCode range_to_blocked_list (const EntityHandle *input_ranges, size_t num_input_ranges, std::vector< wid_t > &output_id_list, bool &ranged_list)
 
ErrorCode range_to_id_list (const Range &input_range, wid_t *array)
 
ErrorCode vector_to_id_list (const std::vector< EntityHandle > &input, std::vector< wid_t > &output, bool remove_non_written=false)
 Get IDs for entities. More...
 
ErrorCode vector_to_id_list (const EntityHandle *input, wid_t *output, size_t num_entities)
 Get IDs for entities. More...
 
ErrorCode vector_to_id_list (const EntityHandle *input, size_t input_len, wid_t *output, size_t &output_len, bool remove_non_written)
 Get IDs for entities. More...
 
bool convert_handle_tag (EntityHandle *data, size_t count) const
 
bool convert_handle_tag (const EntityHandle *source, EntityHandle *dest, size_t count) const
 
ErrorCode get_adjacencies (EntityHandle entity, std::vector< wid_t > &adj)
 
ErrorCode get_tag_data_length (const TagDesc &tag_info, const Range &range, unsigned long &result)
 Get the sum of the lengths of tag values (counted as number of values of the tag's data type) for variable-length tag data. More...
 
virtual void print_times (const double times[NUM_TIMES]) const
 

Private Attributes

ParallelComm * myPcomm
 pcomm controlling parallel nature of mesh More...
 
bool pcommAllocated
 Whether this instance allocated (and the destructor should delete) the pcomm. More...
 
H5S_seloper_t hslabOp
 Operation to use to append hyperslab selections. More...
 

Additional Inherited Members

- Public Types inherited from moab::WriteHDF5
typedef EntityHandle wid_t
 
- Static Public Attributes inherited from moab::WriteHDF5
static const hid_t id_type = get_id_type()
 
- Protected Types inherited from moab::WriteHDF5
enum  TimingValues {
  TOTAL_TIME = 0 , GATHER_TIME , CREATE_TIME , CREATE_NODE_TIME ,
  NEGOTIATE_TYPES_TIME , CREATE_ELEM_TIME , FILEID_EXCHANGE_TIME , CREATE_ADJ_TIME ,
  CREATE_SET_TIME , SHARED_SET_IDS , SHARED_SET_CONTENTS , SET_OFFSET_TIME ,
  CREATE_TAG_TIME , COORD_TIME , CONN_TIME , SET_TIME ,
  SET_META , SET_CONTENT , SET_PARENT , SET_CHILD ,
  ADJ_TIME , TAG_TIME , DENSE_TAG_TIME , SPARSE_TAG_TIME ,
  VARLEN_TAG_TIME , NUM_TIMES
}
 
- Protected Attributes inherited from moab::WriteHDF5
HDF5ErrorHandler errorHandler
 Store old HDF5 error handling function. More...
 
size_t bufferSize
 The size of the data buffer (dataBuffer). More...
 
char * dataBuffer
 A memory buffer to use for all I/O operations. More...
 
Interface * iFace
 Interface pointer passed to constructor. More...
 
WriteUtilIface * writeUtil
 Cached pointer to writeUtil interface. More...
 
mhdf_FileHandle filePtr
 The file handle from the mhdf library. More...
 
RangeMap< EntityHandle, wid_t > idMap
 Map from entity handles to file IDs. More...
 
std::list< ExportSet > exportList
 The list of elements to export. More...
 
ExportSet nodeSet
 The list of nodes to export. More...
 
ExportSet setSet
 The list of sets to export. More...
 
unsigned long setContentsOffset
 Offset into set contents table (zero except for parallel) More...
 
unsigned long setChildrenOffset
 Offset into set children table (zero except for parallel) More...
 
unsigned long setParentsOffset
 
long maxNumSetContents
 The largest number of values to write for any processor (needed to do collective IO). More...
 
long maxNumSetChildren
 
long maxNumSetParents
 
bool writeSets
 Flags indicating if set data should be written. For the normal (non-parallel) case, these values will depend only on whether or not there is any data to be written. For parallel meshes, opening the data table is collective, so the values must depend on whether or not any processor has meshsets to be written. More...
 
bool writeSetContents
 
bool writeSetChildren
 
bool writeSetParents
 
std::vector< SpecialSetData > specialSets
 Array of special/shared sets, in order of handle value. More...
 
std::list< TagDesc > tagList
 The list of tags to export. More...
 
bool parallelWrite
 True if doing parallel write. More...
 
bool collectiveIO
 True if using collective IO calls for parallel write. More...
 
bool writeTagDense
 True if writing dense-formatted tag data. More...
 
hid_t writeProp
 Property set to pass to H5Dwrite calls. For serial, this should be H5P_DEFAULT. For parallel, it may request collective IO. More...
 
DebugOutput dbgOut
 Utility to log debug output. More...
 
bool debugTrack
 Look for overlapping and/or missing writes. More...
 
- Static Protected Attributes inherited from moab::WriteHDF5
static MPEState topState
 
static MPEState subState
 

Detailed Description

Write MOAB HDF5 file in parallel.

Author
Jason Kraftcheck

Date
22 July 2004

Definition at line 20 of file WriteHDF5Parallel.hpp.
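The writer is normally not instantiated directly: it is created through factory() by the I/O layer when a parallel write is requested. Below is a minimal usage sketch that goes through the generic Interface::write_file() entry point; the file names and option strings (PARALLEL=READ_PART, PARALLEL=WRITE_PART, PARTITION=PARALLEL_PARTITION) are typical values chosen for the example, not requirements of this class.

// Sketch: trigger the parallel HDF5 writer through the MOAB Interface.
#include "moab/Core.hpp"
#include <mpi.h>

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    moab::Core mb;
    // Read a partitioned mesh, one part per rank (options are illustrative).
    moab::ErrorCode rval = mb.load_file( "input.h5m", 0,
        "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
    // Write it back collectively; a parallel write option selects the parallel HDF5 writer.
    if( moab::MB_SUCCESS == rval ) rval = mb.write_file( "output.h5m", 0, "PARALLEL=WRITE_PART" );
    MPI_Finalize();
    return ( moab::MB_SUCCESS == rval ) ? 0 : 1;
}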

Constructor & Destructor Documentation

◆ WriteHDF5Parallel()

moab::WriteHDF5Parallel::WriteHDF5Parallel ( Interface *  iface )

Constructor

Definition at line 279 of file WriteHDF5Parallel.cpp.

    : WriteHDF5( iface ), myPcomm( NULL ), pcommAllocated( false ), hslabOp( H5S_SELECT_OR )
{
}

Referenced by factory().

◆ ~WriteHDF5Parallel()

moab::WriteHDF5Parallel::~WriteHDF5Parallel ( )
virtual

Definition at line 284 of file WriteHDF5Parallel.cpp.

{
    if( pcommAllocated && myPcomm ) delete myPcomm;
}

References myPcomm, and pcommAllocated.

Member Function Documentation

◆ append_serial_tag_data()

ErrorCode moab::WriteHDF5Parallel::append_serial_tag_data ( std::vector< unsigned char > &  buffer,
const WriteHDF5::TagDesc &  tag 
)
protected

Definition at line 582 of file WriteHDF5Parallel.cpp.

584 { 585  ErrorCode rval; 586  587  std::string name; 588  rval = iFace->tag_get_name( tag.tag_id, name ); 589  if( MB_SUCCESS != rval ) return error( rval ); 590  591  // Get name length, including space for null char 592  size_t name_len = name.size() + 1; 593  if( name_len == 1 ) return MB_SUCCESS; // Skip tags with no name 594  595  DataType data_type; 596  rval = iFace->tag_get_data_type( tag.tag_id, data_type ); 597  if( MB_SUCCESS != rval ) return error( rval ); 598  599  // Get default value 600  int def_val_len; 601  const void* def_val; 602  if( MB_SUCCESS != iFace->tag_get_default_value( tag.tag_id, def_val, def_val_len ) ) 603  { 604  def_val_len = 0; 605  def_val = 0; 606  } 607  608  // Allocate struct within buffer 609  size_t init_size = buffer.size(); 610  buffer.resize( init_size + serial_tag_data::len( name_len, def_val_len, data_type ) ); 611  serial_tag_data* ptr = reinterpret_cast< serial_tag_data* >( &buffer[init_size] ); 612  613  // Populate struct 614  rval = iFace->tag_get_type( tag.tag_id, ptr->storage ); 615  if( MB_SUCCESS != rval ) return error( rval ); 616  ptr->type = data_type; 617  rval = iFace->tag_get_length( tag.tag_id, ptr->size ); 618  if( MB_VARIABLE_DATA_LENGTH == rval ) 619  ptr->size = MB_VARIABLE_LENGTH; 620  else if( MB_SUCCESS != rval ) 621  return error( rval ); 622  ptr->name_len = name_len; 623  Range range; 624  memset( ptr->name, 0, ptr->name_len ); 625  memcpy( ptr->name, name.data(), name.size() ); 626  ptr->def_val_len = def_val_len; 627  ptr->set_default_value( def_val ); 628  629  return MB_SUCCESS; 630 }

References buffer, moab::serial_tag_data::def_val_len, moab::error(), ErrorCode, moab::WriteHDF5::iFace, moab::serial_tag_data::len(), MB_SUCCESS, MB_VARIABLE_DATA_LENGTH, MB_VARIABLE_LENGTH, moab::serial_tag_data::name, moab::serial_tag_data::name_len, moab::serial_tag_data::set_default_value(), moab::serial_tag_data::size, moab::serial_tag_data::storage, moab::Interface::tag_get_data_type(), moab::Interface::tag_get_default_value(), moab::Interface::tag_get_length(), moab::Interface::tag_get_name(), moab::Interface::tag_get_type(), moab::WriteHDF5::TagDesc::tag_id, and moab::serial_tag_data::type.

Referenced by create_tag_tables().
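The function above appends one variable-length record per tag to a flat byte buffer that is later exchanged over MPI. The stand-alone sketch below shows the same pack-and-walk pattern with a deliberately simplified record (a length header followed by a name); it illustrates the technique only and is not the actual serial_tag_data layout.

#include <cstring>
#include <string>
#include <vector>

// Append one record: a fixed-size header (the name length) plus the name bytes.
void append_record( std::vector< unsigned char >& buffer, const std::string& name )
{
    const int name_len = (int)name.size() + 1;  // include the terminating NUL
    const size_t init_size = buffer.size();
    buffer.resize( init_size + sizeof( int ) + name_len );
    std::memcpy( &buffer[init_size], &name_len, sizeof( int ) );
    std::memcpy( &buffer[init_size] + sizeof( int ), name.c_str(), name_len );
}

// Walk the buffer back out record by record, much as check_serial_tag_data()
// steps through the buffer using serial_tag_data::len().
size_t count_records( const std::vector< unsigned char >& buffer )
{
    size_t count = 0, pos = 0;
    while( pos < buffer.size() )
    {
        int name_len;
        std::memcpy( &name_len, &buffer[pos], sizeof( int ) );
        pos += sizeof( int ) + name_len;
        ++count;
    }
    return count;
}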

◆ check_serial_tag_data()

ErrorCode moab::WriteHDF5Parallel::check_serial_tag_data ( const std::vector< unsigned char > &  buffer,
std::vector< TagDesc * > *  missing = 0,
std::vector< TagDesc * > *  newlist = 0 
)
protected

Helper function for create_tag_tables.

Definition at line 632 of file WriteHDF5Parallel.cpp.

635 { 636  ErrorCode rval; 637  638  // Use 'write_sparse' field as a 'visited' mark 639  std::list< TagDesc >::iterator tag_iter; 640  if( missing ) 641  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter ) 642  tag_iter->write_sparse = true; 643  644  // Use a set as a temporary for what will ultimately go in 645  // newlist because we need to pass back newlist in the order 646  // of the tagList member. 647  std::set< TagDesc* > newset; 648  649  // Iterate over data from, updating the local list of tags. 650  // Be careful to keep tagList sorted such that in the end all 651  // procs have the same list in the same order. 652  std::vector< unsigned char >::const_iterator diter = buffer.begin(); 653  tag_iter = tagList.begin(); 654  while( diter < buffer.end() ) 655  { 656  // Get struct from buffer 657  const serial_tag_data* ptr = reinterpret_cast< const serial_tag_data* >( &*diter ); 658  659  // Find local struct for tag 660  std::string name( ptr->name ); 661  std::string n; 662  iFace->tag_get_name( tag_iter->tag_id, n ); // Second time we've called, so shouldn't fail 663  if( n > name ) 664  { 665  tag_iter = tagList.begin(); // New proc, start search from beginning 666  } 667  iFace->tag_get_name( tag_iter->tag_id, n ); 668  while( n < name ) 669  { 670  ++tag_iter; 671  if( tag_iter == tagList.end() ) break; 672  iFace->tag_get_name( tag_iter->tag_id, n ); 673  } 674  if( tag_iter == tagList.end() || n != name ) 675  { // New tag 676  TagDesc newtag; 677  678  if( ptr->size == MB_VARIABLE_LENGTH ) 679  rval = iFace->tag_get_handle( name.c_str(), ptr->def_val_len, ptr->type, newtag.tag_id, 680  MB_TAG_VARLEN | MB_TAG_CREAT | ptr->storage, ptr->default_value() ); 681  else 682  rval = iFace->tag_get_handle( name.c_str(), ptr->size, ptr->type, newtag.tag_id, 683  MB_TAG_CREAT | ptr->storage, ptr->default_value() ); 684  if( MB_SUCCESS != rval ) return error( rval ); 685  686  newtag.sparse_offset = 0; 687  newtag.var_data_offset = 0; 688  newtag.write_sparse = false; 689  newtag.max_num_ents = 0; 690  newtag.max_num_vals = 0; 691  692  tag_iter = tagList.insert( tag_iter, newtag ); 693  if( newlist ) newset.insert( &*tag_iter ); 694  } 695  else 696  { // Check that tag is as expected 697  DataType type; 698  iFace->tag_get_data_type( tag_iter->tag_id, type ); 699  if( type != ptr->type ) 700  { 701  MB_SET_ERR( MB_FAILURE, "Processes have inconsistent data type for tag \"" << name << "\"" ); 702  } 703  int size; 704  iFace->tag_get_length( tag_iter->tag_id, size ); 705  if( size != ptr->size ) 706  { 707  MB_SET_ERR( MB_FAILURE, "Processes have inconsistent size for tag \"" << name << "\"" ); 708  } 709  tag_iter->write_sparse = false; 710  } 711  712  // Step to next variable-length struct. 713  diter += ptr->len(); 714  } 715  716  // Now pass back any local tags that weren't in the buffer 717  if( missing ) 718  { 719  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter ) 720  { 721  if( tag_iter->write_sparse ) 722  { 723  tag_iter->write_sparse = false; 724  missing->push_back( &*tag_iter ); 725  } 726  } 727  } 728  729  // Be careful to populate newlist in the same, sorted, order as tagList 730  if( newlist ) 731  { 732  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter ) 733  if( newset.find( &*tag_iter ) != newset.end() ) newlist->push_back( &*tag_iter ); 734  } 735  736  return MB_SUCCESS; 737 }

References buffer, moab::serial_tag_data::def_val_len, moab::serial_tag_data::default_value(), moab::error(), ErrorCode, moab::WriteHDF5::iFace, moab::serial_tag_data::len(), moab::WriteHDF5::TagDesc::max_num_ents, moab::WriteHDF5::TagDesc::max_num_vals, MB_SET_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_VARLEN, MB_VARIABLE_LENGTH, moab::serial_tag_data::name, size, moab::serial_tag_data::size, moab::WriteHDF5::TagDesc::sparse_offset, moab::serial_tag_data::storage, moab::Interface::tag_get_data_type(), moab::Interface::tag_get_handle(), moab::Interface::tag_get_length(), moab::Interface::tag_get_name(), moab::WriteHDF5::TagDesc::tag_id, moab::WriteHDF5::tagList, moab::serial_tag_data::type, moab::WriteHDF5::TagDesc::var_data_offset, and moab::WriteHDF5::TagDesc::write_sparse.

Referenced by create_tag_tables().

◆ communicate_shared_set_data()

ErrorCode moab::WriteHDF5Parallel::communicate_shared_set_data ( const Range &  owned,
const Range &  remote 
)
protected

Communicate set contents between processors such that each owner knows the contents, parents, & child lists from all processors that have a copy of the set.

Definition at line 1866 of file WriteHDF5Parallel.cpp.

1867 { 1868  ErrorCode rval; 1869  int mperr; 1870  const unsigned rank = myPcomm->proc_config().proc_rank(); 1871  const MPI_Comm comm = myPcomm->proc_config().proc_comm(); 1872  1873  dbgOut.tprintf( 1, "COMMUNICATING SHARED SET DATA (%lu owned & %lu remote)\n", (unsigned long)owned.size(), 1874  (unsigned long)remote.size() ); 1875  1876  // Calculate the total number of messages to be in transit (send and receive) 1877  size_t nummess = 0; 1878  std::vector< unsigned > procs; 1879  ; 1880  Range shared( owned ); 1881  shared.merge( remote ); 1882  for( Range::iterator i = shared.begin(); i != shared.end(); ++i ) 1883  { 1884  procs.clear(); 1885  rval = myPcomm->get_entityset_procs( *i, procs ); 1886  CHECK_MB( rval ); 1887  nummess += procs.size(); 1888  } 1889  1890  // Choose a receive buffer size. We need 4*sizeof(long) minimum, 1891  // but that is almost useless so use 16*sizeof(long) as the minimum 1892  // instead. Choose an upper limit such that we don't exceed 32 MB 1893  // of allocated memory (unless we absolutely must to meet the minimum.) 1894  // Also, don't initially choose buffers larger than 128*sizeof(long). 1895  const size_t MAX_BUFFER_MEM = 32 * 1024 * 1024 / sizeof( long ); 1896  // const size_t INIT_BUFFER_SIZE = 128; 1897  const size_t INIT_BUFFER_SIZE = 1024; 1898  const size_t MIN_BUFFER_SIZE = 16; 1899  size_t init_buff_size = INIT_BUFFER_SIZE; 1900  if( init_buff_size * nummess > MAX_BUFFER_MEM ) init_buff_size = MAX_BUFFER_MEM / nummess; 1901  if( init_buff_size < MIN_BUFFER_SIZE ) init_buff_size = MIN_BUFFER_SIZE; 1902  1903  dbgOut.printf( 2, "Using buffer size of %lu for an expected message count of %lu\n", (unsigned long)init_buff_size, 1904  (unsigned long)nummess ); 1905  1906  // Count number of recvs 1907  size_t numrecv = 0; 1908  for( Range::iterator i = owned.begin(); i != owned.end(); ++i ) 1909  { 1910  procs.clear(); 1911  rval = myPcomm->get_entityset_procs( *i, procs ); 1912  CHECK_MB( rval ); 1913  numrecv += procs.size(); 1914  if( std::find( procs.begin(), procs.end(), rank ) != procs.end() ) --numrecv; 1915  } 1916  1917  // Post receive buffers for all owned sets for all sharing procs 1918  std::vector< MPI_Request > recv_req( numrecv, MPI_REQUEST_NULL ); 1919  std::vector< MPI_Request > lrecv_req( numrecv, MPI_REQUEST_NULL ); 1920  1921  std::vector< std::vector< unsigned long > > recv_buf( numrecv, std::vector< unsigned long >( init_buff_size ) ); 1922  int idx = 0; 1923  for( Range::iterator i = owned.begin(); i != owned.end(); ++i ) 1924  { 1925  procs.clear(); 1926  rval = myPcomm->get_entityset_procs( *i, procs ); 1927  CHECK_MB( rval ); 1928  for( size_t j = 0; j < procs.size(); ++j ) 1929  { 1930  if( procs[j] == rank ) continue; 1931  int tag = ID_FROM_HANDLE( *i ); 1932  if( *i != CREATE_HANDLE( MBENTITYSET, tag ) ) 1933  { 1934 #ifndef NDEBUG 1935  abort(); 1936 #endif 1937  CHECK_MB( MB_FAILURE ); 1938  } 1939  dbgOut.printf( 5, "Posting buffer to receive set %d from proc %u\n", tag, procs[j] ); 1940  mperr = 1941  MPI_Irecv( &recv_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, procs[j], tag, comm, &recv_req[idx] ); 1942  CHECK_MPI( mperr ); 1943  ++idx; 1944  } 1945  } 1946  assert( (size_t)idx == numrecv ); 1947  1948  // Now send set data for all remote sets that I know about 1949  std::vector< MPI_Request > send_req( remote.size() ); 1950  std::vector< std::vector< unsigned long > > send_buf( remote.size() ); 1951  idx = 0; 1952  for( Range::iterator i = remote.begin(); i != remote.end(); ++i, ++idx ) 1953  { 1954  
send_buf[idx].resize( init_buff_size ); 1955  rval = pack_set( i, &send_buf[idx][0], init_buff_size ); 1956  CHECK_MB( rval ); 1957  EntityHandle remote_handle; 1958  unsigned owner; 1959  rval = myPcomm->get_entityset_owner( *i, owner, &remote_handle ); 1960  CHECK_MB( rval ); 1961  1962  int tag = ID_FROM_HANDLE( remote_handle ); 1963  assert( remote_handle == CREATE_HANDLE( MBENTITYSET, tag ) ); 1964  dbgOut.printf( 5, "Sending %lu values for set %d to proc %u\n", 1965  send_buf[idx][1] + send_buf[idx][2] + send_buf[idx][3] + 4, tag, owner ); 1966  mperr = MPI_Isend( &send_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, owner, tag, comm, &send_req[idx] ); 1967  CHECK_MPI( mperr ); 1968  } 1969  1970  // Tag mattag; 1971  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag); 1972  1973  // Now initialize local data for managing contents of owned, shared sets 1974  assert( specialSets.empty() ); 1975  specialSets.clear(); 1976  specialSets.reserve( owned.size() ); 1977  for( Range::iterator i = owned.begin(); i != owned.end(); ++i ) 1978  { 1979  // int block; 1980  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &*i, 1, &block)) 1981  // block = 0; 1982  // std::vector<int> ids; 1983  1984  SpecialSetData data; 1985  data.setHandle = *i; 1986  rval = iFace->get_meshset_options( *i, data.setFlags ); 1987  CHECK_MB( rval ); 1988  specialSets.push_back( data ); 1989  std::vector< EntityHandle > list; 1990  if( data.setFlags & MESHSET_ORDERED ) 1991  { 1992  list.clear(); 1993  rval = iFace->get_entities_by_handle( *i, list ); 1994  CHECK_MB( rval ); 1995  rval = vector_to_id_list( list, specialSets.back().contentIds, true ); 1996  CHECK_MB( rval ); 1997  // if (block) 1998  // get_global_ids(iFace, &list[0], list.size(), MESHSET_ORDERED, ids); 1999  } 2000  else 2001  { 2002  Range range; 2003  rval = iFace->get_entities_by_handle( *i, range ); 2004  CHECK_MB( rval ); 2005  bool ranged; 2006  rval = range_to_blocked_list( range, specialSets.back().contentIds, ranged ); 2007  if( ranged ) specialSets.back().setFlags |= mhdf_SET_RANGE_BIT; 2008  // if (block) { 2009  // std::vector<EntityHandle> tmp; 2010  // for (Range::const_pair_iterator pi = range.const_pair_begin(); pi != 2011  // range.const_pair_end(); ++pi) { 2012  // tmp.push_back(pi->first); 2013  // tmp.push_back(pi->second); 2014  // } 2015  // get_global_ids(iFace, &tmp[0], tmp.size(), ranged ? 
0 : MESHSET_ORDERED, ids); 2016  //} 2017  } 2018  2019  list.clear(); 2020  rval = iFace->get_parent_meshsets( *i, list ); 2021  CHECK_MB( rval ); 2022  rval = vector_to_id_list( list, specialSets.back().parentIds, true ); 2023  CHECK_MB( rval ); 2024  rval = iFace->get_child_meshsets( *i, list ); 2025  CHECK_MB( rval ); 2026  rval = vector_to_id_list( list, specialSets.back().childIds, true ); 2027  CHECK_MB( rval ); 2028  } 2029  2030  // Process received buffers, repost larger buffers where necessary 2031  size_t remaining = numrecv; 2032  numrecv = 0; 2033  while( remaining-- ) 2034  { 2035  std::vector< unsigned long > dead; 2036  MPI_Status status; 2037  mperr = MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status ); 2038  CHECK_MPI( mperr ); 2039  EntityHandle handle = CREATE_HANDLE( MBENTITYSET, status.MPI_TAG ); 2040  std::vector< unsigned long >& buff = recv_buf[idx]; 2041  size_t size = buff[1] + buff[2] + buff[3] + 4; 2042  dbgOut.printf( 5, "Received %lu values for set %d from proc %d\n", (unsigned long)size, status.MPI_TAG, 2043  status.MPI_SOURCE ); 2044  if( size <= init_buff_size ) 2045  { 2046  rval = unpack_set( handle, &buff[0], init_buff_size ); 2047  CHECK_MB( rval ); 2048  dead.swap( buff ); // Release memory 2049  } 2050  else 2051  { 2052  // Data was too big for init_buff_size 2053  // repost with larger buffer 2054  buff.resize( size ); 2055  dbgOut.printf( 5, "Re-Posting buffer to receive set %d from proc %d with size %lu\n", status.MPI_TAG, 2056  status.MPI_SOURCE, (unsigned long)size ); 2057  mperr = MPI_Irecv( &buff[0], size, MPI_UNSIGNED_LONG, status.MPI_SOURCE, status.MPI_TAG, comm, 2058  &lrecv_req[idx] ); 2059  CHECK_MPI( mperr ); 2060  ++numrecv; 2061  } 2062  recv_req[idx] = MPI_REQUEST_NULL; 2063  } 2064  2065  // Wait for sends to complete 2066  MPI_Waitall( send_req.size(), &send_req[0], MPI_STATUSES_IGNORE ); 2067  2068  // Re-send sets that didn't fit initial buffer size 2069  idx = 0; 2070  for( Range::iterator i = remote.begin(); i != remote.end(); ++i, ++idx ) 2071  { 2072  std::vector< unsigned long >& buff = send_buf[idx]; 2073  size_t size = buff[1] + buff[2] + buff[3] + 4; 2074  if( size <= init_buff_size ) continue; 2075  2076  buff.resize( size ); 2077  rval = pack_set( i, &buff[0], size ); 2078  CHECK_MB( rval ); 2079  EntityHandle remote_handle; 2080  unsigned owner; 2081  rval = myPcomm->get_entityset_owner( *i, owner, &remote_handle ); 2082  CHECK_MB( rval ); 2083  2084  int tag = ID_FROM_HANDLE( remote_handle ); 2085  assert( remote_handle == CREATE_HANDLE( MBENTITYSET, tag ) ); 2086  dbgOut.printf( 5, "Sending %lu values for set %d to proc %u\n", (unsigned long)size, tag, owner ); 2087  mperr = MPI_Isend( &buff[0], size, MPI_UNSIGNED_LONG, owner, tag, comm, &send_req[idx] ); 2088  CHECK_MPI( mperr ); 2089  } 2090  2091  // Process received buffers 2092  remaining = numrecv; 2093  while( remaining-- ) 2094  { 2095  std::vector< unsigned long > dead; 2096  MPI_Status status; 2097  mperr = MPI_Waitany( lrecv_req.size(), &lrecv_req[0], &idx, &status ); 2098  CHECK_MPI( mperr ); 2099  EntityHandle handle = CREATE_HANDLE( MBENTITYSET, status.MPI_TAG ); 2100  std::vector< unsigned long >& buff = recv_buf[idx]; 2101  dbgOut.printf( 5, "Received %lu values for set %d from proc %d\n", 4 + buff[1] + buff[2] + buff[3], 2102  status.MPI_TAG, status.MPI_SOURCE ); 2103  rval = unpack_set( handle, &buff[0], buff.size() ); 2104  CHECK_MB( rval ); 2105  dead.swap( buff ); // Release memory 2106  2107  lrecv_req[idx] = MPI_REQUEST_NULL; 2108  } 
2109  2110  // Wait for sends to complete 2111  MPI_Waitall( send_req.size(), &send_req[0], MPI_STATUSES_IGNORE ); 2112  2113  return MB_SUCCESS; 2114 }

References moab::Range::begin(), CHECK_MB, CHECK_MPI, moab::CREATE_HANDLE(), moab::WriteHDF5::dbgOut, moab::Range::end(), ErrorCode, moab::Interface::get_child_meshsets(), moab::Interface::get_entities_by_handle(), moab::ParallelComm::get_entityset_owner(), moab::ParallelComm::get_entityset_procs(), moab::Interface::get_meshset_options(), moab::Interface::get_parent_meshsets(), moab::ID_FROM_HANDLE(), moab::WriteHDF5::iFace, MB_SUCCESS, MBENTITYSET, moab::Range::merge(), mhdf_SET_RANGE_BIT, myPcomm, pack_set(), moab::DebugOutput::printf(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::WriteHDF5::range_to_blocked_list(), moab::WriteHDF5::SpecialSetData::setFlags, moab::WriteHDF5::SpecialSetData::setHandle, size, moab::Range::size(), moab::WriteHDF5::specialSets, moab::DebugOutput::tprintf(), unpack_set(), and moab::WriteHDF5::vector_to_id_list().

Referenced by create_meshset_tables().
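A key detail above is how the receives are sized: each owning process posts fixed-size receive buffers, reads the true message length from the packed header, and reposts a larger receive (while the sender re-sends) whenever the data did not fit. The program below is a simplified, blocking, two-rank sketch of that handshake; it is an illustration of the pattern, not MOAB code, and should be run with exactly two MPI ranks.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    const int INIT_SIZE = 16;  // initial receive buffer length, in unsigned longs
    const int TAG       = 1;

    if( rank == 0 )  // "owner": receives the packed set data
    {
        std::vector< unsigned long > buf( INIT_SIZE );
        MPI_Recv( &buf[0], INIT_SIZE, MPI_UNSIGNED_LONG, 1, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
        unsigned long true_size = buf[0];  // header word holds the full message length
        if( true_size > (unsigned long)INIT_SIZE )
        {
            buf.resize( true_size );       // repost with a buffer that is large enough
            MPI_Recv( &buf[0], (int)true_size, MPI_UNSIGNED_LONG, 1, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
        }
        std::printf( "received %lu values\n", true_size );
    }
    else if( rank == 1 )  // "sharer": sends the packed set data
    {
        std::vector< unsigned long > data( 100, 7 );
        data[0] = data.size();             // header: total message length
        MPI_Send( &data[0], INIT_SIZE, MPI_UNSIGNED_LONG, 0, TAG, MPI_COMM_WORLD );  // truncated first send
        if( data.size() > (size_t)INIT_SIZE )                                        // full payload on the second pass
            MPI_Send( &data[0], (int)data.size(), MPI_UNSIGNED_LONG, 0, TAG, MPI_COMM_WORLD );
    }

    MPI_Finalize();
    return 0;
}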

◆ communicate_shared_set_ids()

ErrorCode moab::WriteHDF5Parallel::communicate_shared_set_ids ( const Range &  owned,
const Range &  remote 
)
protected

Get remote ids for shared sets.

Definition at line 1517 of file WriteHDF5Parallel.cpp.

1518 { 1519  ErrorCode rval; 1520  int mperr; 1521  const int TAG = 0xD0E; 1522  // const unsigned rank = myPcomm->proc_config().proc_rank(); 1523  const MPI_Comm comm = myPcomm->proc_config().proc_comm(); 1524  1525  dbgOut.tprint( 1, "COMMUNICATING SHARED SET IDS\n" ); 1526  dbgOut.print( 6, "Owned, shared sets: ", owned ); 1527  1528  // Post receive buffers for all procs for which we share sets 1529  1530  std::vector< unsigned > procs; 1531  rval = myPcomm->get_entityset_owners( procs ); 1532  CHECK_MB( rval ); 1533  std::vector< unsigned >::iterator it = std::find( procs.begin(), procs.end(), myPcomm->proc_config().proc_rank() ); 1534  if( it != procs.end() ) procs.erase( it ); 1535  1536  std::vector< MPI_Request > recv_req( procs.size(), MPI_REQUEST_NULL ); 1537  std::vector< std::vector< unsigned long > > recv_buf( procs.size() ); 1538  1539  size_t recv_count = 0; 1540  for( size_t i = 0; i < procs.size(); ++i ) 1541  { 1542  Range tmp; 1543  rval = myPcomm->get_owned_sets( procs[i], tmp ); 1544  CHECK_MB( rval ); 1545  size_t count = 1546  intersect( tmp, remote ).size(); // Necessary because we might not be writing all of the database 1547  if( count ) 1548  { 1549  dbgOut.printf( 6, "Sets owned by proc %u (remote handles): ", procs[i] ); 1550  if( dbgOut.get_verbosity() >= 6 ) 1551  { 1552  Range remote_handles; 1553  tmp = intersect( tmp, remote ); 1554  for( Range::iterator j = tmp.begin(); j != tmp.end(); ++j ) 1555  { 1556  unsigned r; 1557  EntityHandle h; 1558  myPcomm->get_entityset_owner( *j, r, &h ); 1559  assert( r == procs[i] ); 1560  remote_handles.insert( h ); 1561  } 1562  dbgOut.print( 6, remote_handles ); 1563  } 1564  recv_count++; 1565  recv_buf[i].resize( 2 * count + 1 ); 1566  dbgOut.printf( 5, "Posting receive buffer of size %lu for proc %u (%lu of %lu owned sets)\n", 1567  (unsigned long)recv_buf[i].size(), procs[i], count, tmp.size() ); 1568  mperr = 1569  MPI_Irecv( &recv_buf[i][0], recv_buf[i].size(), MPI_UNSIGNED_LONG, procs[i], TAG, comm, &recv_req[i] ); 1570  CHECK_MPI( mperr ); 1571  } 1572  } 1573  1574  // Send set ids to all procs with which we share them 1575  1576  // First build per-process lists of sets for which we need to send data 1577  std::map< unsigned, Range > send_sets; 1578  std::vector< unsigned > set_procs; 1579  for( Range::reverse_iterator i = owned.rbegin(); i != owned.rend(); ++i ) 1580  { 1581  set_procs.clear(); 1582  rval = myPcomm->get_entityset_procs( *i, set_procs ); 1583  CHECK_MB( rval ); 1584  for( size_t j = 0; j < set_procs.size(); ++j ) 1585  if( set_procs[j] != myPcomm->proc_config().proc_rank() ) send_sets[set_procs[j]].insert( *i ); 1586  } 1587  assert( send_sets.find( myPcomm->proc_config().proc_rank() ) == send_sets.end() ); 1588  1589  // Now send the data 1590  std::vector< std::vector< unsigned long > > send_buf( send_sets.size() ); 1591  std::vector< MPI_Request > send_req( send_sets.size() ); 1592  std::map< unsigned, Range >::iterator si = send_sets.begin(); 1593  for( size_t i = 0; si != send_sets.end(); ++si, ++i ) 1594  { 1595  dbgOut.printf( 6, "Sending data for shared sets to proc %u: ", si->first ); 1596  dbgOut.print( 6, si->second ); 1597  1598  send_buf[i].reserve( 2 * si->second.size() + 1 ); 1599  send_buf[i].push_back( si->second.size() ); 1600  for( Range::iterator j = si->second.begin(); j != si->second.end(); ++j ) 1601  { 1602  send_buf[i].push_back( *j ); 1603  send_buf[i].push_back( idMap.find( *j ) ); 1604  } 1605  dbgOut.printf( 5, "Sending buffer of size %lu to proc %u (%lu of %lu 
owned sets)\n", 1606  (unsigned long)send_buf[i].size(), si->first, si->second.size(), owned.size() ); 1607  mperr = MPI_Isend( &send_buf[i][0], send_buf[i].size(), MPI_UNSIGNED_LONG, si->first, TAG, comm, &send_req[i] ); 1608  } 1609  1610  // Process received data 1611  MPI_Status status; 1612  int idx; 1613  while( recv_count-- ) 1614  { 1615  mperr = MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status ); 1616  CHECK_MPI( mperr ); 1617  1618  assert( (unsigned)status.MPI_SOURCE == procs[idx] ); 1619  assert( 2 * recv_buf[idx].front() + 1 == recv_buf[idx].size() ); 1620  const size_t n = std::min< size_t >( recv_buf[idx].front(), ( recv_buf[idx].size() - 1 ) / 2 ); 1621  dbgOut.printf( 5, "Received buffer of size %lu from proc %d\n", (unsigned long)( 2 * n + 1 ), 1622  (int)status.MPI_SOURCE ); 1623  1624  for( size_t i = 0; i < n; ++i ) 1625  { 1626  EntityHandle handle = 0; 1627  rval = myPcomm->get_entityset_local_handle( procs[idx], recv_buf[idx][2 * i + 1], handle ); 1628  CHECK_MB( rval ); 1629  assert( handle != 0 ); 1630  if( !idMap.insert( handle, recv_buf[idx][2 * i + 2], 1 ).second ) 1631  error( MB_FAILURE ); // Conflicting IDs?????? 1632  } 1633  1634  recv_req[idx] = MPI_REQUEST_NULL; 1635  } 1636  assert( MPI_SUCCESS == MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status ) && 1637  MPI_UNDEFINED == idx ); // Check that we got them all 1638  1639  // Wait for all sends to complete before we release send 1640  // buffers (implicitly releases when we return from this function) 1641  1642  std::vector< MPI_Status > stats( send_req.size() ); 1643  mperr = MPI_Waitall( send_req.size(), &send_req[0], &stats[0] ); 1644  CHECK_MPI( mperr ); 1645  1646  if( dbgOut.get_verbosity() >= SSVB ) print_shared_sets(); 1647  1648  return MB_SUCCESS; 1649 }

References moab::Range::begin(), CHECK_MB, CHECK_MPI, moab::WriteHDF5::dbgOut, moab::Range::end(), moab::error(), ErrorCode, moab::RangeMap< KeyType, ValType, NullVal >::find(), moab::ParallelComm::get_entityset_local_handle(), moab::ParallelComm::get_entityset_owner(), moab::ParallelComm::get_entityset_owners(), moab::ParallelComm::get_entityset_procs(), moab::ParallelComm::get_owned_sets(), moab::DebugOutput::get_verbosity(), moab::WriteHDF5::idMap, moab::Range::insert(), moab::RangeMap< KeyType, ValType, NullVal >::insert(), moab::intersect(), MB_SUCCESS, myPcomm, moab::DebugOutput::print(), print_shared_sets(), moab::DebugOutput::printf(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::Range::rbegin(), moab::Range::rend(), size, moab::Range::size(), moab::SSVB, and moab::DebugOutput::tprint().

Referenced by create_meshset_tables().

◆ create_adjacency_tables()

ErrorCode moab::WriteHDF5Parallel::create_adjacency_tables ( )
protected

Create tables to hold element adjacencies.

Definition at line 1405 of file WriteHDF5Parallel.cpp.

1406 { 1407  struct AdjSetCreator : public DataSetCreator 1408  { 1409  ErrorCode operator()( WriteHDF5* file, long size, const ExportSet* ex, long& start_id ) const 1410  { 1411  mhdf_Status status; 1412  hid_t handle = mhdf_createAdjacency( file->file_ptr(), ex->name(), size, &status ); 1413  CHECK_HDFN( status ); 1414  mhdf_closeData( file->file_ptr(), handle, &status ); 1415  CHECK_HDFN( status ); 1416  start_id = -1; 1417  return MB_SUCCESS; 1418  } 1419  }; 1420  1421  std::vector< ExportSet* > groups; 1422 #ifdef WRITE_NODE_ADJACENCIES 1423  groups.push_back( &nodeSet ); 1424 #endif 1425  for( std::list< ExportSet >::iterator ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter ) 1426  groups.push_back( &*ex_iter ); 1427  1428  ErrorCode rval; 1429  const int numtypes = groups.size(); 1430  if (numtypes > 0) 1431  { 1432  std::vector< long > counts( numtypes ); 1433  std::vector< long > offsets( numtypes ); 1434  std::vector< long > max_ents( numtypes ); 1435  std::vector< long > totals( numtypes ); 1436  for( int i = 0; i < numtypes; ++i ) 1437  { 1438  wid_t count; 1439  rval = count_adjacencies( groups[i]->range, count ); 1440  CHECK_MB( rval ); 1441  counts[i] = count; 1442  } 1443  1444  rval = create_dataset( numtypes, &counts[0], &offsets[0], &max_ents[0], &totals[0], AdjSetCreator(), &groups[0] ); 1445  CHECK_MB( rval ); 1446  1447  // Cppcheck warning (false positive): variable groups is assigned a value that is never used 1448  for( int i = 0; i < numtypes; ++i ) 1449  { 1450  groups[i]->max_num_adjs = max_ents[i]; 1451  groups[i]->adj_offset = offsets[i]; 1452  } 1453  } 1454  return MB_SUCCESS; 1455 }

References CHECK_HDFN, CHECK_MB, moab::WriteHDF5::count_adjacencies(), create_dataset(), ErrorCode, moab::WriteHDF5::exportList, moab::WriteHDF5::file_ptr(), MB_SUCCESS, mhdf_closeData(), mhdf_createAdjacency(), moab::WriteHDF5::ExportSet::name(), moab::WriteHDF5::nodeSet, and size.

Referenced by parallel_create_file().

◆ create_dataset()

ErrorCode moab::WriteHDF5Parallel::create_dataset ( int  num_datasets,
const long *  num_owned_entities,
long *  offsets_out,
long *  max_proc_ents_out,
long *  total_ents_out,
const DataSetCreator &  creator = NoopDescCreator(),
ExportSet *  groups[] = 0,
wid_t *  first_ids_out = NULL 
)
protected

Do typical communication for dataset creation.

Given the number of entities each processor intends to write, do necessary communication and create dataset on root, passing back misc info to each proc.

Parameters
creator: Functor to do actual dataset creation. Used only on the root process.
num_datasets: The number of datasets to create.
groups: Third argument passed to DataSetCreator. Array of length num_datasets, or NULL.
num_owned_entities: The number of entities this proc will write. Array of length num_datasets.
offsets_out: Output: The offset in the dataset at which this process should write. Array of length num_datasets.
max_proc_ents_out: Output: The maximum number of entities that any proc will write. Array of length num_datasets.
total_ents_out: Output: The size of the created dataset (sum of counts over all procs). Array of length num_datasets.
first_ids_out: Output: The first ID of the first entity in the dataset. The first ID for this proc's entities is first_ids_out + offsets_out. Array of length num_datasets, or NULL.

Definition at line 1080 of file WriteHDF5Parallel.cpp.

1088 { 1089  int result; 1090  ErrorCode rval; 1091  const unsigned rank = myPcomm->proc_config().proc_rank(); 1092  const unsigned nproc = myPcomm->proc_config().proc_size(); 1093  const MPI_Comm comm = myPcomm->proc_config().proc_comm(); 1094  1095  // Gather entity counts for each processor on root 1096  std::vector< long > counts( rank ? 0 : nproc * num_datasets ); 1097  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &num_owned, sizeof( long ) ); 1098  result = MPI_Gather( const_cast< long* >( num_owned ), num_datasets, MPI_LONG, &counts[0], num_datasets, MPI_LONG, 1099  0, comm ); 1100  CHECK_MPI( result ); 1101  1102  // Create node data in file 1103  DatasetVals zero_val = { 0, 0, 0 }; 1104  std::vector< DatasetVals > cumulative( num_datasets, zero_val ); 1105  if( rank == 0 ) 1106  { 1107  for( unsigned i = 0; i < nproc; i++ ) 1108  { 1109  const long* proc_data = &counts[i * num_datasets]; 1110  for( int index = 0; index < num_datasets; ++index ) 1111  { 1112  cumulative[index].total += proc_data[index]; 1113  if( proc_data[index] > cumulative[index].max_count ) cumulative[index].max_count = proc_data[index]; 1114  } 1115  } 1116  1117  for( int index = 0; index < num_datasets; ++index ) 1118  { 1119  if( cumulative[index].total ) 1120  { 1121  rval = creator( this, cumulative[index].total, groups ? groups[index] : 0, cumulative[index].start_id ); 1122  CHECK_MB( rval ); 1123  } 1124  else 1125  { 1126  cumulative[index].start_id = -1; 1127  } 1128  } 1129  } 1130  1131  // Send id offset to every proc 1132  result = MPI_Bcast( (void*)&cumulative[0], 3 * num_datasets, MPI_LONG, 0, comm ); 1133  CHECK_MPI( result ); 1134  for( int index = 0; index < num_datasets; ++index ) 1135  { 1136  if( first_ids_out ) first_ids_out[index] = (wid_t)cumulative[index].start_id; 1137  max_proc_entities[index] = cumulative[index].max_count; 1138  total_entities[index] = cumulative[index].total; 1139  } 1140  1141  // Convert array of per-process counts to per-process offsets 1142  if( rank == 0 ) 1143  { 1144  // Initialize prev_size with data sizes for root process 1145  std::vector< long > prev_size( counts.begin(), counts.begin() + num_datasets ); 1146  // Root process gets offset zero 1147  std::fill( counts.begin(), counts.begin() + num_datasets, 0L ); 1148  // For each proc other than this one (root) 1149  for( unsigned i = 1; i < nproc; ++i ) 1150  { 1151  // Get pointer to offsets for previous process in list 1152  long* prev_data = &counts[( i - 1 ) * num_datasets]; 1153  // Get pointer to offsets for this process in list 1154  long* proc_data = &counts[i * num_datasets]; 1155  // For each data set 1156  for( int j = 0; j < num_datasets; ++j ) 1157  { 1158  // Get size of data in dataset from process i 1159  long mysize = proc_data[j]; 1160  // Offset for process i is offset of previous process plus 1161  // number of values previous process will write 1162  proc_data[j] = prev_data[j] + prev_size[j]; 1163  // Store my size, as it is no longer available in 'counts' 1164  prev_size[j] = mysize; 1165  } 1166  } 1167  } 1168  1169  // Send each proc it's offset in the table 1170  if( rank == 0 ) 1171  { 1172  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &counts[0], num_datasets * nproc * sizeof( long ) ); 1173  } 1174  result = MPI_Scatter( &counts[0], num_datasets, MPI_LONG, offsets_out, num_datasets, MPI_LONG, 0, comm ); 1175  CHECK_MPI( result ); 1176  1177  return MB_SUCCESS; 1178 }

References CHECK_MB, CHECK_MPI, ErrorCode, MB_SUCCESS, myPcomm, moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::ProcConfig::proc_size(), and VALGRIND_CHECK_MEM_IS_DEFINED.

Referenced by create_adjacency_tables(), create_element_tables(), create_meshset_tables(), create_node_table(), and create_tag_tables().
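The communication pattern is the same for every table the parallel writer creates: gather per-process counts on the root, let the root size the table and compute exclusive prefix-sum offsets, then broadcast the totals and scatter each process its own offset. A minimal single-dataset sketch of that pattern, with illustrative names rather than the MOAB internals, is:

#include <mpi.h>
#include <vector>

void negotiate_offsets( long my_count, long& my_offset, long& max_count, long& total, MPI_Comm comm )
{
    int rank, nproc;
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &nproc );

    // Gather every process's count on the root.
    std::vector< long > counts( rank == 0 ? nproc : 0 );
    MPI_Gather( &my_count, 1, MPI_LONG, counts.data(), 1, MPI_LONG, 0, comm );

    long summary[2] = { 0, 0 };  // { total, max }
    std::vector< long > offsets( rank == 0 ? nproc : 0 );
    if( rank == 0 )
    {
        long running = 0;
        for( int i = 0; i < nproc; ++i )
        {
            offsets[i] = running;  // exclusive prefix sum gives each proc's write offset
            running += counts[i];
            if( counts[i] > summary[1] ) summary[1] = counts[i];
        }
        summary[0] = running;
        // ...the root would create the dataset of size summary[0] here...
    }

    // Everyone learns the total and maximum; each proc gets only its own offset.
    MPI_Bcast( summary, 2, MPI_LONG, 0, comm );
    MPI_Scatter( offsets.data(), 1, MPI_LONG, &my_offset, 1, MPI_LONG, 0, comm );
    total = summary[0];
    max_count = summary[1];
}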

◆ create_element_tables()

ErrorCode moab::WriteHDF5Parallel::create_element_tables ( )
protected

Create tables to hold element connectivity.

Definition at line 1363 of file WriteHDF5Parallel.cpp.

1364 { 1365  struct ElemSetCreator : public DataSetCreator 1366  { 1367  ErrorCode operator()( WriteHDF5* file, long size, const ExportSet* ex, long& start_id ) const 1368  { 1369  return file->create_elem_table( *ex, size, start_id ); 1370  } 1371  }; 1372  1373  const int numtypes = exportList.size(); 1374  if (numtypes > 0) 1375  { 1376  std::vector< ExportSet* > groups( numtypes ); 1377  std::vector< long > counts( numtypes ), offsets( numtypes ), max_ents( numtypes ), total_ents( numtypes ); 1378  std::vector< wid_t > start_ids( numtypes ); 1379  1380  size_t idx = 0; 1381  std::list< ExportSet >::iterator ex_iter; 1382  for( ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter, ++idx ) 1383  { 1384  groups[idx] = &*ex_iter; 1385  counts[idx] = ex_iter->range.size(); 1386  } 1387  ErrorCode rval = create_dataset( numtypes, &counts[0], &offsets[0], &max_ents[0], &total_ents[0], ElemSetCreator(), 1388  &groups[0], &start_ids[0] ); 1389  CHECK_MB( rval ); 1390  1391  for( idx = 0, ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter, ++idx ) 1392  { 1393  ex_iter->first_id = start_ids[idx]; 1394  ex_iter->offset = offsets[idx]; 1395  ex_iter->max_num_ents = max_ents[idx]; 1396  ex_iter->total_num_ents = total_ents[idx]; 1397  rval = assign_ids( ex_iter->range, ex_iter->first_id + ex_iter->offset ); 1398  CHECK_MB( rval ); 1399  } 1400  } 1401  1402  return MB_SUCCESS; 1403 }

References moab::WriteHDF5::assign_ids(), CHECK_MB, create_dataset(), moab::WriteHDF5::create_elem_table(), ErrorCode, moab::WriteHDF5::exportList, MB_SUCCESS, and size.

Referenced by parallel_create_file().

◆ create_meshset_tables()

ErrorCode moab::WriteHDF5Parallel::create_meshset_tables ( double *  times)
protected

Create tables for mesh sets.

Definition at line 2116 of file WriteHDF5Parallel.cpp.

2117 { 2118  Range::const_iterator riter; 2119  const unsigned rank = myPcomm->proc_config().proc_rank(); 2120  2121  START_SERIAL; 2122  print_type_sets( iFace, &dbgOut, setSet.range ); 2123  END_SERIAL; 2124  CpuTimer timer; 2125  2126  // Remove remote sets from setSets 2127  Range shared, owned, remote; 2128  ErrorCode rval = myPcomm->get_shared_sets( shared ); 2129  CHECK_MB( rval ); 2130  shared = intersect( shared, setSet.range ); 2131  rval = myPcomm->get_owned_sets( rank, owned ); 2132  CHECK_MB( rval ); 2133  owned = intersect( owned, setSet.range ); 2134  remote = subtract( shared, owned ); 2135  setSet.range = subtract( setSet.range, remote ); 2136  2137  // Create set meta table 2138  struct SetDescCreator : public DataSetCreator 2139  { 2140  ErrorCode operator()( WriteHDF5* writer, long size, const ExportSet*, long& start_id ) const 2141  { 2142  return writer->create_set_meta( size, start_id ); 2143  } 2144  }; 2145  long count = setSet.range.size(); 2146  rval = create_dataset( 1, &count, &setSet.offset, &setSet.max_num_ents, &setSet.total_num_ents, SetDescCreator(), 2147  NULL, &setSet.first_id ); 2148  CHECK_MB( rval ); 2149  writeSets = setSet.max_num_ents > 0; 2150  2151  rval = assign_ids( setSet.range, setSet.first_id + setSet.offset ); 2152  CHECK_MB( rval ); 2153  if( times ) times[SET_OFFSET_TIME] = timer.time_elapsed(); 2154  2155  // Exchange file IDS for sets between all procs 2156  rval = communicate_shared_set_ids( owned, remote ); 2157  CHECK_MB( rval ); 2158  if( times ) times[SHARED_SET_IDS] = timer.time_elapsed(); 2159  2160  // Communicate remote set contents, children, etc. 2161  rval = communicate_shared_set_data( owned, remote ); 2162  CHECK_MB( rval ); 2163  if( times ) times[SHARED_SET_CONTENTS] = timer.time_elapsed(); 2164  2165  // Communicate counts for owned sets 2166  long data_counts[3]; // { #contents, #children, #parents } 2167  rval = count_set_size( setSet.range, data_counts[0], data_counts[1], data_counts[2] ); 2168  CHECK_MB( rval ); 2169  if( times ) times[SET_OFFSET_TIME] += timer.time_elapsed(); 2170  2171  long offsets[3], max_counts[3], totals[3]; 2172  rval = create_dataset( 3, data_counts, offsets, max_counts, totals ); 2173  CHECK_MB( rval ); 2174  2175  // Create the datasets 2176  if( 0 == myPcomm->proc_config().proc_rank() ) 2177  { 2178  rval = create_set_tables( totals[0], totals[1], totals[2] ); 2179  CHECK_MB( rval ); 2180  } 2181  2182  // Store communicated global data 2183  setContentsOffset = offsets[0]; 2184  setChildrenOffset = offsets[1]; 2185  setParentsOffset = offsets[2]; 2186  maxNumSetContents = max_counts[0]; 2187  maxNumSetChildren = max_counts[1]; 2188  maxNumSetParents = max_counts[2]; 2189  writeSetContents = totals[0] > 0; 2190  writeSetChildren = totals[1] > 0; 2191  writeSetParents = totals[2] > 0; 2192  2193  dbgOut.printf( 2, "set contents: %ld local, %ld global, offset = %ld\n", data_counts[0], totals[0], offsets[0] ); 2194  dbgOut.printf( 2, "set children: %ld local, %ld global, offset = %ld\n", data_counts[1], totals[1], offsets[1] ); 2195  dbgOut.printf( 2, "set parents: %ld local, %ld global, offset = %ld\n", data_counts[2], totals[2], offsets[2] ); 2196  2197  return MB_SUCCESS; 2198 }

References moab::WriteHDF5::assign_ids(), CHECK_MB, communicate_shared_set_data(), communicate_shared_set_ids(), moab::WriteHDF5::count_set_size(), create_dataset(), moab::WriteHDF5::create_set_meta(), moab::WriteHDF5::create_set_tables(), moab::WriteHDF5::dbgOut, END_SERIAL, ErrorCode, moab::WriteHDF5::ExportSet::first_id, moab::ParallelComm::get_owned_sets(), moab::ParallelComm::get_shared_sets(), moab::WriteHDF5::iFace, moab::intersect(), moab::WriteHDF5::ExportSet::max_num_ents, moab::WriteHDF5::maxNumSetChildren, moab::WriteHDF5::maxNumSetContents, moab::WriteHDF5::maxNumSetParents, MB_SUCCESS, myPcomm, moab::WriteHDF5::ExportSet::offset, moab::print_type_sets(), moab::DebugOutput::printf(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::WriteHDF5::ExportSet::range, moab::WriteHDF5::SET_OFFSET_TIME, moab::WriteHDF5::setChildrenOffset, moab::WriteHDF5::setContentsOffset, moab::WriteHDF5::setParentsOffset, moab::WriteHDF5::setSet, moab::WriteHDF5::SHARED_SET_CONTENTS, moab::WriteHDF5::SHARED_SET_IDS, size, moab::Range::size(), START_SERIAL, moab::subtract(), moab::CpuTimer::time_elapsed(), moab::WriteHDF5::ExportSet::total_num_ents, moab::WriteHDF5::writeSetChildren, moab::WriteHDF5::writeSetContents, moab::WriteHDF5::writeSetParents, and moab::WriteHDF5::writeSets.

Referenced by parallel_create_file().

◆ create_node_table()

ErrorCode moab::WriteHDF5Parallel::create_node_table ( int  dimension)
protected

Create the node table in the file.

Definition at line 1180 of file WriteHDF5Parallel.cpp.

1181 { 1182  nodeSet.num_nodes = dimension; // Put it here so NodeSetCreator can access it 1183  struct NodeSetCreator : public DataSetCreator 1184  { 1185  ErrorCode operator()( WriteHDF5* file, long count, const ExportSet* group, long& start_id ) const 1186  { 1187  mhdf_Status status; 1188  hid_t handle = mhdf_createNodeCoords( file->file_ptr(), group->num_nodes, count, &start_id, &status ); 1189  CHECK_HDFN( status ); 1190  mhdf_closeData( file->file_ptr(), handle, &status ); 1191  CHECK_HDFN( status ); 1192  return MB_SUCCESS; 1193  } 1194  }; 1195  1196  const long count = nodeSet.range.size(); 1197  ExportSet* array[] = { &nodeSet }; 1198  ErrorCode rval = create_dataset( 1, &count, &nodeSet.offset, &nodeSet.max_num_ents, &nodeSet.total_num_ents, 1199  NodeSetCreator(), array, &nodeSet.first_id ); 1200  CHECK_MB( rval ); 1201  return assign_ids( nodeSet.range, nodeSet.first_id + nodeSet.offset ); 1202 }

References moab::WriteHDF5::assign_ids(), CHECK_HDFN, CHECK_MB, create_dataset(), ErrorCode, moab::WriteHDF5::file_ptr(), moab::WriteHDF5::ExportSet::first_id, moab::WriteHDF5::ExportSet::max_num_ents, MB_SUCCESS, mhdf_closeData(), mhdf_createNodeCoords(), moab::WriteHDF5::nodeSet, moab::WriteHDF5::ExportType::num_nodes, moab::WriteHDF5::ExportSet::offset, moab::WriteHDF5::ExportSet::range, moab::Range::size(), and moab::WriteHDF5::ExportSet::total_num_ents.

Referenced by parallel_create_file().

◆ create_tag_tables()

ErrorCode moab::WriteHDF5Parallel::create_tag_tables ( )
protected

Write tag descriptions and create tables to hold tag data.

Definition at line 753 of file WriteHDF5Parallel.cpp.

754 { 755  std::list< TagDesc >::iterator tag_iter; 756  ErrorCode rval; 757  int err; 758  const int rank = myPcomm->proc_config().proc_rank(); 759  const MPI_Comm comm = myPcomm->proc_config().proc_comm(); 760  761  subState.start( "negotiating tag list" ); 762  763  dbgOut.tprint( 1, "communicating tag metadata\n" ); 764  765  dbgOut.printf( 2, "Exchanging tag data for %d tags.\n", (int)tagList.size() ); 766  767  // Sort tagList contents in alphabetical order by tag name 768  tagList.sort( TagNameCompare( iFace ) ); 769  770  // Negotiate total list of tags to write 771  772  // Build concatenated list of all tag data 773  std::vector< unsigned char > tag_buffer; 774  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter ) 775  { 776  rval = append_serial_tag_data( tag_buffer, *tag_iter ); 777  CHECK_MB( rval ); 778  } 779  780  // Broadcast list from root to all other procs 781  unsigned long size = tag_buffer.size(); 782  err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, comm ); 783  CHECK_MPI( err ); 784  tag_buffer.resize( size ); 785  err = MPI_Bcast( &tag_buffer[0], size, MPI_UNSIGNED_CHAR, 0, comm ); 786  CHECK_MPI( err ); 787  788  // Update local tag list 789  std::vector< TagDesc* > missing; 790  rval = check_serial_tag_data( tag_buffer, &missing, 0 ); 791  CHECK_MB( rval ); 792  793  // Check if we're done (0->done, 1->more, 2+->error) 794  int code, lcode = ( MB_SUCCESS != rval ) ? rval + 2 : missing.empty() ? 0 : 1; 795  err = MPI_Allreduce( &lcode, &code, 1, MPI_INT, MPI_MAX, comm ); 796  CHECK_MPI( err ); 797  if( code > 1 ) 798  { 799  MB_SET_ERR_CONT( "Inconsistent tag definitions between procs" ); 800  return error( (ErrorCode)( code - 2 ) ); 801  } 802  803  // If not done... 804  if( code ) 805  { 806  dbgOut.print( 1, "Not all procs had same tag definitions, negotiating...\n" ); 807  808  // Get tags defined on this proc but not on root proc 809  tag_buffer.clear(); 810  for( size_t i = 0; i < missing.size(); ++i ) 811  { 812  rval = append_serial_tag_data( tag_buffer, *missing[i] ); 813  CHECK_MB( rval ); 814  } 815  816  // Gather extra tag definitions on root processor 817  std::vector< int > junk; // don't care how many from each proc 818  assert( rank || tag_buffer.empty() ); // must be empty on root 819  err = my_Gatherv( &tag_buffer[0], tag_buffer.size(), MPI_UNSIGNED_CHAR, tag_buffer, junk, 0, comm ); 820  CHECK_MPI( err ); 821  822  // Process serialized tag descriptions on root, and 823  rval = MB_SUCCESS; 824  if( 0 == rank ) 825  { 826  // Process serialized tag descriptions on root, and 827  std::vector< TagDesc* > newlist; 828  rval = check_serial_tag_data( tag_buffer, 0, &newlist ); 829  tag_buffer.clear(); 830  // re-serialize a unique list of new tag definitions 831  for( size_t i = 0; MB_SUCCESS == rval && i != newlist.size(); ++i ) 832  { 833  rval = append_serial_tag_data( tag_buffer, *newlist[i] ); 834  CHECK_MB( rval ); 835  } 836  } 837  838  // Broadcast any new tag definitions from root to other procs 839  long this_size = tag_buffer.size(); 840  if( MB_SUCCESS != rval ) this_size = -rval; 841  err = MPI_Bcast( &this_size, 1, MPI_LONG, 0, comm ); 842  CHECK_MPI( err ); 843  if( this_size < 0 ) 844  { 845  MB_SET_ERR_CONT( "Inconsistent tag definitions between procs" ); 846  return error( (ErrorCode)-this_size ); 847  } 848  tag_buffer.resize( this_size ); 849  err = MPI_Bcast( &tag_buffer[0], this_size, MPI_UNSIGNED_CHAR, 0, comm ); 850  CHECK_MPI( err ); 851  852  // Process new tag definitions 853  rval = 
check_serial_tag_data( tag_buffer, 0, 0 ); 854  CHECK_MB( rval ); 855  } 856  857  subState.end(); 858  subState.start( "negotiate which element/tag combinations are dense" ); 859  860  // Figure out for which tag/element combinations we can 861  // write dense tag data. 862  863  // Construct a table of bits, 864  // where each row of the table corresponds to a tag 865  // and each column to an element group. 866  867  // Two extra, because first is nodes and last is sets. 868  // (n+7)/8 is ceil(n/8) 869  const int bytes_per_tag = ( exportList.size() + 9 ) / 8; 870  std::vector< unsigned char > data( bytes_per_tag * tagList.size(), 0 ); 871  std::vector< unsigned char > recv( data.size(), 0 ); 872  unsigned char* iter = &data[0]; 873  if( writeTagDense && !data.empty() ) 874  { 875  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, iter += bytes_per_tag ) 876  { 877  878  Range tagged; 879  rval = get_sparse_tagged_entities( *tag_iter, tagged ); 880  CHECK_MB( rval ); 881  882  int s; 883  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) ) continue; 884  885  std::string n; 886  iFace->tag_get_name( tag_iter->tag_id, 887  n ); // Second time we've called, so shouldn't fail 888  889  // Check if we want to write this tag in dense format even if not 890  // all of the entities have a tag value. The criterion of this 891  // is that the tag be dense, have a default value, and have at 892  // least 2/3 of the entities tagged. 893  bool prefer_dense = false; 894  TagType type; 895  rval = iFace->tag_get_type( tag_iter->tag_id, type ); 896  CHECK_MB( rval ); 897  if( MB_TAG_DENSE == type ) 898  { 899  const void* defval = 0; 900  rval = iFace->tag_get_default_value( tag_iter->tag_id, defval, s ); 901  if( MB_SUCCESS == rval ) prefer_dense = true; 902  } 903  904  int i = 0; 905  if( check_dense_format_tag( nodeSet, tagged, prefer_dense ) ) 906  { 907  set_bit( i, iter ); 908  dbgOut.printf( 2, "Can write dense data for \"%s\"/Nodes\n", n.c_str() ); 909  } 910  std::list< ExportSet >::const_iterator ex_iter = exportList.begin(); 911  for( ++i; ex_iter != exportList.end(); ++i, ++ex_iter ) 912  { 913  // when writing in parallel, on some partitions, some of these element ranges might 914  // be empty so do not turn this tag as sparse, just because of that, leave it dense, 915  // if we prefer dense 916  if( ( prefer_dense && ex_iter->range.empty() ) || 917  check_dense_format_tag( *ex_iter, tagged, prefer_dense ) ) 918  { 919  set_bit( i, iter ); 920  dbgOut.printf( 2, "Can write dense data for \"%s\"/%s\n", n.c_str(), ex_iter->name() ); 921  } 922  } 923  if( check_dense_format_tag( setSet, tagged, prefer_dense ) ) 924  { 925  set_bit( i, iter ); 926  dbgOut.printf( 2, "Can write dense data for \"%s\"/Sets\n", n.c_str() ); 927  } 928  } 929  930  // Do bit-wise AND of list over all processors (only write dense format 931  // if all proccesses want dense format for this group of entities). 932  err = MPI_Allreduce( &data[0], &recv[0], data.size(), MPI_UNSIGNED_CHAR, MPI_BAND, 933  myPcomm->proc_config().proc_comm() ); 934  CHECK_MPI( err ); 935  } // if (writeTagDense) 936  937  // Store initial counts for sparse-formatted tag data. 938  // The total number of values to send and receive will be the number of 939  // tags plus the number of var-len tags because we need to negotiate 940  // offsets into two different tables for the var-len tags. 
941  std::vector< long > counts; 942  943  // Record dense tag/element combinations 944  iter = &recv[0]; 945  const unsigned char* iter2 = &data[0]; 946  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); 947  ++tag_iter, iter += bytes_per_tag, iter2 += bytes_per_tag ) 948  { 949  950  Range tagged; 951  rval = get_sparse_tagged_entities( *tag_iter, tagged ); 952  CHECK_MB( rval ); 953  954  std::string n; 955  iFace->tag_get_name( tag_iter->tag_id, n ); // Second time we've called, so shouldn't fail 956  957  int i = 0; 958  if( get_bit( i, iter ) ) 959  { 960  assert( get_bit( i, iter2 ) ); 961  tag_iter->dense_list.push_back( nodeSet ); 962  tagged -= nodeSet.range; 963  dbgOut.printf( 2, "Will write dense data for \"%s\"/Nodes\n", n.c_str() ); 964  } 965  std::list< ExportSet >::const_iterator ex_iter = exportList.begin(); 966  for( ++i; ex_iter != exportList.end(); ++i, ++ex_iter ) 967  { 968  if( get_bit( i, iter ) ) 969  { 970  assert( get_bit( i, iter2 ) ); 971  tag_iter->dense_list.push_back( *ex_iter ); 972  dbgOut.printf( 2, "WIll write dense data for \"%s\"/%s\n", n.c_str(), ex_iter->name() ); 973  tagged -= ex_iter->range; 974  } 975  } 976  if( get_bit( i, iter ) ) 977  { 978  assert( get_bit( i, iter2 ) ); 979  tag_iter->dense_list.push_back( setSet ); 980  dbgOut.printf( 2, "Will write dense data for \"%s\"/Sets\n", n.c_str() ); 981  tagged -= setSet.range; 982  } 983  984  counts.push_back( tagged.size() ); 985  986  int s; 987  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) ) 988  { 989  unsigned long data_len; 990  rval = get_tag_data_length( *tag_iter, tagged, data_len ); 991  CHECK_MB( rval ); 992  counts.push_back( data_len ); 993  } 994  } 995  996  subState.end(); 997  subState.start( "Negotiate offsets for sparse tag info" ); 998  999  std::vector< long > offsets( counts.size() ), maxima( counts.size() ), totals( counts.size() ); 1000  rval = create_dataset( counts.size(), &counts[0], &offsets[0], &maxima[0], &totals[0] ); 1001  CHECK_MB( rval ); 1002  1003  // Copy values into local structs and if root then create tables 1004  size_t idx = 0; 1005  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++idx ) 1006  { 1007  assert( idx < counts.size() ); 1008  tag_iter->sparse_offset = offsets[idx]; 1009  tag_iter->max_num_ents = maxima[idx]; 1010  tag_iter->write_sparse = ( 0 != totals[idx] ); 1011  int s; 1012  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) ) 1013  { 1014  ++idx; 1015  assert( idx < counts.size() ); 1016  tag_iter->var_data_offset = offsets[idx]; 1017  tag_iter->max_num_vals = maxima[idx]; 1018  } 1019  else 1020  { 1021  tag_iter->var_data_offset = 0; 1022  tag_iter->max_num_vals = 0; 1023  } 1024  } 1025  1026  subState.end(); 1027  1028  // Create tag tables on root process 1029  if( 0 == myPcomm->proc_config().proc_rank() ) 1030  { 1031  size_t iidx = 0; 1032  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++iidx ) 1033  { 1034  assert( iidx < totals.size() ); 1035  unsigned long num_ents = totals[iidx]; 1036  unsigned long num_val = 0; 1037  int s; 1038  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) ) 1039  { 1040  ++iidx; 1041  assert( iidx < totals.size() ); 1042  num_val = totals[iidx]; 1043  } 1044  dbgOut.printf( 2, "Writing tag description for tag 0x%lx with %lu values\n", 1045  (unsigned long)tag_iter->tag_id, num_val ? 
num_val : num_ents ); 1046  1047  rval = create_tag( *tag_iter, num_ents, num_val ); 1048  if( MB_SUCCESS != rval ) return error( rval ); 1049  } 1050  } 1051  1052  if( dbgOut.get_verbosity() > 1 ) 1053  { 1054  dbgOut.printf( 2, "Tags: %12s %8s %8s %8s %8s %8s\n", "Name", "Count", "Offset", "Var Off", "Max Ent", 1055  "Handle" ); 1056  1057  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter ) 1058  { 1059  std::string name; 1060  iFace->tag_get_name( tag_iter->tag_id, name ); 1061  size_t this_size; 1062  get_num_sparse_tagged_entities( *tag_iter, this_size ); 1063  dbgOut.printf( 2, "%18s %8lu %8lu %8lu %8lu 0x%7lx\n", name.c_str(), (unsigned long)this_size, 1064  (unsigned long)tag_iter->sparse_offset, (unsigned long)tag_iter->var_data_offset, 1065  (unsigned long)tag_iter->max_num_ents, (unsigned long)tag_iter->tag_id ); 1066  } 1067  } 1068  1069  return MB_SUCCESS; 1070 }

References append_serial_tag_data(), moab::WriteHDF5::check_dense_format_tag(), CHECK_MB, CHECK_MPI, check_serial_tag_data(), create_dataset(), moab::WriteHDF5::create_tag(), moab::WriteHDF5::dbgOut, moab::error(), ErrorCode, moab::WriteHDF5::exportList, moab::get_bit(), moab::WriteHDF5::get_num_sparse_tagged_entities(), moab::WriteHDF5::get_sparse_tagged_entities(), moab::WriteHDF5::get_tag_data_length(), moab::DebugOutput::get_verbosity(), moab::WriteHDF5::iFace, MB_SET_ERR_CONT, MB_SUCCESS, MB_TAG_DENSE, MB_VARIABLE_DATA_LENGTH, moab::my_Gatherv(), myPcomm, moab::WriteHDF5::nodeSet, moab::DebugOutput::print(), moab::DebugOutput::printf(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::WriteHDF5::ExportSet::range, moab::set_bit(), moab::WriteHDF5::setSet, size, moab::Range::size(), moab::WriteHDF5::subState, moab::Interface::tag_get_default_value(), moab::Interface::tag_get_length(), moab::Interface::tag_get_name(), moab::Interface::tag_get_type(), moab::WriteHDF5::tagList, TagType, moab::DebugOutput::tprint(), and moab::WriteHDF5::writeTagDense.

Referenced by parallel_create_file().

◆ debug_barrier_line()

void moab::WriteHDF5Parallel::debug_barrier_line ( int  lineno)
protected virtual

Reimplemented from moab::WriteHDF5.

Definition at line 263 of file WriteHDF5Parallel.cpp.

{
    const unsigned threshold = 2;
    static unsigned long count = 0;
    if( dbgOut.get_verbosity() >= threshold && myPcomm )
    {
        dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno );
        MPI_Barrier( myPcomm->proc_config().proc_comm() );
    }
}

References moab::WriteHDF5::dbgOut, moab::DebugOutput::get_verbosity(), myPcomm, moab::DebugOutput::printf(), moab::ProcConfig::proc_comm(), and moab::ParallelComm::proc_config().

◆ exchange_file_ids()

ErrorCode moab::WriteHDF5Parallel::exchange_file_ids ( const Range &  non_local_ents)
protected

For entities that will be written by another processor but are referenced by entities on this processor, get the file IDs that will be assigned to those entities so that they can be referenced by the entities written on this processor.

Parameters
non_local_ents  List of entities that are not to be written by this processor but are referenced by other entities that are to be written.
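The exchange follows a general owner-to-sharer pattern: owners publish a per-entity value through a temporary dense tag, ParallelComm::exchange_tags() moves the values across the interface, and receivers read them back for their non-owned copies. The helper below is a minimal sketch of that round trip; the function name and parameters are illustrative and not part of the class.

#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include <vector>

using namespace moab;

// Hypothetical helper mirroring the tag-based exchange used by exchange_file_ids():
// owners write one value per owned interface entity, and sharers read the values
// that arrive for their non-owned copies.
ErrorCode share_owned_values( Interface* mb,
                              ParallelComm* pcomm,
                              Range& owned_iface,                       // owned interface entities
                              Range& ghosts,                            // copies owned by other ranks
                              std::vector< EntityHandle >& owned_vals,  // one value per entity in owned_iface
                              std::vector< EntityHandle >& ghost_vals ) // filled with the received values
{
    EntityHandle def = 0;
    Tag scratch;
    ErrorCode rval = mb->tag_get_handle( "__tmp_exchange", 1, MB_TYPE_HANDLE, scratch,
                                         MB_TAG_DENSE | MB_TAG_CREAT, &def );
    if( MB_SUCCESS != rval ) return rval;

    // Owners publish their values on the shared entities ...
    if( MB_SUCCESS == rval && !owned_iface.empty() )
        rval = mb->tag_set_data( scratch, owned_iface, &owned_vals[0] );
    // ... and the tag exchange delivers them to every process sharing those entities.
    if( MB_SUCCESS == rval ) rval = pcomm->exchange_tags( scratch, owned_iface );

    // Read whatever arrived for the locally ghosted copies.
    ghost_vals.resize( ghosts.size() );
    if( MB_SUCCESS == rval && !ghosts.empty() ) rval = mb->tag_get_data( scratch, ghosts, &ghost_vals[0] );

    mb->tag_delete( scratch );
    return rval;
}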

Definition at line 2263 of file WriteHDF5Parallel.cpp.

2264 { 2265  ErrorCode rval; 2266  2267  // For each entity owned on the interface, write its file id to 2268  // a tag. The sets of entities to be written should already contain 2269  // only owned entities so by intersecting with them we not only 2270  // filter by entities to be written, but also restrict to entities 2271  // owned by the proc 2272  2273  // Get list of interface entities 2274  Range imesh, tmp; 2275  for( std::list< ExportSet >::reverse_iterator i = exportList.rbegin(); i != exportList.rend(); ++i ) 2276  { 2277  tmp.clear(); 2278  rval = myPcomm->filter_pstatus( i->range, PSTATUS_SHARED, PSTATUS_AND, -1, &tmp ); 2279  if( MB_SUCCESS != rval ) return error( rval ); 2280  imesh.merge( tmp ); 2281  } 2282  tmp.clear(); 2283  rval = myPcomm->filter_pstatus( nodeSet.range, PSTATUS_SHARED, PSTATUS_AND, -1, &tmp ); 2284  if( MB_SUCCESS != rval ) return error( rval ); 2285  imesh.merge( tmp ); 2286  2287  // Create tag to store file IDs 2288  EntityHandle default_val = 0; 2289  Tag file_id_tag = 0; 2290  rval = iFace->tag_get_handle( "__hdf5_ll_fileid", 1, MB_TYPE_HANDLE, file_id_tag, MB_TAG_DENSE | MB_TAG_CREAT, 2291  &default_val ); 2292  if( MB_SUCCESS != rval ) return error( rval ); 2293  2294  // Copy file IDs into tag 2295  std::vector< EntityHandle > file_id_vect( imesh.size() ); 2296  Range::const_iterator i; 2297  std::vector< EntityHandle >::iterator j = file_id_vect.begin(); 2298  for( i = imesh.begin(); i != imesh.end(); ++i, ++j ) 2299  { 2300  *j = idMap.find( *i ); 2301  if( !*j ) 2302  { 2303  iFace->tag_delete( file_id_tag ); 2304  return error( MB_FAILURE ); 2305  } 2306  } 2307  rval = iFace->tag_set_data( file_id_tag, imesh, &file_id_vect[0] ); 2308  if( MB_SUCCESS != rval ) 2309  { 2310  iFace->tag_delete( file_id_tag ); 2311  return error( rval ); 2312  } 2313  2314  // Do communication 2315  rval = myPcomm->exchange_tags( file_id_tag, imesh ); 2316  if( MB_SUCCESS != rval ) 2317  { 2318  iFace->tag_delete( file_id_tag ); 2319  return error( rval ); 2320  } 2321  2322  // Copy file IDs from tag into idMap for remote entities 2323  file_id_vect.resize( nonlocal.size() ); 2324  rval = iFace->tag_get_data( file_id_tag, nonlocal, &file_id_vect[0] ); 2325  if( MB_SUCCESS != rval ) 2326  { 2327  iFace->tag_delete( file_id_tag ); 2328  return error( rval ); 2329  } 2330  2331  j = file_id_vect.begin(); 2332  for( i = nonlocal.begin(); i != nonlocal.end(); ++i, ++j ) 2333  { 2334  if( *j == 0 ) 2335  { 2336  int owner = -1; 2337  myPcomm->get_owner( *i, owner ); 2338  const char* name = CN::EntityTypeName( TYPE_FROM_HANDLE( *i ) ); 2339  int id = ID_FROM_HANDLE( *i ); 2340  MB_SET_ERR_CONT( "Process " << myPcomm->proc_config().proc_rank() 2341  << " did not receive valid id handle for shared " << name << " " << id 2342  << " owned by process " << owner ); 2343  dbgOut.printf( 1, 2344  "Did not receive valid remote id for " 2345  "shared %s %d owned by process %d", 2346  name, id, owner ); 2347  iFace->tag_delete( file_id_tag ); 2348  return error( MB_FAILURE ); 2349  } 2350  else 2351  { 2352  if( !idMap.insert( *i, *j, 1 ).second ) 2353  { 2354  iFace->tag_delete( file_id_tag ); 2355  return error( MB_FAILURE ); 2356  } 2357  } 2358  } 2359  2360 #ifndef NDEBUG 2361  // Check that writer is correct with regards to which entities 2362  // that it owns by verifying that the file ids that we thought 2363  // we were sending where not received instead 2364  file_id_vect.resize( imesh.size() ); 2365  rval = iFace->tag_get_data( file_id_tag, imesh, &file_id_vect[0] 
); 2366  if( MB_SUCCESS != rval ) 2367  { 2368  iFace->tag_delete( file_id_tag ); 2369  return error( rval ); 2370  } 2371  int invalid_count = 0; 2372  j = file_id_vect.begin(); 2373  for( i = imesh.begin(); i != imesh.end(); ++i, ++j ) 2374  { 2375  EntityHandle h = idMap.find( *i ); 2376  if( *j != h ) 2377  { 2378  ++invalid_count; 2379  dbgOut.printf( 1, "Conflicting ownership for %s %ld\n", CN::EntityTypeName( TYPE_FROM_HANDLE( *i ) ), 2380  (long)ID_FROM_HANDLE( *i ) ); 2381  } 2382  } 2383  if( invalid_count ) 2384  { 2385  iFace->tag_delete( file_id_tag ); 2386  MB_SET_ERR( MB_FAILURE, invalid_count << " entities with conflicting ownership found by process " 2387  << myPcomm->proc_config().proc_rank() 2388  << ". This will result in duplicate entities written to file" ); 2389  } 2390 #endif 2391  2392  return iFace->tag_delete( file_id_tag ); 2393 }

References moab::Range::begin(), moab::Range::clear(), moab::WriteHDF5::dbgOut, moab::Range::end(), moab::CN::EntityTypeName(), moab::error(), ErrorCode, moab::ParallelComm::exchange_tags(), moab::WriteHDF5::exportList, moab::ParallelComm::filter_pstatus(), moab::RangeMap< KeyType, ValType, NullVal >::find(), moab::ParallelComm::get_owner(), moab::ID_FROM_HANDLE(), moab::WriteHDF5::idMap, moab::WriteHDF5::iFace, moab::RangeMap< KeyType, ValType, NullVal >::insert(), MB_SET_ERR, MB_SET_ERR_CONT, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_DENSE, MB_TYPE_HANDLE, moab::Range::merge(), myPcomm, moab::WriteHDF5::nodeSet, moab::DebugOutput::printf(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), PSTATUS_AND, PSTATUS_SHARED, moab::WriteHDF5::ExportSet::range, moab::Range::size(), moab::Interface::tag_delete(), moab::Interface::tag_get_data(), moab::Interface::tag_get_handle(), moab::Interface::tag_set_data(), and moab::TYPE_FROM_HANDLE().

Referenced by parallel_create_file().

◆ factory()

WriterIface * moab::WriteHDF5Parallel::factory ( Interface *  iface)
static

Definition at line 274 of file WriteHDF5Parallel.cpp.

{
    return new WriteHDF5Parallel( iface );
}

References iface, and WriteHDF5Parallel().

Referenced by moab::ReaderWriterSet::ReaderWriterSet().

◆ gather_interface_meshes()

ErrorCode moab::WriteHDF5Parallel::gather_interface_meshes ( Range &  non_local_ents)
protected

Figure out which of the local mesh is duplicated on remote processors and which processor will write that mesh.

Parameters
non_local_ents  Output list of entities that are not to be written by this processor but are referenced by other entities that are to be written.
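The ownership split itself is done with ParallelComm::filter_pstatus() followed by a range subtraction, as the definition below shows. A minimal stand-alone sketch of the same split, assuming a ParallelComm is already attached to the instance (the helper name is hypothetical):

#include "moab/Range.hpp"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

using namespace moab;

// Hypothetical helper: move the entities owned by other ranks out of 'ents'
// and collect them in 'not_owned', using the same PSTATUS filtering as below.
ErrorCode split_by_ownership( ParallelComm* pcomm, Range& ents, Range& not_owned )
{
    not_owned.clear();
    ErrorCode rval = pcomm->filter_pstatus( ents, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &not_owned );
    if( MB_SUCCESS != rval ) return rval;
    ents = subtract( ents, not_owned );  // keep only the locally owned part
    return MB_SUCCESS;
}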

Definition at line 295 of file WriteHDF5Parallel.cpp.

296 { 297  ErrorCode result; 298  299  // START_SERIAL; 300  dbgOut.print( 3, "Pre-interface mesh:\n" ); 301  dbgOut.print( 3, nodeSet.range ); 302  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter ) 303  dbgOut.print( 3, eiter->range ); 304  dbgOut.print( 3, setSet.range ); 305  306  // Move handles of non-owned entities from lists of entities 307  // that this processor will write to the 'nonowned' list. 308  309  nonowned.clear(); 310  result = myPcomm->filter_pstatus( nodeSet.range, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &nonowned ); 311  if( MB_SUCCESS != result ) return error( result ); 312  nodeSet.range = subtract( nodeSet.range, nonowned ); 313  314  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter ) 315  { 316  Range tmpset; 317  result = myPcomm->filter_pstatus( eiter->range, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &tmpset ); 318  if( MB_SUCCESS != result ) return error( result ); 319  eiter->range = subtract( eiter->range, tmpset ); 320  nonowned.merge( tmpset ); 321  } 322  323  dbgOut.print( 3, "Post-interface mesh:\n" ); 324  dbgOut.print( 3, nodeSet.range ); 325  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter ) 326  dbgOut.print( 3, eiter->range ); 327  dbgOut.print( 3, setSet.range ); 328  329  // END_SERIAL; 330  331  return MB_SUCCESS; 332 }

References moab::Range::clear(), moab::WriteHDF5::dbgOut, moab::error(), ErrorCode, moab::WriteHDF5::exportList, moab::ParallelComm::filter_pstatus(), MB_SUCCESS, moab::Range::merge(), myPcomm, moab::WriteHDF5::nodeSet, moab::DebugOutput::print(), PSTATUS_AND, PSTATUS_NOT_OWNED, moab::WriteHDF5::ExportSet::range, moab::WriteHDF5::setSet, and moab::subtract().

Referenced by parallel_create_file().

◆ get_sharedset_tags()

ErrorCode moab::WriteHDF5Parallel::get_sharedset_tags ( )
protected

Get any existing tags which aren't excluded and add them to the shared set tags.

◆ negotiate_type_list()

ErrorCode moab::WriteHDF5Parallel::negotiate_type_list ( )
protected

Communicate with other processors to negotiate the types of elements that will be written (the union of the types defined on each proc).
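The implementation below makes no assumptions about which types exist: it broadcasts the root's list, gathers any extras on the root, merges, and re-broadcasts the result. Because MOAB's element types form a small fixed enumeration, the same union could also be computed with a presence table and a single MPI_Allreduce; the sketch below shows that reduced form purely as an illustration (it is not the code path used here, and the bounds are assumptions).

#include <mpi.h>
#include <cstddef>
#include <utility>
#include <vector>

// Hypothetical reduced form of the type negotiation: every rank marks the
// (element type, nodes-per-element) pairs it exports in a fixed-size table,
// and one MPI_Allreduce(MPI_MAX) produces the union on all ranks.
std::vector< std::pair< int, int > > union_of_types( const std::vector< std::pair< int, int > >& local_types,
                                                     int max_type,   // e.g. MBMAXTYPE
                                                     int max_nodes,  // assumed exclusive bound on nodes per element
                                                     MPI_Comm comm )
{
    std::vector< int > have( (std::size_t)max_type * max_nodes, 0 ), all( have.size(), 0 );
    for( std::size_t i = 0; i < local_types.size(); ++i )
        have[local_types[i].first * max_nodes + local_types[i].second] = 1;

    MPI_Allreduce( &have[0], &all[0], (int)have.size(), MPI_INT, MPI_MAX, comm );

    std::vector< std::pair< int, int > > result;
    for( int t = 0; t < max_type; ++t )
        for( int n = 0; n < max_nodes; ++n )
            if( all[t * max_nodes + n] ) result.push_back( std::make_pair( t, n ) );
    return result;  // ordered by (type, num_nodes), the same ordering exportList uses
}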

Definition at line 1228 of file WriteHDF5Parallel.cpp.

1229 { 1230  int result; 1231  const MPI_Comm comm = myPcomm->proc_config().proc_comm(); 1232  1233  exportList.sort(); 1234  int num_types = exportList.size(); 1235  1236  // Get list of types on this processor 1237  typedef std::vector< std::pair< int, int > > typelist; 1238  typelist my_types( num_types ); 1239  (void)VALGRIND_MAKE_VEC_UNDEFINED( my_types ); 1240  typelist::iterator viter = my_types.begin(); 1241  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter ) 1242  { 1243  viter->first = eiter->type; 1244  viter->second = eiter->num_nodes; 1245  ++viter; 1246  } 1247  1248  dbgOut.print( 2, "Local Element Types:\n" ); 1249  for( viter = my_types.begin(); viter != my_types.end(); ++viter ) 1250  { 1251  int type = viter->first; 1252  int count = viter->second; 1253  dbgOut.printf( 2, " %s : %d\n", CN::EntityTypeName( (EntityType)type ), count ); 1254  } 1255  1256  // Broadcast number of types from root to all nodes 1257  int num_types0 = num_types; 1258  result = MPI_Bcast( &num_types0, 1, MPI_INT, 0, comm ); 1259  CHECK_MPI( result ); 1260  // Broadcast type list from root to all nodes 1261  typelist root_types( num_types0 ); 1262  if( 0 == myPcomm->proc_config().proc_rank() ) root_types = my_types; 1263  result = MPI_Bcast( (void*)&root_types[0], 2 * num_types0, MPI_INT, 0, comm ); 1264  CHECK_MPI( result ); 1265  1266  // Build local list of any types that root did not know about 1267  typelist non_root_types; 1268  viter = root_types.begin(); 1269  for( typelist::iterator iter = my_types.begin(); iter != my_types.end(); ++iter ) 1270  { 1271  if( viter == root_types.end() || *viter != *iter ) 1272  non_root_types.push_back( *iter ); 1273  else 1274  ++viter; 1275  } 1276  1277  // Determine if any process had types not defined on the root 1278  int non_root_count = non_root_types.size(); 1279  int not_done; 1280  result = MPI_Allreduce( &non_root_count, &not_done, 1, MPI_INT, MPI_LOR, comm ); 1281  CHECK_MPI( result ); 1282  if( not_done ) 1283  { 1284  // Get number of types each processor has that root does not 1285  std::vector< int > counts( myPcomm->proc_config().proc_size() ); 1286  int two_count = 2 * non_root_count; 1287  result = MPI_Gather( &two_count, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, comm ); 1288  CHECK_MPI( result ); 1289  1290  // Get list of types from each processor 1291  std::vector< int > displs( myPcomm->proc_config().proc_size() + 1 ); 1292  (void)VALGRIND_MAKE_VEC_UNDEFINED( displs ); 1293  displs[0] = 0; 1294  for( unsigned long i = 1; i <= myPcomm->proc_config().proc_size(); ++i ) 1295  displs[i] = displs[i - 1] + counts[i - 1]; 1296  int total = displs[myPcomm->proc_config().proc_size()]; 1297  typelist alltypes( total / 2 ); 1298  (void)VALGRIND_MAKE_VEC_UNDEFINED( alltypes ); 1299  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &non_root_types[0], non_root_types.size() * sizeof( int ) ); 1300  result = MPI_Gatherv( (void*)&non_root_types[0], 2 * non_root_count, MPI_INT, (int*)&alltypes[0], &counts[0], 1301  &displs[0], MPI_INT, 0, comm ); 1302  CHECK_MPI( result ); 1303  1304  // Merge type lists. 1305  // Prefer O(n) insertions with O(ln n) search time because 1306  // we expect data from a potentially large number of processes, 1307  // but with a small total number of element types. 
1308  if( 0 == myPcomm->proc_config().proc_rank() ) 1309  { 1310  for( viter = alltypes.begin(); viter != alltypes.end(); ++viter ) 1311  { 1312  typelist::iterator titer = std::lower_bound( my_types.begin(), my_types.end(), *viter ); 1313  if( titer == my_types.end() || *titer != *viter ) my_types.insert( titer, *viter ); 1314  } 1315  1316  dbgOut.print( 2, "Global Element Types:\n" ); 1317  for( viter = my_types.begin(); viter != my_types.end(); ++viter ) 1318  dbgOut.printf( 2, " %s : %d\n", CN::EntityTypeName( (EntityType)viter->first ), viter->second ); 1319  } 1320  1321  // Send total number of types to each processor 1322  total = my_types.size(); 1323  result = MPI_Bcast( &total, 1, MPI_INT, 0, comm ); 1324  CHECK_MPI( result ); 1325  1326  // Send list of types to each processor 1327  my_types.resize( total ); 1328  result = MPI_Bcast( (void*)&my_types[0], 2 * total, MPI_INT, 0, comm ); 1329  CHECK_MPI( result ); 1330  } 1331  else 1332  { 1333  // Special case: if root had types but some subset of procs did not 1334  // have those types, but there are no types that the root doesn't 1335  // know about then we still need to update processes that are missing 1336  // types. 1337  my_types.swap( root_types ); 1338  } 1339  1340  // Insert missing types into exportList, with an empty 1341  // range of entities to export. 1342  std::list< ExportSet >::iterator ex_iter = exportList.begin(); 1343  for( viter = my_types.begin(); viter != my_types.end(); ++viter ) 1344  { 1345  while( ex_iter != exportList.end() && *ex_iter < *viter ) 1346  ++ex_iter; 1347  1348  if( ex_iter == exportList.end() || !( *ex_iter == *viter ) ) 1349  { 1350  ExportSet insert; 1351  insert.type = (EntityType)viter->first; 1352  insert.num_nodes = viter->second; 1353  insert.first_id = 0; 1354  insert.offset = 0; 1355  insert.adj_offset = 0; 1356  ex_iter = exportList.insert( ex_iter, insert ); 1357  } 1358  } 1359  1360  return MB_SUCCESS; 1361 }

References moab::WriteHDF5::ExportSet::adj_offset, CHECK_MPI, moab::WriteHDF5::dbgOut, moab::CN::EntityTypeName(), moab::WriteHDF5::exportList, moab::WriteHDF5::ExportSet::first_id, MB_SUCCESS, myPcomm, moab::WriteHDF5::ExportType::num_nodes, moab::WriteHDF5::ExportSet::offset, moab::DebugOutput::print(), moab::DebugOutput::printf(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::ProcConfig::proc_size(), moab::WriteHDF5::ExportType::type, VALGRIND_CHECK_MEM_IS_DEFINED, and moab::VALGRIND_MAKE_VEC_UNDEFINED().

Referenced by parallel_create_file().

◆ pack_set()

ErrorCode moab::WriteHDF5Parallel::pack_set ( Range::const_iterator  set,
unsigned long *  set_data,
size_t  set_data_length 
)
protected

Pack set data for communication.

If set_data_length is insufficient for the set data, the length entries at indices 1, 2, and 3 of set_data will be set with the necessary lengths, but no data will be written to set_data beyond that.
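The packed layout is therefore: word 0 holds the set flags, words 1-3 hold the content, child, and parent list lengths, and the three id lists follow back to back starting at word 4. Below is a small, hypothetical reader for that layout (assuming the buffer was large enough for all three lists to be written; the struct and function names are illustrative only):

#include <cstddef>

// Hypothetical reader for the packed set layout described above:
// [flags][n_content][n_child][n_parent][content ids...][child ids...][parent ids...]
struct PackedSetView
{
    unsigned long flags;
    const unsigned long* contents;
    std::size_t num_contents;
    const unsigned long* children;
    std::size_t num_children;
    const unsigned long* parents;
    std::size_t num_parents;
};

bool view_packed_set( const unsigned long* set_data, std::size_t set_data_length, PackedSetView& out )
{
    if( set_data_length < 4 ) return false;  // the four header words are always present
    out.flags        = set_data[0];
    out.num_contents = set_data[1];
    out.num_children = set_data[2];
    out.num_parents  = set_data[3];
    // If the three lists did not fit, only the lengths above are valid.
    if( 4 + out.num_contents + out.num_children + out.num_parents > set_data_length ) return false;
    out.contents = set_data + 4;
    out.children = out.contents + out.num_contents;
    out.parents  = out.children + out.num_children;
    return true;
}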

Definition at line 1675 of file WriteHDF5Parallel.cpp.

1676 { 1677  ErrorCode rval; 1678  const EntityHandle* ptr; 1679  int len; 1680  unsigned char flags; 1681  std::vector< wid_t > tmp; 1682  size_t newlen; 1683  1684  // Buffer must always contain at least flags and desired sizes 1685  assert( buffer_size >= 4 ); 1686  buffer_size -= 4; 1687  1688  Range::const_iterator nd = it; 1689  ++nd; 1690  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::CONTENTS, &len, &flags ); 1691  CHECK_MB( rval ); 1692  1693  // Tag mattag; 1694  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag); 1695  // int block; 1696  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &*it, 1, &block)) 1697  // block = 0; 1698  // 1699  // if (block) { 1700  // std::vector<int> ids; 1701  // get_global_ids(iFace, ptr, len, flags, ids); 1702  //} 1703  1704  if( len && !( flags & MESHSET_ORDERED ) ) 1705  { 1706  tmp.clear(); 1707  bool blocked = false; 1708  assert( ( 0 == len % 2 ) ); 1709  rval = range_to_blocked_list( ptr, len / 2, tmp, blocked ); 1710  CHECK_MB( rval ); 1711  if( blocked ) flags |= mhdf_SET_RANGE_BIT; 1712  } 1713  else 1714  { 1715  tmp.resize( len ); 1716  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true ); 1717  CHECK_MB( rval ); 1718  tmp.resize( newlen ); 1719  } 1720  1721  buffer[0] = flags; 1722  buffer[1] = tmp.size(); 1723  if( tmp.size() <= buffer_size ) std::copy( tmp.begin(), tmp.end(), buffer + 4 ); 1724  1725  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::CHILDREN, &len ); 1726  CHECK_MB( rval ); 1727  tmp.resize( len ); 1728  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true ); 1729  tmp.resize( newlen ); 1730  buffer[2] = tmp.size(); 1731  if( tmp.size() <= buffer_size - buffer[1] ) std::copy( tmp.begin(), tmp.end(), buffer + 4 + buffer[1] ); 1732  1733  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::PARENTS, &len ); 1734  CHECK_MB( rval ); 1735  tmp.resize( len ); 1736  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true ); 1737  tmp.resize( newlen ); 1738  buffer[3] = tmp.size(); 1739  if( tmp.size() <= buffer_size - buffer[1] - buffer[2] ) 1740  std::copy( tmp.begin(), tmp.end(), buffer + 4 + buffer[1] + buffer[2] ); 1741  1742  return MB_SUCCESS; 1743 }

References buffer, CHECK_MB, moab::WriteUtilIface::CHILDREN, moab::WriteUtilIface::CONTENTS, ErrorCode, moab::WriteUtilIface::get_entity_list_pointers(), MB_SUCCESS, mhdf_SET_RANGE_BIT, moab::WriteUtilIface::PARENTS, moab::WriteHDF5::range_to_blocked_list(), moab::WriteHDF5::vector_to_id_list(), and moab::WriteHDF5::writeUtil.

Referenced by communicate_shared_set_data().

◆ parallel_create_file()

ErrorCode moab::WriteHDF5Parallel::parallel_create_file ( const char *  filename,
bool  overwrite,
const std::vector< std::string > &  qa_records,
const FileOptions &  opts,
const Tag *  user_tag_list = 0,
int  user_tag_count = 0,
int  dimension = 3,
double *  times = 0 
)
protected virtual

Called by normal (non-parallel) writer. Sets up necessary data for parallel write.

Reimplemented from moab::WriteHDF5.
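Applications do not call this method directly; it is reached through Interface::write_file() when the file options request a parallel write. A minimal driver is sketched below; the option strings are illustrative of the usual MOAB parallel read/write options rather than a prescription, and the input/output file names are placeholders.

#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include <mpi.h>

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    {
        moab::Core mb;
        moab::ParallelComm pcomm( &mb, MPI_COMM_WORLD );

        // Read a mesh partitioned across the ranks (options are illustrative).
        moab::ErrorCode rval =
            mb.load_file( "input.h5m", 0,
                          "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
        if( moab::MB_SUCCESS == rval )
            // "PARALLEL=WRITE_PART" routes the write through WriteHDF5Parallel.
            rval = mb.write_file( "output.h5m", 0, "PARALLEL=WRITE_PART" );
        if( moab::MB_SUCCESS != rval ) MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    MPI_Finalize();
    return 0;
}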

Definition at line 334 of file WriteHDF5Parallel.cpp.

342 { 343  ErrorCode rval; 344  mhdf_Status status; 345  346  int pcomm_no = 0; 347  opts.get_int_option( "PARALLEL_COMM", pcomm_no ); 348  349  myPcomm = ParallelComm::get_pcomm( iFace, pcomm_no ); 350  if( 0 == myPcomm ) 351  { 352  myPcomm = new ParallelComm( iFace, MPI_COMM_WORLD ); 353  pcommAllocated = true; 354  } 355  356  MPI_Info info = MPI_INFO_NULL; 357  std::string cb_size; 358  rval = opts.get_str_option( "CB_BUFFER_SIZE", cb_size ); 359  if( MB_SUCCESS == rval ) 360  { 361  MPI_Info_create( &info ); 362  MPI_Info_set( info, const_cast< char* >( "cb_buffer_size" ), const_cast< char* >( cb_size.c_str() ) ); 363  } 364  365  dbgOut.set_rank( myPcomm->proc_config().proc_rank() ); 366  dbgOut.limit_output_to_first_N_procs( 32 ); 367  Range nonlocal; 368  debug_barrier(); 369  dbgOut.tprint( 1, "Gathering interface meshes\n" ); 370  rval = gather_interface_meshes( nonlocal ); 371  if( MB_SUCCESS != rval ) return error( rval ); 372  373  /**************** Get tag names for sets likely to be shared ***********/ 374  // debug_barrier(); 375  // dbgOut.tprint(1, "Getting shared entity sets\n"); 376  // rval = get_sharedset_tags(); 377  // if (MB_SUCCESS != rval) return error(rval); 378  379  /**************** Create actual file and write meta info ***************/ 380  381  debug_barrier(); 382  if( myPcomm->proc_config().proc_rank() == 0 ) 383  { 384  dbgOut.tprintf( 1, "Creating file: %s\n", filename ); 385  386  // Create the file 387  const char* type_names[MBMAXTYPE]; 388  memset( type_names, 0, MBMAXTYPE * sizeof( char* ) ); 389  for( EntityType i = MBEDGE; i < MBENTITYSET; ++i ) 390  type_names[i] = CN::EntityTypeName( i ); 391  392  dbgOut.tprint( 1, "call mhdf_createFile\n" ); 393  filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status ); 394  if( !filePtr ) 395  { 396  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); 397  } 398  399  dbgOut.tprint( 1, "call write_qa\n" ); 400  rval = write_qa( qa_records ); 401  if( MB_SUCCESS != rval ) return error( rval ); 402  } 403  404  /**************** Create node coordinate table ***************/ 405  CpuTimer timer; 406  debug_barrier(); 407  dbgOut.tprint( 1, "creating node table\n" ); 408  topState.start( "creating node table" ); 409  rval = create_node_table( dimension ); 410  topState.end( rval ); 411  if( MB_SUCCESS != rval ) return error( rval ); 412  if( times ) times[CREATE_NODE_TIME] = timer.time_elapsed(); 413  414  /**************** Create element tables ***************/ 415  416  debug_barrier(); 417  dbgOut.tprint( 1, "negotiating element types\n" ); 418  topState.start( "negotiating element types" ); 419  rval = negotiate_type_list(); 420  topState.end( rval ); 421  if( MB_SUCCESS != rval ) return error( rval ); 422  if( times ) times[NEGOTIATE_TYPES_TIME] = timer.time_elapsed(); 423  dbgOut.tprint( 1, "creating element table\n" ); 424  topState.start( "creating element tables" ); 425  rval = create_element_tables(); 426  topState.end( rval ); 427  if( MB_SUCCESS != rval ) return error( rval ); 428  if( times ) times[CREATE_ELEM_TIME] = timer.time_elapsed(); 429  430  /*************** Exchange file IDs *****************/ 431  432  debug_barrier(); 433  dbgOut.tprint( 1, "communicating file ids\n" ); 434  topState.start( "communicating file ids" ); 435  rval = exchange_file_ids( nonlocal ); 436  topState.end( rval ); 437  if( MB_SUCCESS != rval ) return error( rval ); 438  if( times ) times[FILEID_EXCHANGE_TIME] = timer.time_elapsed(); 439  440  /**************** Create meshset tables 
*********************/ 441  442  debug_barrier(); 443  dbgOut.tprint( 1, "creating meshset table\n" ); 444  topState.start( "creating meshset tables" ); 445  rval = create_meshset_tables( times ); 446  topState.end( rval ); 447  if( MB_SUCCESS != rval ) return error( rval ); 448  if( times ) times[CREATE_SET_TIME] = timer.time_elapsed(); 449  450  /**************** Create adjacency tables *********************/ 451  452  debug_barrier(); 453  dbgOut.tprint( 1, "creating adjacency table\n" ); 454  topState.start( "creating adjacency tables" ); 455  rval = create_adjacency_tables(); 456  topState.end( rval ); 457  if( MB_SUCCESS != rval ) return error( rval ); 458  if( times ) times[CREATE_ADJ_TIME] = timer.time_elapsed(); 459  460  /**************** Create tag data *********************/ 461  462  debug_barrier(); 463  dbgOut.tprint( 1, "creating tag tables\n" ); 464  topState.start( "creating tag tables" ); 465  rval = gather_tags( user_tag_list, user_tag_count ); 466  if( MB_SUCCESS != rval ) return error( rval ); 467  rval = create_tag_tables(); 468  topState.end( rval ); 469  if( MB_SUCCESS != rval ) return error( rval ); 470  if( times ) times[CREATE_TAG_TIME] = timer.time_elapsed(); 471  472  /************** Close serial file and reopen parallel *****************/ 473  474  if( 0 == myPcomm->proc_config().proc_rank() ) mhdf_closeFile( filePtr, &status ); 475  476  MPI_Barrier( myPcomm->proc_config().proc_comm() ); 477  dbgOut.tprint( 1, "(re)opening file in parallel mode\n" ); 478  unsigned long junk; 479  hid_t hdf_opt = H5Pcreate( H5P_FILE_ACCESS ); 480  H5Pset_fapl_mpio( hdf_opt, myPcomm->proc_config().proc_comm(), info ); 481  filePtr = mhdf_openFileWithOpt( filename, 1, &junk, id_type, hdf_opt, &status ); 482  H5Pclose( hdf_opt ); 483  if( !filePtr ) 484  { 485  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); 486  } 487  488  if( collectiveIO ) 489  { 490  dbgOut.print( 1, "USING COLLECTIVE IO\n" ); 491  writeProp = H5Pcreate( H5P_DATASET_XFER ); 492  H5Pset_dxpl_mpio( writeProp, H5FD_MPIO_COLLECTIVE ); 493  } 494  495  /* Test if we can use H5S_APPEND when selecting hyperslabs */ 496  if( MB_SUCCESS != opts.get_null_option( "HYPERSLAB_OR" ) && 497  ( MB_SUCCESS == opts.get_null_option( "HYPERSLAB_APPEND" ) || HDF5_can_append_hyperslabs() ) ) 498  { 499  dbgOut.print( 1, "HDF5 library supports H5Sselect_hyperlsab with H5S_SELECT_APPEND\n" ); 500  hslabOp = H5S_SELECT_APPEND; 501  } 502  503  dbgOut.tprint( 1, "Exiting parallel_create_file\n" ); 504  return MB_SUCCESS; 505 }

References moab::WriteHDF5::collectiveIO, moab::WriteHDF5::CREATE_ADJ_TIME, create_adjacency_tables(), moab::WriteHDF5::CREATE_ELEM_TIME, create_element_tables(), create_meshset_tables(), create_node_table(), moab::WriteHDF5::CREATE_NODE_TIME, moab::WriteHDF5::CREATE_SET_TIME, create_tag_tables(), moab::WriteHDF5::CREATE_TAG_TIME, moab::WriteHDF5::dbgOut, debug_barrier, moab::CN::EntityTypeName(), moab::error(), ErrorCode, exchange_file_ids(), moab::WriteHDF5::FILEID_EXCHANGE_TIME, moab::WriteHDF5::filePtr, gather_interface_meshes(), moab::WriteHDF5::gather_tags(), moab::FileOptions::get_int_option(), moab::FileOptions::get_null_option(), moab::ParallelComm::get_pcomm(), moab::FileOptions::get_str_option(), moab::HDF5_can_append_hyperslabs(), hslabOp, moab::WriteHDF5::id_type, moab::WriteHDF5::iFace, moab::DebugOutput::limit_output_to_first_N_procs(), MB_SET_ERR, MB_SUCCESS, MBEDGE, MBENTITYSET, MBMAXTYPE, mhdf_closeFile(), mhdf_createFile(), mhdf_message(), mhdf_openFileWithOpt(), myPcomm, negotiate_type_list(), moab::WriteHDF5::NEGOTIATE_TYPES_TIME, pcommAllocated, moab::DebugOutput::print(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::DebugOutput::set_rank(), moab::CpuTimer::time_elapsed(), moab::WriteHDF5::topState, moab::DebugOutput::tprint(), moab::DebugOutput::tprintf(), moab::WriteHDF5::write_qa(), and moab::WriteHDF5::writeProp.

◆ print_set_sharing_data()

void moab::WriteHDF5Parallel::print_set_sharing_data ( const Range &  range,
const char *  label,
Tag  idt 
)
protected

Definition at line 1459 of file WriteHDF5Parallel.cpp.

1460 { 1461  dbgOut.printf( SSVB, "set\tid\towner\t%-*s\tfid\tshared\n", (int)( sizeof( EntityHandle ) * 2 ), "handle" ); 1462  for( Range::iterator it = range.begin(); it != range.end(); ++it ) 1463  { 1464  int id; 1465  iFace->tag_get_data( idt, &*it, 1, &id ); 1466  EntityHandle handle = 0; 1467  unsigned owner = 0; 1468  wid_t file_id = 0; 1469  myPcomm->get_entityset_owner( *it, owner, &handle ); 1470  if( !idMap.find( *it, file_id ) ) file_id = 0; 1471  dbgOut.printf( SSVB, "%s\t%d\t%u\t%lx\t%lu\t", label, id, owner, (unsigned long)handle, 1472  (unsigned long)file_id ); 1473  std::vector< unsigned > procs; 1474  myPcomm->get_entityset_procs( *it, procs ); 1475  if( procs.empty() ) 1476  dbgOut.print( SSVB, "<none>\n" ); 1477  else 1478  { 1479  for( unsigned i = 0; i < procs.size() - 1; ++i ) 1480  dbgOut.printf( SSVB, "%u,", procs[i] ); 1481  dbgOut.printf( SSVB, "%u\n", procs.back() ); 1482  } 1483  } 1484 }

References moab::Range::begin(), moab::WriteHDF5::dbgOut, moab::Range::end(), moab::RangeMap< KeyType, ValType, NullVal >::find(), moab::ParallelComm::get_entityset_owner(), moab::ParallelComm::get_entityset_procs(), moab::WriteHDF5::idMap, moab::WriteHDF5::iFace, myPcomm, moab::DebugOutput::print(), moab::DebugOutput::printf(), moab::SSVB, and moab::Interface::tag_get_data().

Referenced by print_shared_sets().

◆ print_shared_sets()

void moab::WriteHDF5Parallel::print_shared_sets ( )
protected

Definition at line 1486 of file WriteHDF5Parallel.cpp.

1487 { 1488  const char* tag_names[][2] = { { MATERIAL_SET_TAG_NAME, "block" }, 1489  { DIRICHLET_SET_TAG_NAME, "nodeset" }, 1490  { NEUMANN_SET_TAG_NAME, "sideset" }, 1491  { 0, 0 } }; 1492  1493  for( int i = 0; tag_names[i][0]; ++i ) 1494  { 1495  Tag tag; 1496  if( MB_SUCCESS != iFace->tag_get_handle( tag_names[i][0], 1, MB_TYPE_INTEGER, tag ) ) continue; 1497  1498  Range tagged; 1499  iFace->get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, tagged ); 1500  print_set_sharing_data( tagged, tag_names[i][1], tag ); 1501  } 1502  1503  Tag geom, id; 1504  if( MB_SUCCESS != iFace->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom ) ) return; 1505  id = iFace->globalId_tag(); 1506  1507  const char* geom_names[] = { "vertex", "curve", "surface", "volume" }; 1508  for( int d = 0; d <= 3; ++d ) 1509  { 1510  Range tagged; 1511  const void* vals[] = { &d }; 1512  iFace->get_entities_by_type_and_tag( 0, MBENTITYSET, &geom, vals, 1, tagged ); 1513  print_set_sharing_data( tagged, geom_names[d], id ); 1514  } 1515 }

References DIRICHLET_SET_TAG_NAME, GEOM_DIMENSION_TAG_NAME, moab::Interface::get_entities_by_type_and_tag(), moab::Interface::globalId_tag(), moab::WriteHDF5::iFace, MATERIAL_SET_TAG_NAME, MB_SUCCESS, MB_TYPE_INTEGER, MBENTITYSET, NEUMANN_SET_TAG_NAME, print_set_sharing_data(), and moab::Interface::tag_get_handle().

Referenced by communicate_shared_set_ids().

◆ print_times()

void moab::WriteHDF5Parallel::print_times ( const double *  times) const
protected virtual

Definition at line 2395 of file WriteHDF5Parallel.cpp.

{
    if( !myPcomm )
    {
        WriteHDF5::print_times( times );
    }
    else
    {
        double recv[NUM_TIMES];
        MPI_Reduce( (void*)times, recv, NUM_TIMES, MPI_DOUBLE, MPI_MAX, 0, myPcomm->proc_config().proc_comm() );
        if( 0 == myPcomm->proc_config().proc_rank() ) WriteHDF5::print_times( recv );
    }
}

References myPcomm, moab::WriteHDF5::NUM_TIMES, moab::WriteHDF5::print_times(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), and moab::ProcConfig::proc_rank().

◆ remove_remote_entities() [1/2]

void moab::WriteHDF5Parallel::remove_remote_entities ( EntityHandle  relative,
Range &  range
)
protected

Remove any remote mesh entities from the passed range.

Definition at line 2200 of file WriteHDF5Parallel.cpp.

{
    Range result;
    result.merge( intersect( range, nodeSet.range ) );
    result.merge( intersect( range, setSet.range ) );
    for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
        result.merge( intersect( range, eiter->range ) );

    // result.merge(intersect(range, myParallelSets));
    Range sets;
    int junk;
    sets.merge( Range::lower_bound( range.begin(), range.end(), CREATE_HANDLE( MBENTITYSET, 0, junk ) ), range.end() );
    remove_remote_sets( relative, sets );
    result.merge( sets );
    range.swap( result );
}

References moab::Range::begin(), moab::CREATE_HANDLE(), moab::Range::end(), moab::WriteHDF5::exportList, moab::intersect(), moab::Range::lower_bound(), MBENTITYSET, moab::Range::merge(), moab::WriteHDF5::nodeSet, moab::WriteHDF5::ExportSet::range, remove_remote_sets(), moab::WriteHDF5::setSet, and moab::Range::swap().

Referenced by remove_remote_entities().

◆ remove_remote_entities() [2/2]

void moab::WriteHDF5Parallel::remove_remote_entities ( EntityHandle  relative,
std::vector< EntityHandle > &  vect 
)
protected

Definition at line 2225 of file WriteHDF5Parallel.cpp.

{
    Range intrsct;
    for( std::vector< EntityHandle >::const_iterator iter = vect.begin(); iter != vect.end(); ++iter )
        intrsct.insert( *iter );
    remove_remote_entities( relative, intrsct );

    unsigned int read, write;
    for( read = write = 0; read < vect.size(); ++read )
    {
        if( intrsct.find( vect[read] ) != intrsct.end() )
        {
            if( read != write ) vect[write] = vect[read];
            ++write;
        }
    }
    if( write != vect.size() ) vect.resize( write );
}

References moab::Range::end(), moab::Range::find(), moab::Range::insert(), and remove_remote_entities().

◆ remove_remote_sets() [1/2]

void moab::WriteHDF5Parallel::remove_remote_sets ( EntityHandle  relative,
Range &  range
)
protected

Definition at line 2217 of file WriteHDF5Parallel.cpp.

{
    Range result( intersect( range, setSet.range ) );
    // Store the non-intersecting entities separately if needed
    // Range remaining(subtract(range, result));
    range.swap( result );
}

References moab::intersect(), moab::WriteHDF5::ExportSet::range, moab::WriteHDF5::setSet, and moab::Range::swap().

Referenced by remove_remote_entities(), and remove_remote_sets().

◆ remove_remote_sets() [2/2]

void moab::WriteHDF5Parallel::remove_remote_sets ( EntityHandle  relative,
std::vector< EntityHandle > &  vect 
)
protected

Definition at line 2244 of file WriteHDF5Parallel.cpp.

{
    Range intrsct;
    for( std::vector< EntityHandle >::const_iterator iter = vect.begin(); iter != vect.end(); ++iter )
        intrsct.insert( *iter );
    remove_remote_sets( relative, intrsct );

    unsigned int read, write;
    for( read = write = 0; read < vect.size(); ++read )
    {
        if( intrsct.find( vect[read] ) != intrsct.end() )
        {
            if( read != write ) vect[write] = vect[read];
            ++write;
        }
    }
    if( write != vect.size() ) vect.resize( write );
}

References moab::Range::end(), moab::Range::find(), moab::Range::insert(), and remove_remote_sets().

◆ unpack_set()

ErrorCode moab::WriteHDF5Parallel::unpack_set ( EntityHandle  set,
const unsigned long *  set_data,
size_t  set_data_length 
)
protected

Unpack set data from communication.

Definition at line 1798 of file WriteHDF5Parallel.cpp.

1799 { 1800  // Use local variables for readability 1801  assert( buffer_size >= 4 ); 1802  assert( buffer[1] + buffer[2] + buffer[3] <= buffer_size ); 1803  const unsigned long flags = buffer[0]; 1804  unsigned long num_content = buffer[1]; 1805  const unsigned long num_child = buffer[2]; 1806  const unsigned long num_parent = buffer[3]; 1807  const unsigned long* contents = buffer + 4; 1808  const unsigned long* children = contents + num_content; 1809  const unsigned long* parents = children + num_child; 1810  1811  SpecialSetData* data = find_set_data( set ); 1812  assert( NULL != data ); 1813  if( NULL == data ) return MB_FAILURE; 1814  1815  // Tag mattag; 1816  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag); 1817  // int block; 1818  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &set, 1, &block)) 1819  // block = 0; 1820  1821  // If either the current data or the new data is in ranged format, 1822  // then change the other to ranged format if it isn't already 1823  // in both cases when they differ, the data will end up "compressed range" 1824  std::vector< wid_t > tmp; 1825  if( ( flags & mhdf_SET_RANGE_BIT ) != ( data->setFlags & mhdf_SET_RANGE_BIT ) ) 1826  { 1827  if( flags & mhdf_SET_RANGE_BIT ) 1828  { 1829  tmp = data->contentIds; 1830  convert_to_ranged_ids( &tmp[0], tmp.size(), data->contentIds ); 1831  data->setFlags |= mhdf_SET_RANGE_BIT; 1832  } 1833  else 1834  { 1835  tmp.clear(); 1836  convert_to_ranged_ids( contents, num_content, tmp ); 1837  num_content = tmp.size(); 1838  if( sizeof( wid_t ) < sizeof( long ) ) 1839  { 1840  size_t old_size = tmp.size(); 1841  tmp.resize( sizeof( long ) * old_size / sizeof( wid_t ) ); 1842  unsigned long* array = reinterpret_cast< unsigned long* >( &tmp[0] ); 1843  for( long i = ( (long)old_size ) - 1; i >= 0; --i ) 1844  array[i] = tmp[i]; 1845  contents = array; 1846  } 1847  else if( sizeof( wid_t ) > sizeof( long ) ) 1848  { 1849  unsigned long* array = reinterpret_cast< unsigned long* >( &tmp[0] ); 1850  std::copy( tmp.begin(), tmp.end(), array ); 1851  } 1852  contents = reinterpret_cast< unsigned long* >( &tmp[0] ); 1853  } 1854  } 1855  1856  if( data->setFlags & mhdf_SET_RANGE_BIT ) 1857  merge_ranged_ids( contents, num_content, data->contentIds ); 1858  else 1859  merge_vector_ids( contents, num_content, data->contentIds ); 1860  1861  merge_vector_ids( children, num_child, data->childIds ); 1862  merge_vector_ids( parents, num_parent, data->parentIds ); 1863  return MB_SUCCESS; 1864 }

References buffer, moab::WriteHDF5::SpecialSetData::childIds, children, moab::WriteHDF5::SpecialSetData::contentIds, moab::convert_to_ranged_ids(), moab::WriteHDF5::find_set_data(), MB_SUCCESS, moab::merge_ranged_ids(), moab::merge_vector_ids(), mhdf_SET_RANGE_BIT, moab::WriteHDF5::SpecialSetData::parentIds, and moab::WriteHDF5::SpecialSetData::setFlags.

Referenced by communicate_shared_set_data().

Member Data Documentation

◆ hslabOp

H5S_seloper_t moab::WriteHDF5Parallel::hslabOp
private

Operation to use to append hyperslab selections.
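Combining many disjoint row ranges with H5S_SELECT_OR forces HDF5 to merge each new block into the existing selection, whereas H5S_SELECT_APPEND (used when the linked HDF5 library supports it for hyperslabs) simply appends blocks. Below is a sketch of building such a union in a 1-D dataspace with a configurable operator; the helper is hypothetical and assumes the chosen operator is valid for hyperslab selections in the HDF5 version in use.

#include <hdf5.h>
#include <cstddef>
#include <utility>
#include <vector>

// Hypothetical helper: select a union of [start, start+count) row ranges in a
// 1-D dataspace, using whichever selection operator the writer negotiated.
herr_t select_row_ranges( hid_t space, H5S_seloper_t op,
                          const std::vector< std::pair< hsize_t, hsize_t > >& ranges )
{
    for( std::size_t i = 0; i < ranges.size(); ++i )
    {
        const hsize_t start = ranges[i].first;
        const hsize_t count = ranges[i].second;
        // The first block starts a fresh selection; later blocks are
        // combined with 'op' (H5S_SELECT_OR or H5S_SELECT_APPEND).
        herr_t err = H5Sselect_hyperslab( space, i == 0 ? H5S_SELECT_SET : op,
                                          &start, NULL, &count, NULL );
        if( err < 0 ) return err;
    }
    return 0;
}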

Definition at line 186 of file WriteHDF5Parallel.hpp.

Referenced by parallel_create_file().

◆ myPcomm

◆ pcommAllocated

bool moab::WriteHDF5Parallel::pcommAllocated
private

Whether this instance allocated (and the destructor should delete) the ParallelComm.

Definition at line 183 of file WriteHDF5Parallel.hpp.

Referenced by parallel_create_file(), and ~WriteHDF5Parallel().


The documentation for this class was generated from the following files: