Mesh Oriented datABase  (version 5.5.0)
An array-based unstructured mesh library
moab::WriteHDF5Parallel Class Reference

Write MOAB HDF5 file in parallel. More...

#include <WriteHDF5Parallel.hpp>

Inheritance diagram for moab::WriteHDF5Parallel:
Collaboration diagram for moab::WriteHDF5Parallel:

Classes

struct  DataSetCreator
 Argument to create_dataset. More...
 
struct  NoopDescCreator
 

Public Member Functions

 WriteHDF5Parallel (Interface *iface)
 
virtual ~WriteHDF5Parallel ()
 
- Public Member Functions inherited from moab::WriteHDF5
 WriteHDF5 (Interface *iface)
 
virtual ~WriteHDF5 ()
 
ErrorCode write_file (const char *filename, const bool overwrite, const FileOptions &opts, const EntityHandle *export_sets, const int export_set_count, const std::vector< std::string > &qa_records, const Tag *tag_list=NULL, int num_tags=0, int user_dimension=3)
 
mhdf_FileHandle file_ptr ()
 
WriteUtilIface * write_util ()
 
ErrorCode create_elem_table (const ExportSet &block, long num_ents, long &first_id_out)
 
ErrorCode create_set_meta (long num_sets, long &first_id_out)
 
- Public Member Functions inherited from moab::WriterIface
virtual ~WriterIface ()
 

Static Public Member Functions

static WriterIface * factory (Interface *)
 
- Static Public Member Functions inherited from moab::WriteHDF5
static WriterIface * factory (Interface *)
 

Protected Member Functions

virtual void debug_barrier_line (int lineno)
 
virtual void print_times (const double *times) const
 
virtual ErrorCode parallel_create_file (const char *filename, bool overwrite, const std::vector< std::string > &qa_records, const FileOptions &opts, const Tag *user_tag_list=0, int user_tag_count=0, int dimension=3, double *times=0)
 Called by normal (non-parallel) writer. Sets up necessary data for parallel write. More...
 
ErrorCode gather_interface_meshes (Range &non_local_ents)
 Figure out which local mesh is duplicated on remote processors and which processor will write that mesh. More...
 
ErrorCode exchange_file_ids (const Range &non_local_ents)
 For entities that will be written by another processor but are referenced by entities on this processor, get the file IDs that will be assigned to those entities so they can be referenced by entities to be written on this processor. More...
 
ErrorCode communicate_shared_set_ids (const Range &owned, const Range &remote)
 Get remote ids for shared sets. More...
 
ErrorCode pack_set (Range::const_iterator set, unsigned long *set_data, size_t set_data_length)
 Pack set data for communication. More...
 
ErrorCode unpack_set (EntityHandle set, const unsigned long *set_data, size_t set_data_length)
 Unpack set data from communication. More...
 
ErrorCode communicate_shared_set_data (const Range &owned, const Range &remote)
 Communicate set contents between processors such that each owner knows the contents, parents, & child lists from all processors that have a copy of the set. More...
 
ErrorCode create_node_table (int dimension)
 Create the node table in the file. More...
 
ErrorCode negotiate_type_list ()
 Communicate with other processors to negotiate the types of elements that will be written (the union of the types defined on each proc.) More...
 
ErrorCode create_element_tables ()
 Create tables to hold element connectivity. More...
 
ErrorCode create_adjacency_tables ()
 Create tables to hold element adjacencies. More...
 
ErrorCode create_meshset_tables (double *times)
 Create tables for mesh sets. More...
 
ErrorCode create_tag_tables ()
 Write tag descriptions and create tables to hold tag data. More...
 
void remove_remote_entities (EntityHandle relative, Range &range)
 Remove any remote mesh entities from the passed range. More...
 
void remove_remote_entities (EntityHandle relative, std::vector< EntityHandle > &vect)
 
void remove_remote_sets (EntityHandle relative, Range &range)
 
void remove_remote_sets (EntityHandle relative, std::vector< EntityHandle > &vect)
 
ErrorCode get_sharedset_tags ()
 Get any existing tags which aren't excluded and add them to the shared set tags. More...
 
ErrorCode append_serial_tag_data (std::vector< unsigned char > &buffer, const WriteHDF5::TagDesc &tag)
 
ErrorCode check_serial_tag_data (const std::vector< unsigned char > &buffer, std::vector< TagDesc * > *missing=0, std::vector< TagDesc * > *newlist=0)
 helper function for create_tag_tables More...
 
ErrorCode create_dataset (int num_datasets, const long *num_owned_entities, long *offsets_out, long *max_proc_ents_out, long *total_ents_out, const DataSetCreator &creator=NoopDescCreator(), ExportSet *groups[]=0, wid_t *first_ids_out=NULL)
 Do typical communication for dataset creation. More...
 
void print_shared_sets ()
 
void print_set_sharing_data (const Range &range, const char *label, Tag idt)
 
- Protected Member Functions inherited from moab::WriteHDF5
virtual ErrorCode write_finished ()
 
ErrorCode gather_tags (const Tag *user_tag_list, int user_tag_list_length)
 Gather tags. More...
 
bool check_dense_format_tag (const ExportSet &ents, const Range &all_tagged, bool prefer_dense)
 
ErrorCode count_adjacencies (const Range &elements, wid_t &result)
 
ErrorCode count_set_size (const Range &sets, long &contents_length_out, long &children_length_out, long &parents_length_out)
 
ErrorCode get_set_info (EntityHandle set, long &num_entities, long &num_children, long &num_parents, unsigned long &flags)
 Get information about a meshset. More...
 
ErrorCode create_set_tables (long contents_length, long children_length, long parents_length)
 
ErrorCode write_qa (const std::vector< std::string > &list)
 Write exodus-type QA info. More...
 
ErrorCode get_num_sparse_tagged_entities (const TagDesc &tag, size_t &count)
 Get tagged entities for which to write tag values. More...
 
ErrorCode get_sparse_tagged_entities (const TagDesc &tag, Range &range)
 Get tagged entities for which to write tag values. More...
 
void get_write_entities (Range &range)
 Get entities that will be written to file. More...
 
const ExportSet * find (const ExportType &type) const
 
const SpecialSetData * find_set_data (EntityHandle h) const
 
SpecialSetData * find_set_data (EntityHandle h)
 
void print_id_map () const
 
void print_id_map (std::ostream &str, const char *prefix="") const
 
ErrorCode create_tag (const TagDesc &tag_data, unsigned long num_entities, unsigned long var_len_total)
 
ErrorCode assign_ids (const Range &entities, wid_t first_id)
 add entities to idMap More...
 
ErrorCode range_to_blocked_list (const Range &input_range, std::vector< wid_t > &output_id_list, bool &ranged_list)
 
ErrorCode range_to_blocked_list (const EntityHandle *input_ranges, size_t num_input_ranges, std::vector< wid_t > &output_id_list, bool &ranged_list)
 
ErrorCode range_to_id_list (const Range &input_range, wid_t *array)
 
ErrorCode vector_to_id_list (const std::vector< EntityHandle > &input, std::vector< wid_t > &output, bool remove_non_written=false)
 Get IDs for entities. More...
 
ErrorCode vector_to_id_list (const EntityHandle *input, wid_t *output, size_t num_entities)
 Get IDs for entities. More...
 
ErrorCode vector_to_id_list (const EntityHandle *input, size_t input_len, wid_t *output, size_t &output_len, bool remove_non_written)
 Get IDs for entities. More...
 
bool convert_handle_tag (EntityHandle *data, size_t count) const
 
bool convert_handle_tag (const EntityHandle *source, EntityHandle *dest, size_t count) const
 
ErrorCode get_adjacencies (EntityHandle entity, std::vector< wid_t > &adj)
 
ErrorCode get_tag_data_length (const TagDesc &tag_info, const Range &range, unsigned long &result)
 get sum of lengths of tag values (as number of type) for variable length tag data. More...
 
virtual void print_times (const double times[NUM_TIMES]) const
 

Private Attributes

ParallelComm * myPcomm
 pcomm controlling parallel nature of mesh More...
 
bool pcommAllocated
 whether this instance allocated (and dtor should delete) the pcomm More...
 
H5S_seloper_t hslabOp
 Operation to use to append hyperslab selections. More...
 

Additional Inherited Members

- Public Types inherited from moab::WriteHDF5
typedef EntityHandle wid_t
 
- Static Public Attributes inherited from moab::WriteHDF5
static const hid_t id_type = get_id_type()
 
- Protected Types inherited from moab::WriteHDF5
enum  TimingValues {
  TOTAL_TIME = 0 , GATHER_TIME , CREATE_TIME , CREATE_NODE_TIME ,
  NEGOTIATE_TYPES_TIME , CREATE_ELEM_TIME , FILEID_EXCHANGE_TIME , CREATE_ADJ_TIME ,
  CREATE_SET_TIME , SHARED_SET_IDS , SHARED_SET_CONTENTS , SET_OFFSET_TIME ,
  CREATE_TAG_TIME , COORD_TIME , CONN_TIME , SET_TIME ,
  SET_META , SET_CONTENT , SET_PARENT , SET_CHILD ,
  ADJ_TIME , TAG_TIME , DENSE_TAG_TIME , SPARSE_TAG_TIME ,
  VARLEN_TAG_TIME , NUM_TIMES
}
 
- Protected Attributes inherited from moab::WriteHDF5
HDF5ErrorHandler errorHandler
 Store old HDF5 error handling function. More...
 
size_t bufferSize
 The size of the data buffer (dataBuffer). More...
 
char * dataBuffer
 A memory buffer to use for all I/O operations. More...
 
Interface * iFace
 Interface pointer passed to constructor. More...
 
WriteUtilIface * writeUtil
 Cached pointer to writeUtil interface. More...
 
mhdf_FileHandle filePtr
 The file handle from the mhdf library. More...
 
RangeMap< EntityHandle, wid_t > idMap
 Map from entity handles to file IDs. More...
 
std::list< ExportSet > exportList
 The list of elements to export. More...
 
ExportSet nodeSet
 The list of nodes to export. More...
 
ExportSet setSet
 The list of sets to export. More...
 
unsigned long setContentsOffset
 Offset into set contents table (zero except for parallel) More...
 
unsigned long setChildrenOffset
 Offset into set children table (zero except for parallel) More...
 
unsigned long setParentsOffset
 
long maxNumSetContents
 The largest number of values to write for any processor (needed to do collective IO). More...
 
long maxNumSetChildren
 
long maxNumSetParents
 
bool writeSets
 Flags indicating if set data should be written. For the normal (non-parallel) case, these values will depend only on whether or not there is any data to be written. For parallel meshes, opening the data table is collective, so the values must depend on whether or not any processor has meshsets to be written. More...
 
bool writeSetContents
 
bool writeSetChildren
 
bool writeSetParents
 
std::vector< SpecialSetData > specialSets
 Array of special/shared sets, in order of handle value. More...
 
std::list< TagDesc > tagList
 The list of tags to export. More...
 
bool parallelWrite
 True if doing parallel write. More...
 
bool collectiveIO
 True if using collective IO calls for parallel write. More...
 
bool writeTagDense
 True if writing dense-formatted tag data. More...
 
hid_t writeProp
 Property set to pass to H5Dwrite calls. For serial, should be H5P_DEFAULT. For parallel, may request collective IO. More...
 
DebugOutput dbgOut
 Utility to log debug output. More...
 
bool debugTrack
 Look for overlapping and/or missing writes. More...
 
- Static Protected Attributes inherited from moab::WriteHDF5
static MPEState topState
 
static MPEState subState
 

Detailed Description

Write MOAB HDF5 file in parallel.

Author
Jason Kraftcheck

Date
22 July 2004

Definition at line 20 of file WriteHDF5Parallel.hpp.
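
A minimal usage sketch (not part of the generated documentation): in typical MOAB usage the parallel writer is not instantiated directly; it is selected by the file-writing front end when a parallel write option is passed and a ParallelComm is attached to the interface. The option string and file name below are assumptions for illustration only.

    #include "moab/Core.hpp"
    #include "moab/ParallelComm.hpp"
    #include <mpi.h>
    #include <iostream>

    int main( int argc, char* argv[] )
    {
        MPI_Init( &argc, &argv );
        {
            moab::Core mb;
            moab::ParallelComm pcomm( &mb, MPI_COMM_WORLD );

            // ... load or create a partitioned mesh on each rank ...

            // Assumed option string: request a single HDF5 (.h5m) file written
            // cooperatively by all ranks, each rank writing the part it owns.
            moab::ErrorCode rval = mb.write_file( "mesh.h5m", 0, "PARALLEL=WRITE_PART" );
            if( moab::MB_SUCCESS != rval ) std::cerr << "parallel write failed\n";
        }
        MPI_Finalize();
        return 0;
    }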

Constructor & Destructor Documentation

◆ WriteHDF5Parallel()

moab::WriteHDF5Parallel::WriteHDF5Parallel ( Interface *  iface )

Constructor

Definition at line 279 of file WriteHDF5Parallel.cpp.

280  : WriteHDF5( iface ), myPcomm( NULL ), pcommAllocated( false ), hslabOp( H5S_SELECT_OR )
281 {
282 }

Referenced by factory().

◆ ~WriteHDF5Parallel()

moab::WriteHDF5Parallel::~WriteHDF5Parallel ( )
virtual

Definition at line 284 of file WriteHDF5Parallel.cpp.

285 {
286  if( pcommAllocated && myPcomm ) delete myPcomm;
287 }

References myPcomm, and pcommAllocated.

Member Function Documentation

◆ append_serial_tag_data()

ErrorCode moab::WriteHDF5Parallel::append_serial_tag_data ( std::vector< unsigned char > &  buffer,
const WriteHDF5::TagDesc &  tag 
)
protected

Definition at line 582 of file WriteHDF5Parallel.cpp.

584 {
585  ErrorCode rval;
586 
587  std::string name;
588  rval = iFace->tag_get_name( tag.tag_id, name );
589  if( MB_SUCCESS != rval ) return error( rval );
590 
591  // Get name length, including space for null char
592  size_t name_len = name.size() + 1;
593  if( name_len == 1 ) return MB_SUCCESS; // Skip tags with no name
594 
595  DataType data_type;
596  rval = iFace->tag_get_data_type( tag.tag_id, data_type );
597  if( MB_SUCCESS != rval ) return error( rval );
598 
599  // Get default value
600  int def_val_len;
601  const void* def_val;
602  if( MB_SUCCESS != iFace->tag_get_default_value( tag.tag_id, def_val, def_val_len ) )
603  {
604  def_val_len = 0;
605  def_val = 0;
606  }
607 
608  // Allocate struct within buffer
609  size_t init_size = buffer.size();
610  buffer.resize( init_size + serial_tag_data::len( name_len, def_val_len, data_type ) );
611  serial_tag_data* ptr = reinterpret_cast< serial_tag_data* >( &buffer[init_size] );
612 
613  // Populate struct
614  rval = iFace->tag_get_type( tag.tag_id, ptr->storage );
615  if( MB_SUCCESS != rval ) return error( rval );
616  ptr->type = data_type;
617  rval = iFace->tag_get_length( tag.tag_id, ptr->size );
618  if( MB_VARIABLE_DATA_LENGTH == rval )
619  ptr->size = MB_VARIABLE_LENGTH;
620  else if( MB_SUCCESS != rval )
621  return error( rval );
622  ptr->name_len = name_len;
623  Range range;
624  memset( ptr->name, 0, ptr->name_len );
625  memcpy( ptr->name, name.data(), name.size() );
626  ptr->def_val_len = def_val_len;
627  ptr->set_default_value( def_val );
628 
629  return MB_SUCCESS;
630 }

References buffer, moab::serial_tag_data::def_val_len, moab::error(), ErrorCode, moab::WriteHDF5::iFace, moab::serial_tag_data::len(), MB_SUCCESS, MB_VARIABLE_DATA_LENGTH, MB_VARIABLE_LENGTH, moab::serial_tag_data::name, moab::serial_tag_data::name_len, moab::serial_tag_data::set_default_value(), moab::serial_tag_data::size, moab::serial_tag_data::storage, moab::Interface::tag_get_data_type(), moab::Interface::tag_get_default_value(), moab::Interface::tag_get_length(), moab::Interface::tag_get_name(), moab::Interface::tag_get_type(), moab::WriteHDF5::TagDesc::tag_id, and moab::serial_tag_data::type.

Referenced by create_tag_tables().
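
As a summary of the record format implied by the listing above (inferred, not stated explicitly in the documentation): each tag is appended to the buffer as one variable-length serial_tag_data record, and the records for all tags are concatenated and exchanged as a single byte stream.

    // Assumed logical contents of one serialized tag record (see serial_tag_data):
    //   storage class (TagType), data type (DataType),
    //   length in values, or MB_VARIABLE_LENGTH for variable-length tags,
    //   name length (including the terminating null), default value length,
    //   followed by the null-padded tag name and the default value bytes.
    // serial_tag_data::len( name_len, def_val_len, data_type ) gives the record size.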

◆ check_serial_tag_data()

ErrorCode moab::WriteHDF5Parallel::check_serial_tag_data ( const std::vector< unsigned char > &  buffer,
std::vector< TagDesc * > *  missing = 0,
std::vector< TagDesc * > *  newlist = 0 
)
protected

helper function for create_tag_tables

Definition at line 632 of file WriteHDF5Parallel.cpp.

635 {
636  ErrorCode rval;
637 
638  // Use 'write_sparse' field as a 'visited' mark
639  std::list< TagDesc >::iterator tag_iter;
640  if( missing )
641  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
642  tag_iter->write_sparse = true;
643 
644  // Use a set as a temporary for what will ultimately go in
645  // newlist because we need to pass back newlist in the order
646  // of the tagList member.
647  std::set< TagDesc* > newset;
648 
649  // Iterate over data from, updating the local list of tags.
650  // Be careful to keep tagList sorted such that in the end all
651  // procs have the same list in the same order.
652  std::vector< unsigned char >::const_iterator diter = buffer.begin();
653  tag_iter = tagList.begin();
654  while( diter < buffer.end() )
655  {
656  // Get struct from buffer
657  const serial_tag_data* ptr = reinterpret_cast< const serial_tag_data* >( &*diter );
658 
659  // Find local struct for tag
660  std::string name( ptr->name );
661  std::string n;
662  iFace->tag_get_name( tag_iter->tag_id, n ); // Second time we've called, so shouldn't fail
663  if( n > name )
664  {
665  tag_iter = tagList.begin(); // New proc, start search from beginning
666  }
667  iFace->tag_get_name( tag_iter->tag_id, n );
668  while( n < name )
669  {
670  ++tag_iter;
671  if( tag_iter == tagList.end() ) break;
672  iFace->tag_get_name( tag_iter->tag_id, n );
673  }
674  if( tag_iter == tagList.end() || n != name )
675  { // New tag
676  TagDesc newtag;
677 
678  if( ptr->size == MB_VARIABLE_LENGTH )
679  rval = iFace->tag_get_handle( name.c_str(), ptr->def_val_len, ptr->type, newtag.tag_id,
680  MB_TAG_VARLEN | MB_TAG_CREAT | ptr->storage, ptr->default_value() );
681  else
682  rval = iFace->tag_get_handle( name.c_str(), ptr->size, ptr->type, newtag.tag_id,
683  MB_TAG_CREAT | ptr->storage, ptr->default_value() );
684  if( MB_SUCCESS != rval ) return error( rval );
685 
686  newtag.sparse_offset = 0;
687  newtag.var_data_offset = 0;
688  newtag.write_sparse = false;
689  newtag.max_num_ents = 0;
690  newtag.max_num_vals = 0;
691 
692  tag_iter = tagList.insert( tag_iter, newtag );
693  if( newlist ) newset.insert( &*tag_iter );
694  }
695  else
696  { // Check that tag is as expected
697  DataType type;
698  iFace->tag_get_data_type( tag_iter->tag_id, type );
699  if( type != ptr->type )
700  {
701  MB_SET_ERR( MB_FAILURE, "Processes have inconsistent data type for tag \"" << name << "\"" );
702  }
703  int size;
704  iFace->tag_get_length( tag_iter->tag_id, size );
705  if( size != ptr->size )
706  {
707  MB_SET_ERR( MB_FAILURE, "Processes have inconsistent size for tag \"" << name << "\"" );
708  }
709  tag_iter->write_sparse = false;
710  }
711 
712  // Step to next variable-length struct.
713  diter += ptr->len();
714  }
715 
716  // Now pass back any local tags that weren't in the buffer
717  if( missing )
718  {
719  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
720  {
721  if( tag_iter->write_sparse )
722  {
723  tag_iter->write_sparse = false;
724  missing->push_back( &*tag_iter );
725  }
726  }
727  }
728 
729  // Be careful to populate newlist in the same, sorted, order as tagList
730  if( newlist )
731  {
732  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
733  if( newset.find( &*tag_iter ) != newset.end() ) newlist->push_back( &*tag_iter );
734  }
735 
736  return MB_SUCCESS;
737 }

References buffer, moab::serial_tag_data::def_val_len, moab::serial_tag_data::default_value(), moab::error(), ErrorCode, moab::WriteHDF5::iFace, moab::serial_tag_data::len(), moab::WriteHDF5::TagDesc::max_num_ents, moab::WriteHDF5::TagDesc::max_num_vals, MB_SET_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_VARLEN, MB_VARIABLE_LENGTH, moab::serial_tag_data::name, size, moab::serial_tag_data::size, moab::WriteHDF5::TagDesc::sparse_offset, moab::serial_tag_data::storage, moab::Interface::tag_get_data_type(), moab::Interface::tag_get_handle(), moab::Interface::tag_get_length(), moab::Interface::tag_get_name(), moab::WriteHDF5::TagDesc::tag_id, moab::WriteHDF5::tagList, moab::serial_tag_data::type, moab::WriteHDF5::TagDesc::var_data_offset, and moab::WriteHDF5::TagDesc::write_sparse.

Referenced by create_tag_tables().

◆ communicate_shared_set_data()

ErrorCode moab::WriteHDF5Parallel::communicate_shared_set_data ( const Range &  owned,
const Range &  remote 
)
protected

Communicate set contents between processors such that each owner knows the contents, parents, & child lists from all processors that have a copy of the set.
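
The exchange protocol can be read off the listing below; as a summary (inferred from the pack/send and receive loops, not stated in the documentation): each shared set travels as one array of unsigned longs, and messages that do not fit the initial receive buffer are reposted with a larger buffer and received in a second round.

    // Assumed layout of one set message, based on the size expression
    // buff[1] + buff[2] + buff[3] + 4 used in the listing:
    //   [0] set flags
    //   [1] number of content IDs, followed later by that many content file IDs
    //   [2] number of child IDs, followed by that many child set file IDs
    //   [3] number of parent IDs, followed by that many parent set file IDs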

Definition at line 1860 of file WriteHDF5Parallel.cpp.

1861 {
1862  ErrorCode rval;
1863  int mperr;
1864  const unsigned rank = myPcomm->proc_config().proc_rank();
1865  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1866 
1867  dbgOut.tprintf( 1, "COMMUNICATING SHARED SET DATA (%lu owned & %lu remote)\n", (unsigned long)owned.size(),
1868  (unsigned long)remote.size() );
1869 
1870  // Calculate the total number of messages to be in transit (send and receive)
1871  size_t nummess = 0;
1872  std::vector< unsigned > procs;
1873  ;
1874  Range shared( owned );
1875  shared.merge( remote );
1876  for( Range::iterator i = shared.begin(); i != shared.end(); ++i )
1877  {
1878  procs.clear();
1879  rval = myPcomm->get_entityset_procs( *i, procs );
1880  CHECK_MB( rval );
1881  nummess += procs.size();
1882  }
1883 
1884  // Choose a receive buffer size. We need 4*sizeof(long) minimum,
1885  // but that is almost useless so use 16*sizeof(long) as the minimum
1886  // instead. Choose an upper limit such that we don't exceed 32 MB
1887  // of allocated memory (unless we absolutely must to meet the minimum.)
1888  // Also, don't initially choose buffers larger than 128*sizeof(long).
1889  const size_t MAX_BUFFER_MEM = 32 * 1024 * 1024 / sizeof( long );
1890  // const size_t INIT_BUFFER_SIZE = 128;
1891  const size_t INIT_BUFFER_SIZE = 1024;
1892  const size_t MIN_BUFFER_SIZE = 16;
1893  size_t init_buff_size = INIT_BUFFER_SIZE;
1894  if( init_buff_size * nummess > MAX_BUFFER_MEM ) init_buff_size = MAX_BUFFER_MEM / nummess;
1895  if( init_buff_size < MIN_BUFFER_SIZE ) init_buff_size = MIN_BUFFER_SIZE;
1896 
1897  dbgOut.printf( 2, "Using buffer size of %lu for an expected message count of %lu\n", (unsigned long)init_buff_size,
1898  (unsigned long)nummess );
1899 
1900  // Count number of recvs
1901  size_t numrecv = 0;
1902  for( Range::iterator i = owned.begin(); i != owned.end(); ++i )
1903  {
1904  procs.clear();
1905  rval = myPcomm->get_entityset_procs( *i, procs );
1906  CHECK_MB( rval );
1907  numrecv += procs.size();
1908  if( std::find( procs.begin(), procs.end(), rank ) != procs.end() ) --numrecv;
1909  }
1910 
1911  // Post receive buffers for all owned sets for all sharing procs
1912  std::vector< MPI_Request > recv_req( numrecv, MPI_REQUEST_NULL );
1913  std::vector< MPI_Request > lrecv_req( numrecv, MPI_REQUEST_NULL );
1914 
1915  std::vector< std::vector< unsigned long > > recv_buf( numrecv, std::vector< unsigned long >( init_buff_size ) );
1916  int idx = 0;
1917  for( Range::iterator i = owned.begin(); i != owned.end(); ++i )
1918  {
1919  procs.clear();
1920  rval = myPcomm->get_entityset_procs( *i, procs );
1921  CHECK_MB( rval );
1922  for( size_t j = 0; j < procs.size(); ++j )
1923  {
1924  if( procs[j] == rank ) continue;
1925  int tag = ID_FROM_HANDLE( *i );
1926  if( *i != CREATE_HANDLE( MBENTITYSET, tag ) )
1927  {
1928 #ifndef NDEBUG
1929  abort();
1930 #endif
1931  CHECK_MB( MB_FAILURE );
1932  }
1933  dbgOut.printf( 5, "Posting buffer to receive set %d from proc %u\n", tag, procs[j] );
1934  mperr =
1935  MPI_Irecv( &recv_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, procs[j], tag, comm, &recv_req[idx] );
1936  CHECK_MPI( mperr );
1937  ++idx;
1938  }
1939  }
1940  assert( (size_t)idx == numrecv );
1941 
1942  // Now send set data for all remote sets that I know about
1943  std::vector< MPI_Request > send_req( remote.size() );
1944  std::vector< std::vector< unsigned long > > send_buf( remote.size() );
1945  idx = 0;
1946  for( Range::iterator i = remote.begin(); i != remote.end(); ++i, ++idx )
1947  {
1948  send_buf[idx].resize( init_buff_size );
1949  rval = pack_set( i, &send_buf[idx][0], init_buff_size );
1950  CHECK_MB( rval );
1951  EntityHandle remote_handle;
1952  unsigned owner;
1953  rval = myPcomm->get_entityset_owner( *i, owner, &remote_handle );
1954  CHECK_MB( rval );
1955 
1956  int tag = ID_FROM_HANDLE( remote_handle );
1957  assert( remote_handle == CREATE_HANDLE( MBENTITYSET, tag ) );
1958  dbgOut.printf( 5, "Sending %lu values for set %d to proc %u\n",
1959  send_buf[idx][1] + send_buf[idx][2] + send_buf[idx][3] + 4, tag, owner );
1960  mperr = MPI_Isend( &send_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, owner, tag, comm, &send_req[idx] );
1961  CHECK_MPI( mperr );
1962  }
1963 
1964  // Tag mattag;
1965  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
1966 
1967  // Now initialize local data for managing contents of owned, shared sets
1968  assert( specialSets.empty() );
1969  specialSets.clear();
1970  specialSets.reserve( owned.size() );
1971  for( Range::iterator i = owned.begin(); i != owned.end(); ++i )
1972  {
1973  // int block;
1974  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &*i, 1, &block))
1975  // block = 0;
1976  // std::vector<int> ids;
1977 
1978  SpecialSetData data;
1979  data.setHandle = *i;
1980  rval = iFace->get_meshset_options( *i, data.setFlags );
1981  CHECK_MB( rval );
1982  specialSets.push_back( data );
1983  std::vector< EntityHandle > list;
1984  if( data.setFlags & MESHSET_ORDERED )
1985  {
1986  list.clear();
1987  rval = iFace->get_entities_by_handle( *i, list );
1988  CHECK_MB( rval );
1989  rval = vector_to_id_list( list, specialSets.back().contentIds, true );
1990  CHECK_MB( rval );
1991  // if (block)
1992  // get_global_ids(iFace, &list[0], list.size(), MESHSET_ORDERED, ids);
1993  }
1994  else
1995  {
1996  Range range;
1997  rval = iFace->get_entities_by_handle( *i, range );
1998  CHECK_MB( rval );
1999  bool ranged;
2000  rval = range_to_blocked_list( range, specialSets.back().contentIds, ranged );
2001  if( ranged ) specialSets.back().setFlags |= mhdf_SET_RANGE_BIT;
2002  // if (block) {
2003  // std::vector<EntityHandle> tmp;
2004  // for (Range::const_pair_iterator pi = range.const_pair_begin(); pi !=
2005  // range.const_pair_end(); ++pi) {
2006  // tmp.push_back(pi->first);
2007  // tmp.push_back(pi->second);
2008  // }
2009  // get_global_ids(iFace, &tmp[0], tmp.size(), ranged ? 0 : MESHSET_ORDERED, ids);
2010  //}
2011  }
2012 
2013  list.clear();
2014  rval = iFace->get_parent_meshsets( *i, list );
2015  CHECK_MB( rval );
2016  rval = vector_to_id_list( list, specialSets.back().parentIds, true );
2017  CHECK_MB( rval );
2018  rval = iFace->get_child_meshsets( *i, list );
2019  CHECK_MB( rval );
2020  rval = vector_to_id_list( list, specialSets.back().childIds, true );
2021  CHECK_MB( rval );
2022  }
2023 
2024  // Process received buffers, repost larger buffers where necessary
2025  size_t remaining = numrecv;
2026  numrecv = 0;
2027  while( remaining-- )
2028  {
2029  std::vector< unsigned long > dead;
2030  MPI_Status status;
2031  mperr = MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status );
2032  CHECK_MPI( mperr );
2033  EntityHandle handle = CREATE_HANDLE( MBENTITYSET, status.MPI_TAG );
2034  std::vector< unsigned long >& buff = recv_buf[idx];
2035  size_t size = buff[1] + buff[2] + buff[3] + 4;
2036  dbgOut.printf( 5, "Received %lu values for set %d from proc %d\n", (unsigned long)size, status.MPI_TAG,
2037  status.MPI_SOURCE );
2038  if( size <= init_buff_size )
2039  {
2040  rval = unpack_set( handle, &buff[0], init_buff_size );
2041  CHECK_MB( rval );
2042  dead.swap( buff ); // Release memory
2043  }
2044  else
2045  {
2046  // Data was too big for init_buff_size
2047  // repost with larger buffer
2048  buff.resize( size );
2049  dbgOut.printf( 5, "Re-Posting buffer to receive set %d from proc %d with size %lu\n", status.MPI_TAG,
2050  status.MPI_SOURCE, (unsigned long)size );
2051  mperr = MPI_Irecv( &buff[0], size, MPI_UNSIGNED_LONG, status.MPI_SOURCE, status.MPI_TAG, comm,
2052  &lrecv_req[idx] );
2053  CHECK_MPI( mperr );
2054  ++numrecv;
2055  }
2056  recv_req[idx] = MPI_REQUEST_NULL;
2057  }
2058 
2059  // Wait for sends to complete
2060  MPI_Waitall( send_req.size(), &send_req[0], MPI_STATUSES_IGNORE );
2061 
2062  // Re-send sets that didn't fit initial buffer size
2063  idx = 0;
2064  for( Range::iterator i = remote.begin(); i != remote.end(); ++i, ++idx )
2065  {
2066  std::vector< unsigned long >& buff = send_buf[idx];
2067  size_t size = buff[1] + buff[2] + buff[3] + 4;
2068  if( size <= init_buff_size ) continue;
2069 
2070  buff.resize( size );
2071  rval = pack_set( i, &buff[0], size );
2072  CHECK_MB( rval );
2073  EntityHandle remote_handle;
2074  unsigned owner;
2075  rval = myPcomm->get_entityset_owner( *i, owner, &remote_handle );
2076  CHECK_MB( rval );
2077 
2078  int tag = ID_FROM_HANDLE( remote_handle );
2079  assert( remote_handle == CREATE_HANDLE( MBENTITYSET, tag ) );
2080  dbgOut.printf( 5, "Sending %lu values for set %d to proc %u\n", (unsigned long)size, tag, owner );
2081  mperr = MPI_Isend( &buff[0], size, MPI_UNSIGNED_LONG, owner, tag, comm, &send_req[idx] );
2082  CHECK_MPI( mperr );
2083  }
2084 
2085  // Process received buffers
2086  remaining = numrecv;
2087  while( remaining-- )
2088  {
2089  std::vector< unsigned long > dead;
2090  MPI_Status status;
2091  mperr = MPI_Waitany( lrecv_req.size(), &lrecv_req[0], &idx, &status );
2092  CHECK_MPI( mperr );
2093  EntityHandle handle = CREATE_HANDLE( MBENTITYSET, status.MPI_TAG );
2094  std::vector< unsigned long >& buff = recv_buf[idx];
2095  dbgOut.printf( 5, "Received %lu values for set %d from proc %d\n", 4 + buff[1] + buff[2] + buff[3],
2096  status.MPI_TAG, status.MPI_SOURCE );
2097  rval = unpack_set( handle, &buff[0], buff.size() );
2098  CHECK_MB( rval );
2099  dead.swap( buff ); // Release memory
2100 
2101  lrecv_req[idx] = MPI_REQUEST_NULL;
2102  }
2103 
2104  // Wait for sends to complete
2105  MPI_Waitall( send_req.size(), &send_req[0], MPI_STATUSES_IGNORE );
2106 
2107  return MB_SUCCESS;
2108 }

References moab::Range::begin(), CHECK_MB, CHECK_MPI, moab::CREATE_HANDLE(), moab::WriteHDF5::dbgOut, moab::Range::end(), ErrorCode, moab::Interface::get_child_meshsets(), moab::Interface::get_entities_by_handle(), moab::ParallelComm::get_entityset_owner(), moab::ParallelComm::get_entityset_procs(), moab::Interface::get_meshset_options(), moab::Interface::get_parent_meshsets(), moab::ID_FROM_HANDLE(), moab::WriteHDF5::iFace, MB_SUCCESS, MBENTITYSET, moab::Range::merge(), mhdf_SET_RANGE_BIT, myPcomm, pack_set(), moab::DebugOutput::printf(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::WriteHDF5::range_to_blocked_list(), moab::WriteHDF5::SpecialSetData::setFlags, moab::WriteHDF5::SpecialSetData::setHandle, size, moab::Range::size(), moab::WriteHDF5::specialSets, moab::DebugOutput::tprintf(), unpack_set(), and moab::WriteHDF5::vector_to_id_list().

Referenced by create_meshset_tables().

◆ communicate_shared_set_ids()

ErrorCode moab::WriteHDF5Parallel::communicate_shared_set_ids ( const Range &  owned,
const Range &  remote 
)
protected

Get remote ids for shared sets.
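
A compact summary of the message layout, inferred from the send and receive loops in the listing below (one array of unsigned longs per destination rank): the first value is the number of sets described, followed by one (handle, file ID) pair per set, so a message describing n sets holds 2n+1 values.

    // Assumed message layout (owner rank -> sharing rank):
    //   [0]          n, the number of owned, shared sets in this message
    //   [2*i + 1]    handle of set i on the owning (sending) rank
    //   [2*i + 2]    file ID assigned to set i by the owner
    // The receiver translates the owner's handle to its local handle with
    // ParallelComm::get_entityset_local_handle() and records the file ID in idMap.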

Definition at line 1511 of file WriteHDF5Parallel.cpp.

1512 {
1513  ErrorCode rval;
1514  int mperr;
1515  const int TAG = 0xD0E;
1516  // const unsigned rank = myPcomm->proc_config().proc_rank();
1517  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1518 
1519  dbgOut.tprint( 1, "COMMUNICATING SHARED SET IDS\n" );
1520  dbgOut.print( 6, "Owned, shared sets: ", owned );
1521 
1522  // Post receive buffers for all procs for which we share sets
1523 
1524  std::vector< unsigned > procs;
1525  rval = myPcomm->get_entityset_owners( procs );
1526  CHECK_MB( rval );
1527  std::vector< unsigned >::iterator it = std::find( procs.begin(), procs.end(), myPcomm->proc_config().proc_rank() );
1528  if( it != procs.end() ) procs.erase( it );
1529 
1530  std::vector< MPI_Request > recv_req( procs.size(), MPI_REQUEST_NULL );
1531  std::vector< std::vector< unsigned long > > recv_buf( procs.size() );
1532 
1533  size_t recv_count = 0;
1534  for( size_t i = 0; i < procs.size(); ++i )
1535  {
1536  Range tmp;
1537  rval = myPcomm->get_owned_sets( procs[i], tmp );
1538  CHECK_MB( rval );
1539  size_t count =
1540  intersect( tmp, remote ).size(); // Necessary because we might not be writing all of the database
1541  if( count )
1542  {
1543  dbgOut.printf( 6, "Sets owned by proc %u (remote handles): ", procs[i] );
1544  if( dbgOut.get_verbosity() >= 6 )
1545  {
1546  Range remote_handles;
1547  tmp = intersect( tmp, remote );
1548  for( Range::iterator j = tmp.begin(); j != tmp.end(); ++j )
1549  {
1550  unsigned r;
1551  EntityHandle h;
1552  myPcomm->get_entityset_owner( *j, r, &h );
1553  assert( r == procs[i] );
1554  remote_handles.insert( h );
1555  }
1556  dbgOut.print( 6, remote_handles );
1557  }
1558  recv_count++;
1559  recv_buf[i].resize( 2 * count + 1 );
1560  dbgOut.printf( 5, "Posting receive buffer of size %lu for proc %u (%lu of %lu owned sets)\n",
1561  (unsigned long)recv_buf[i].size(), procs[i], count, tmp.size() );
1562  mperr =
1563  MPI_Irecv( &recv_buf[i][0], recv_buf[i].size(), MPI_UNSIGNED_LONG, procs[i], TAG, comm, &recv_req[i] );
1564  CHECK_MPI( mperr );
1565  }
1566  }
1567 
1568  // Send set ids to all procs with which we share them
1569 
1570  // First build per-process lists of sets for which we need to send data
1571  std::map< unsigned, Range > send_sets;
1572  std::vector< unsigned > set_procs;
1573  for( Range::reverse_iterator i = owned.rbegin(); i != owned.rend(); ++i )
1574  {
1575  set_procs.clear();
1576  rval = myPcomm->get_entityset_procs( *i, set_procs );
1577  CHECK_MB( rval );
1578  for( size_t j = 0; j < set_procs.size(); ++j )
1579  if( set_procs[j] != myPcomm->proc_config().proc_rank() ) send_sets[set_procs[j]].insert( *i );
1580  }
1581  assert( send_sets.find( myPcomm->proc_config().proc_rank() ) == send_sets.end() );
1582 
1583  // Now send the data
1584  std::vector< std::vector< unsigned long > > send_buf( send_sets.size() );
1585  std::vector< MPI_Request > send_req( send_sets.size() );
1586  std::map< unsigned, Range >::iterator si = send_sets.begin();
1587  for( size_t i = 0; si != send_sets.end(); ++si, ++i )
1588  {
1589  dbgOut.printf( 6, "Sending data for shared sets to proc %u: ", si->first );
1590  dbgOut.print( 6, si->second );
1591 
1592  send_buf[i].reserve( 2 * si->second.size() + 1 );
1593  send_buf[i].push_back( si->second.size() );
1594  for( Range::iterator j = si->second.begin(); j != si->second.end(); ++j )
1595  {
1596  send_buf[i].push_back( *j );
1597  send_buf[i].push_back( idMap.find( *j ) );
1598  }
1599  dbgOut.printf( 5, "Sending buffer of size %lu to proc %u (%lu of %lu owned sets)\n",
1600  (unsigned long)send_buf[i].size(), si->first, si->second.size(), owned.size() );
1601  mperr = MPI_Isend( &send_buf[i][0], send_buf[i].size(), MPI_UNSIGNED_LONG, si->first, TAG, comm, &send_req[i] );
1602  }
1603 
1604  // Process received data
1605  MPI_Status status;
1606  int idx;
1607  while( recv_count-- )
1608  {
1609  mperr = MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status );
1610  CHECK_MPI( mperr );
1611 
1612  assert( (unsigned)status.MPI_SOURCE == procs[idx] );
1613  assert( 2 * recv_buf[idx].front() + 1 == recv_buf[idx].size() );
1614  const size_t n = std::min< size_t >( recv_buf[idx].front(), ( recv_buf[idx].size() - 1 ) / 2 );
1615  dbgOut.printf( 5, "Received buffer of size %lu from proc %d\n", (unsigned long)( 2 * n + 1 ),
1616  (int)status.MPI_SOURCE );
1617 
1618  for( size_t i = 0; i < n; ++i )
1619  {
1620  EntityHandle handle = 0;
1621  rval = myPcomm->get_entityset_local_handle( procs[idx], recv_buf[idx][2 * i + 1], handle );
1622  CHECK_MB( rval );
1623  assert( handle != 0 );
1624  if( !idMap.insert( handle, recv_buf[idx][2 * i + 2], 1 ).second )
1625  error( MB_FAILURE ); // Conflicting IDs??????
1626  }
1627 
1628  recv_req[idx] = MPI_REQUEST_NULL;
1629  }
1630  assert( MPI_SUCCESS == MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status ) &&
1631  MPI_UNDEFINED == idx ); // Check that we got them all
1632 
1633  // Wait for all sends to complete before we release send
1634  // buffers (implicitly releases when we return from this function)
1635 
1636  std::vector< MPI_Status > stats( send_req.size() );
1637  mperr = MPI_Waitall( send_req.size(), &send_req[0], &stats[0] );
1638  CHECK_MPI( mperr );
1639 
1641 
1642  return MB_SUCCESS;
1643 }

References moab::Range::begin(), CHECK_MB, CHECK_MPI, moab::WriteHDF5::dbgOut, moab::Range::end(), moab::error(), ErrorCode, moab::RangeMap< KeyType, ValType, NullVal >::find(), moab::ParallelComm::get_entityset_local_handle(), moab::ParallelComm::get_entityset_owner(), moab::ParallelComm::get_entityset_owners(), moab::ParallelComm::get_entityset_procs(), moab::ParallelComm::get_owned_sets(), moab::DebugOutput::get_verbosity(), moab::WriteHDF5::idMap, moab::Range::insert(), moab::RangeMap< KeyType, ValType, NullVal >::insert(), moab::intersect(), MB_SUCCESS, myPcomm, moab::DebugOutput::print(), print_shared_sets(), moab::DebugOutput::printf(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::Range::rbegin(), moab::Range::rend(), size, moab::Range::size(), moab::SSVB, and moab::DebugOutput::tprint().

Referenced by create_meshset_tables().

◆ create_adjacency_tables()

ErrorCode moab::WriteHDF5Parallel::create_adjacency_tables ( )
protected

Create tables to hold element adjacencies.

Definition at line 1402 of file WriteHDF5Parallel.cpp.

1403 {
1404  struct AdjSetCreator : public DataSetCreator
1405  {
1406  ErrorCode operator()( WriteHDF5* file, long size, const ExportSet* ex, long& start_id ) const
1407  {
1408  mhdf_Status status;
1409  hid_t handle = mhdf_createAdjacency( file->file_ptr(), ex->name(), size, &status );
1410  CHECK_HDFN( status );
1411  mhdf_closeData( file->file_ptr(), handle, &status );
1412  CHECK_HDFN( status );
1413  start_id = -1;
1414  return MB_SUCCESS;
1415  }
1416  };
1417 
1418  std::vector< ExportSet* > groups;
1419 #ifdef WRITE_NODE_ADJACENCIES
1420  groups.push_back( &nodeSet );
1421 #endif
1422  for( std::list< ExportSet >::iterator ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter )
1423  groups.push_back( &*ex_iter );
1424 
1425  ErrorCode rval;
1426  const int numtypes = groups.size();
1427  std::vector< long > counts( numtypes );
1428  std::vector< long > offsets( numtypes );
1429  std::vector< long > max_ents( numtypes );
1430  std::vector< long > totals( numtypes );
1431  for( int i = 0; i < numtypes; ++i )
1432  {
1433  wid_t count;
1434  rval = count_adjacencies( groups[i]->range, count );
1435  CHECK_MB( rval );
1436  counts[i] = count;
1437  }
1438 
1439  rval = create_dataset( numtypes, &counts[0], &offsets[0], &max_ents[0], &totals[0], AdjSetCreator(), &groups[0] );
1440  CHECK_MB( rval );
1441 
1442  // Cppcheck warning (false positive): variable groups is assigned a value that is never used
1443  for( int i = 0; i < numtypes; ++i )
1444  {
1445  groups[i]->max_num_adjs = max_ents[i];
1446  groups[i]->adj_offset = offsets[i];
1447  }
1448  return MB_SUCCESS;
1449 }

References CHECK_HDFN, CHECK_MB, moab::WriteHDF5::count_adjacencies(), create_dataset(), ErrorCode, moab::WriteHDF5::exportList, moab::WriteHDF5::file_ptr(), MB_SUCCESS, mhdf_closeData(), mhdf_createAdjacency(), moab::WriteHDF5::ExportSet::name(), moab::WriteHDF5::nodeSet, and size.

Referenced by parallel_create_file().

◆ create_dataset()

ErrorCode moab::WriteHDF5Parallel::create_dataset ( int  num_datasets,
const long *  num_owned_entities,
long *  offsets_out,
long *  max_proc_ents_out,
long *  total_ents_out,
const DataSetCreator &  creator = NoopDescCreator(),
ExportSet *  groups[] = 0,
wid_t *  first_ids_out = NULL 
)
protected

Do typical communication for dataset creation.

Given the number of entities each processor intends to write, do necessary communication and create dataset on root, passing back misc info to each proc.

Parameters
creator: Functor to do actual dataset creation. Used only on root process.
num_datasets: The number of datasets to create.
groups: Third argument passed to DataSetCreator. Array of length num_datasets or NULL.
num_owned_entities: The number of entities this proc will write. Array of length num_datasets.
offsets_out: Output: The offset in the dataset at which this process should write. Array of length num_datasets.
max_proc_ents_out: Output: The maximum number of entities that any proc will write. Array of length num_datasets.
total_ents_out: Output: The size of the created dataset (sum of counts over all procs). Array of length num_datasets.
first_ids_out: Output: The first ID of the first entity in the data set. First ID for this proc's entities is first_id_out+offset_out. Array of length num_datasets or NULL.
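
As a concrete illustration (numbers invented for this example): if three ranks report num_owned_entities of 4, 2, and 5 for a single dataset, the root creates a table of 11 rows, the ranks receive offsets_out of 0, 4, and 6 respectively, max_proc_ents_out is 5 on every rank, total_ents_out is 11, and, when first_ids_out is requested, each rank's first entity receives file ID first_ids_out[0] + offsets_out[0].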

Definition at line 1080 of file WriteHDF5Parallel.cpp.

1088 {
1089  int result;
1090  ErrorCode rval;
1091  const unsigned rank = myPcomm->proc_config().proc_rank();
1092  const unsigned nproc = myPcomm->proc_config().proc_size();
1093  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1094 
1095  // Gather entity counts for each processor on root
1096  std::vector< long > counts( rank ? 0 : nproc * num_datasets );
1097  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &num_owned, sizeof( long ) );
1098  result = MPI_Gather( const_cast< long* >( num_owned ), num_datasets, MPI_LONG, &counts[0], num_datasets, MPI_LONG,
1099  0, comm );
1100  CHECK_MPI( result );
1101 
1102  // Create node data in file
1103  DatasetVals zero_val = { 0, 0, 0 };
1104  std::vector< DatasetVals > cumulative( num_datasets, zero_val );
1105  if( rank == 0 )
1106  {
1107  for( unsigned i = 0; i < nproc; i++ )
1108  {
1109  const long* proc_data = &counts[i * num_datasets];
1110  for( int index = 0; index < num_datasets; ++index )
1111  {
1112  cumulative[index].total += proc_data[index];
1113  if( proc_data[index] > cumulative[index].max_count ) cumulative[index].max_count = proc_data[index];
1114  }
1115  }
1116 
1117  for( int index = 0; index < num_datasets; ++index )
1118  {
1119  if( cumulative[index].total )
1120  {
1121  rval = creator( this, cumulative[index].total, groups ? groups[index] : 0, cumulative[index].start_id );
1122  CHECK_MB( rval );
1123  }
1124  else
1125  {
1126  cumulative[index].start_id = -1;
1127  }
1128  }
1129  }
1130 
1131  // Send id offset to every proc
1132  result = MPI_Bcast( (void*)&cumulative[0], 3 * num_datasets, MPI_LONG, 0, comm );
1133  CHECK_MPI( result );
1134  for( int index = 0; index < num_datasets; ++index )
1135  {
1136  if( first_ids_out ) first_ids_out[index] = (wid_t)cumulative[index].start_id;
1137  max_proc_entities[index] = cumulative[index].max_count;
1138  total_entities[index] = cumulative[index].total;
1139  }
1140 
1141  // Convert array of per-process counts to per-process offsets
1142  if( rank == 0 )
1143  {
1144  // Initialize prev_size with data sizes for root process
1145  std::vector< long > prev_size( counts.begin(), counts.begin() + num_datasets );
1146  // Root process gets offset zero
1147  std::fill( counts.begin(), counts.begin() + num_datasets, 0L );
1148  // For each proc other than this one (root)
1149  for( unsigned i = 1; i < nproc; ++i )
1150  {
1151  // Get pointer to offsets for previous process in list
1152  long* prev_data = &counts[( i - 1 ) * num_datasets];
1153  // Get pointer to offsets for this process in list
1154  long* proc_data = &counts[i * num_datasets];
1155  // For each data set
1156  for( int j = 0; j < num_datasets; ++j )
1157  {
1158  // Get size of data in dataset from process i
1159  long mysize = proc_data[j];
1160  // Offset for process i is offset of previous process plus
1161  // number of values previous process will write
1162  proc_data[j] = prev_data[j] + prev_size[j];
1163  // Store my size, as it is no longer available in 'counts'
1164  prev_size[j] = mysize;
1165  }
1166  }
1167  }
1168 
1169  // Send each proc it's offset in the table
1170  if( rank == 0 )
1171  {
1172  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &counts[0], num_datasets * nproc * sizeof( long ) );
1173  }
1174  result = MPI_Scatter( &counts[0], num_datasets, MPI_LONG, offsets_out, num_datasets, MPI_LONG, 0, comm );
1175  CHECK_MPI( result );
1176 
1177  return MB_SUCCESS;
1178 }

References CHECK_MB, CHECK_MPI, ErrorCode, MB_SUCCESS, myPcomm, moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::ProcConfig::proc_size(), and VALGRIND_CHECK_MEM_IS_DEFINED.

Referenced by create_adjacency_tables(), create_element_tables(), create_meshset_tables(), create_node_table(), and create_tag_tables().

◆ create_element_tables()

ErrorCode moab::WriteHDF5Parallel::create_element_tables ( )
protected

Create tables to hold element connectivity.

Definition at line 1363 of file WriteHDF5Parallel.cpp.

1364 {
1365  struct ElemSetCreator : public DataSetCreator
1366  {
1367  ErrorCode operator()( WriteHDF5* file, long size, const ExportSet* ex, long& start_id ) const
1368  {
1369  return file->create_elem_table( *ex, size, start_id );
1370  }
1371  };
1372 
1373  const int numtypes = exportList.size();
1374  std::vector< ExportSet* > groups( numtypes );
1375  std::vector< long > counts( numtypes ), offsets( numtypes ), max_ents( numtypes ), total_ents( numtypes );
1376  std::vector< wid_t > start_ids( numtypes );
1377 
1378  size_t idx = 0;
1379  std::list< ExportSet >::iterator ex_iter;
1380  for( ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter, ++idx )
1381  {
1382  groups[idx] = &*ex_iter;
1383  counts[idx] = ex_iter->range.size();
1384  }
1385  ErrorCode rval = create_dataset( numtypes, &counts[0], &offsets[0], &max_ents[0], &total_ents[0], ElemSetCreator(),
1386  &groups[0], &start_ids[0] );
1387  CHECK_MB( rval );
1388 
1389  for( idx = 0, ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter, ++idx )
1390  {
1391  ex_iter->first_id = start_ids[idx];
1392  ex_iter->offset = offsets[idx];
1393  ex_iter->max_num_ents = max_ents[idx];
1394  ex_iter->total_num_ents = total_ents[idx];
1395  rval = assign_ids( ex_iter->range, ex_iter->first_id + ex_iter->offset );
1396  CHECK_MB( rval );
1397  }
1398 
1399  return MB_SUCCESS;
1400 }

References moab::WriteHDF5::assign_ids(), CHECK_MB, create_dataset(), moab::WriteHDF5::create_elem_table(), ErrorCode, moab::WriteHDF5::exportList, MB_SUCCESS, and size.

Referenced by parallel_create_file().

◆ create_meshset_tables()

ErrorCode moab::WriteHDF5Parallel::create_meshset_tables ( double *  times)
protected

Create tables for mesh sets.

Definition at line 2110 of file WriteHDF5Parallel.cpp.

2111 {
2112  Range::const_iterator riter;
2113  const unsigned rank = myPcomm->proc_config().proc_rank();
2114 
2115  START_SERIAL;
2117  END_SERIAL;
2118  CpuTimer timer;
2119 
2120  // Remove remote sets from setSets
2121  Range shared, owned, remote;
2122  ErrorCode rval = myPcomm->get_shared_sets( shared );
2123  CHECK_MB( rval );
2124  shared = intersect( shared, setSet.range );
2125  rval = myPcomm->get_owned_sets( rank, owned );
2126  CHECK_MB( rval );
2127  owned = intersect( owned, setSet.range );
2128  remote = subtract( shared, owned );
2129  setSet.range = subtract( setSet.range, remote );
2130 
2131  // Create set meta table
2132  struct SetDescCreator : public DataSetCreator
2133  {
2134  ErrorCode operator()( WriteHDF5* writer, long size, const ExportSet*, long& start_id ) const
2135  {
2136  return writer->create_set_meta( size, start_id );
2137  }
2138  };
2139  long count = setSet.range.size();
2140  rval = create_dataset( 1, &count, &setSet.offset, &setSet.max_num_ents, &setSet.total_num_ents, SetDescCreator(),
2141  NULL, &setSet.first_id );
2142  CHECK_MB( rval );
2144 
2146  CHECK_MB( rval );
2147  if( times ) times[SET_OFFSET_TIME] = timer.time_elapsed();
2148 
2149  // Exchange file IDS for sets between all procs
2150  rval = communicate_shared_set_ids( owned, remote );
2151  CHECK_MB( rval );
2152  if( times ) times[SHARED_SET_IDS] = timer.time_elapsed();
2153 
2154  // Communicate remote set contents, children, etc.
2155  rval = communicate_shared_set_data( owned, remote );
2156  CHECK_MB( rval );
2157  if( times ) times[SHARED_SET_CONTENTS] = timer.time_elapsed();
2158 
2159  // Communicate counts for owned sets
2160  long data_counts[3]; // { #contents, #children, #parents }
2161  rval = count_set_size( setSet.range, data_counts[0], data_counts[1], data_counts[2] );
2162  CHECK_MB( rval );
2163  if( times ) times[SET_OFFSET_TIME] += timer.time_elapsed();
2164 
2165  long offsets[3], max_counts[3], totals[3];
2166  rval = create_dataset( 3, data_counts, offsets, max_counts, totals );
2167  CHECK_MB( rval );
2168 
2169  // Create the datasets
2170  if( 0 == myPcomm->proc_config().proc_rank() )
2171  {
2172  rval = create_set_tables( totals[0], totals[1], totals[2] );
2173  CHECK_MB( rval );
2174  }
2175 
2176  // Store communicated global data
2177  setContentsOffset = offsets[0];
2178  setChildrenOffset = offsets[1];
2179  setParentsOffset = offsets[2];
2180  maxNumSetContents = max_counts[0];
2181  maxNumSetChildren = max_counts[1];
2182  maxNumSetParents = max_counts[2];
2183  writeSetContents = totals[0] > 0;
2184  writeSetChildren = totals[1] > 0;
2185  writeSetParents = totals[2] > 0;
2186 
2187  dbgOut.printf( 2, "set contents: %ld local, %ld global, offset = %ld\n", data_counts[0], totals[0], offsets[0] );
2188  dbgOut.printf( 2, "set children: %ld local, %ld global, offset = %ld\n", data_counts[1], totals[1], offsets[1] );
2189  dbgOut.printf( 2, "set parents: %ld local, %ld global, offset = %ld\n", data_counts[2], totals[2], offsets[2] );
2190 
2191  return MB_SUCCESS;
2192 }

References moab::WriteHDF5::assign_ids(), CHECK_MB, communicate_shared_set_data(), communicate_shared_set_ids(), moab::WriteHDF5::count_set_size(), create_dataset(), moab::WriteHDF5::create_set_meta(), moab::WriteHDF5::create_set_tables(), moab::WriteHDF5::dbgOut, END_SERIAL, ErrorCode, moab::WriteHDF5::ExportSet::first_id, moab::ParallelComm::get_owned_sets(), moab::ParallelComm::get_shared_sets(), moab::WriteHDF5::iFace, moab::intersect(), moab::WriteHDF5::ExportSet::max_num_ents, moab::WriteHDF5::maxNumSetChildren, moab::WriteHDF5::maxNumSetContents, moab::WriteHDF5::maxNumSetParents, MB_SUCCESS, myPcomm, moab::WriteHDF5::ExportSet::offset, moab::print_type_sets(), moab::DebugOutput::printf(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::WriteHDF5::ExportSet::range, moab::WriteHDF5::SET_OFFSET_TIME, moab::WriteHDF5::setChildrenOffset, moab::WriteHDF5::setContentsOffset, moab::WriteHDF5::setParentsOffset, moab::WriteHDF5::setSet, moab::WriteHDF5::SHARED_SET_CONTENTS, moab::WriteHDF5::SHARED_SET_IDS, size, moab::Range::size(), START_SERIAL, moab::subtract(), moab::CpuTimer::time_elapsed(), moab::WriteHDF5::ExportSet::total_num_ents, moab::WriteHDF5::writeSetChildren, moab::WriteHDF5::writeSetContents, moab::WriteHDF5::writeSetParents, and moab::WriteHDF5::writeSets.

Referenced by parallel_create_file().

◆ create_node_table()

ErrorCode moab::WriteHDF5Parallel::create_node_table ( int  dimension)
protected

Create the node table in the file.

Definition at line 1180 of file WriteHDF5Parallel.cpp.

1181 {
1182  nodeSet.num_nodes = dimension; // Put it here so NodeSetCreator can access it
1183  struct NodeSetCreator : public DataSetCreator
1184  {
1185  ErrorCode operator()( WriteHDF5* file, long count, const ExportSet* group, long& start_id ) const
1186  {
1187  mhdf_Status status;
1188  hid_t handle = mhdf_createNodeCoords( file->file_ptr(), group->num_nodes, count, &start_id, &status );
1189  CHECK_HDFN( status );
1190  mhdf_closeData( file->file_ptr(), handle, &status );
1191  CHECK_HDFN( status );
1192  return MB_SUCCESS;
1193  }
1194  };
1195 
1196  const long count = nodeSet.range.size();
1197  ExportSet* array[] = { &nodeSet };
1199  NodeSetCreator(), array, &nodeSet.first_id );
1200  CHECK_MB( rval );
1202 }

References moab::WriteHDF5::assign_ids(), CHECK_HDFN, CHECK_MB, create_dataset(), ErrorCode, moab::WriteHDF5::file_ptr(), moab::WriteHDF5::ExportSet::first_id, moab::WriteHDF5::ExportSet::max_num_ents, MB_SUCCESS, mhdf_closeData(), mhdf_createNodeCoords(), moab::WriteHDF5::nodeSet, moab::WriteHDF5::ExportType::num_nodes, moab::WriteHDF5::ExportSet::offset, moab::WriteHDF5::ExportSet::range, moab::Range::size(), and moab::WriteHDF5::ExportSet::total_num_ents.

Referenced by parallel_create_file().

◆ create_tag_tables()

ErrorCode moab::WriteHDF5Parallel::create_tag_tables ( )
protected

Write tag descriptions and create tables to hold tag data.

Definition at line 753 of file WriteHDF5Parallel.cpp.

754 {
755  std::list< TagDesc >::iterator tag_iter;
756  ErrorCode rval;
757  int err;
758  const int rank = myPcomm->proc_config().proc_rank();
759  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
760 
761  subState.start( "negotiating tag list" );
762 
763  dbgOut.tprint( 1, "communicating tag metadata\n" );
764 
765  dbgOut.printf( 2, "Exchanging tag data for %d tags.\n", (int)tagList.size() );
766 
767  // Sort tagList contents in alphabetical order by tag name
768  tagList.sort( TagNameCompare( iFace ) );
769 
770  // Negotiate total list of tags to write
771 
772  // Build concatenated list of all tag data
773  std::vector< unsigned char > tag_buffer;
774  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
775  {
776  rval = append_serial_tag_data( tag_buffer, *tag_iter );
777  CHECK_MB( rval );
778  }
779 
780  // Broadcast list from root to all other procs
781  unsigned long size = tag_buffer.size();
782  err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, comm );
783  CHECK_MPI( err );
784  tag_buffer.resize( size );
785  err = MPI_Bcast( &tag_buffer[0], size, MPI_UNSIGNED_CHAR, 0, comm );
786  CHECK_MPI( err );
787 
788  // Update local tag list
789  std::vector< TagDesc* > missing;
790  rval = check_serial_tag_data( tag_buffer, &missing, 0 );
791  CHECK_MB( rval );
792 
793  // Check if we're done (0->done, 1->more, 2+->error)
794  int code, lcode = ( MB_SUCCESS != rval ) ? rval + 2 : missing.empty() ? 0 : 1;
795  err = MPI_Allreduce( &lcode, &code, 1, MPI_INT, MPI_MAX, comm );
796  CHECK_MPI( err );
797  if( code > 1 )
798  {
799  MB_SET_ERR_CONT( "Inconsistent tag definitions between procs" );
800  return error( (ErrorCode)( code - 2 ) );
801  }
802 
803  // If not done...
804  if( code )
805  {
806  dbgOut.print( 1, "Not all procs had same tag definitions, negotiating...\n" );
807 
808  // Get tags defined on this proc but not on root proc
809  tag_buffer.clear();
810  for( size_t i = 0; i < missing.size(); ++i )
811  {
812  rval = append_serial_tag_data( tag_buffer, *missing[i] );
813  CHECK_MB( rval );
814  }
815 
816  // Gather extra tag definitions on root processor
817  std::vector< int > junk; // don't care how many from each proc
818  assert( rank || tag_buffer.empty() ); // must be empty on root
819  err = my_Gatherv( &tag_buffer[0], tag_buffer.size(), MPI_UNSIGNED_CHAR, tag_buffer, junk, 0, comm );
820  CHECK_MPI( err );
821 
822  // Process serialized tag descriptions on root, and
823  rval = MB_SUCCESS;
824  if( 0 == rank )
825  {
826  // Process serialized tag descriptions on root, and
827  std::vector< TagDesc* > newlist;
828  rval = check_serial_tag_data( tag_buffer, 0, &newlist );
829  tag_buffer.clear();
830  // re-serialize a unique list of new tag definitions
831  for( size_t i = 0; MB_SUCCESS == rval && i != newlist.size(); ++i )
832  {
833  rval = append_serial_tag_data( tag_buffer, *newlist[i] );
834  CHECK_MB( rval );
835  }
836  }
837 
838  // Broadcast any new tag definitions from root to other procs
839  long this_size = tag_buffer.size();
840  if( MB_SUCCESS != rval ) this_size = -rval;
841  err = MPI_Bcast( &this_size, 1, MPI_LONG, 0, comm );
842  CHECK_MPI( err );
843  if( this_size < 0 )
844  {
845  MB_SET_ERR_CONT( "Inconsistent tag definitions between procs" );
846  return error( (ErrorCode)-this_size );
847  }
848  tag_buffer.resize( this_size );
849  err = MPI_Bcast( &tag_buffer[0], this_size, MPI_UNSIGNED_CHAR, 0, comm );
850  CHECK_MPI( err );
851 
852  // Process new tag definitions
853  rval = check_serial_tag_data( tag_buffer, 0, 0 );
854  CHECK_MB( rval );
855  }
856 
857  subState.end();
858  subState.start( "negotiate which element/tag combinations are dense" );
859 
860  // Figure out for which tag/element combinations we can
861  // write dense tag data.
862 
863  // Construct a table of bits,
864  // where each row of the table corresponds to a tag
865  // and each column to an element group.
866 
867  // Two extra, because first is nodes and last is sets.
868  // (n+7)/8 is ceil(n/8)
869  const int bytes_per_tag = ( exportList.size() + 9 ) / 8;
870  std::vector< unsigned char > data( bytes_per_tag * tagList.size(), 0 );
871  std::vector< unsigned char > recv( data.size(), 0 );
872  unsigned char* iter = &data[0];
873  if( writeTagDense && !data.empty() )
874  {
875  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, iter += bytes_per_tag )
876  {
877 
878  Range tagged;
879  rval = get_sparse_tagged_entities( *tag_iter, tagged );
880  CHECK_MB( rval );
881 
882  int s;
883  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) ) continue;
884 
885  std::string n;
886  iFace->tag_get_name( tag_iter->tag_id,
887  n ); // Second time we've called, so shouldn't fail
888 
889  // Check if we want to write this tag in dense format even if not
890  // all of the entities have a tag value. The criterion of this
891  // is that the tag be dense, have a default value, and have at
892  // least 2/3 of the entities tagged.
893  bool prefer_dense = false;
894  TagType type;
895  rval = iFace->tag_get_type( tag_iter->tag_id, type );
896  CHECK_MB( rval );
897  if( MB_TAG_DENSE == type )
898  {
899  const void* defval = 0;
900  rval = iFace->tag_get_default_value( tag_iter->tag_id, defval, s );
901  if( MB_SUCCESS == rval ) prefer_dense = true;
902  }
903 
904  int i = 0;
905  if( check_dense_format_tag( nodeSet, tagged, prefer_dense ) )
906  {
907  set_bit( i, iter );
908  dbgOut.printf( 2, "Can write dense data for \"%s\"/Nodes\n", n.c_str() );
909  }
910  std::list< ExportSet >::const_iterator ex_iter = exportList.begin();
911  for( ++i; ex_iter != exportList.end(); ++i, ++ex_iter )
912  {
913  // When writing in parallel, some of these element ranges might be empty
914  // on some partitions; do not mark the tag sparse just because of that.
915  // Leave it dense if we prefer dense.
916  if( ( prefer_dense && ex_iter->range.empty() ) ||
917  check_dense_format_tag( *ex_iter, tagged, prefer_dense ) )
918  {
919  set_bit( i, iter );
920  dbgOut.printf( 2, "Can write dense data for \"%s\"/%s\n", n.c_str(), ex_iter->name() );
921  }
922  }
923  if( check_dense_format_tag( setSet, tagged, prefer_dense ) )
924  {
925  set_bit( i, iter );
926  dbgOut.printf( 2, "Can write dense data for \"%s\"/Sets\n", n.c_str() );
927  }
928  }
929 
930  // Do bit-wise AND of list over all processors (only write dense format
931  // if all processes want dense format for this group of entities).
932  err = MPI_Allreduce( &data[0], &recv[0], data.size(), MPI_UNSIGNED_CHAR, MPI_BAND,
933  comm );
934  CHECK_MPI( err );
935  } // if (writeTagDense)
936 
937  // Store initial counts for sparse-formatted tag data.
938  // The total number of values to send and receive will be the number of
939  // tags plus the number of var-len tags because we need to negotiate
940  // offsets into two different tables for the var-len tags.
941  std::vector< long > counts;
942 
943  // Record dense tag/element combinations
944  iter = &recv[0];
945  const unsigned char* iter2 = &data[0];
946  for( tag_iter = tagList.begin(); tag_iter != tagList.end();
947  ++tag_iter, iter += bytes_per_tag, iter2 += bytes_per_tag )
948  {
949 
950  Range tagged;
951  rval = get_sparse_tagged_entities( *tag_iter, tagged );
952  CHECK_MB( rval );
953 
954  std::string n;
955  iFace->tag_get_name( tag_iter->tag_id, n ); // Second time we've called, so shouldn't fail
956 
957  int i = 0;
958  if( get_bit( i, iter ) )
959  {
960  assert( get_bit( i, iter2 ) );
961  tag_iter->dense_list.push_back( nodeSet );
962  tagged -= nodeSet.range;
963  dbgOut.printf( 2, "Will write dense data for \"%s\"/Nodes\n", n.c_str() );
964  }
965  std::list< ExportSet >::const_iterator ex_iter = exportList.begin();
966  for( ++i; ex_iter != exportList.end(); ++i, ++ex_iter )
967  {
968  if( get_bit( i, iter ) )
969  {
970  assert( get_bit( i, iter2 ) );
971  tag_iter->dense_list.push_back( *ex_iter );
971  dbgOut.printf( 2, "Will write dense data for \"%s\"/%s\n", n.c_str(), ex_iter->name() );
973  tagged -= ex_iter->range;
974  }
975  }
976  if( get_bit( i, iter ) )
977  {
978  assert( get_bit( i, iter2 ) );
979  tag_iter->dense_list.push_back( setSet );
980  dbgOut.printf( 2, "Will write dense data for \"%s\"/Sets\n", n.c_str() );
981  tagged -= setSet.range;
982  }
983 
984  counts.push_back( tagged.size() );
985 
986  int s;
987  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) )
988  {
989  unsigned long data_len;
990  rval = get_tag_data_length( *tag_iter, tagged, data_len );
991  CHECK_MB( rval );
992  counts.push_back( data_len );
993  }
994  }
995 
996  subState.end();
997  subState.start( "Negotiate offsets for sparse tag info" );
998 
999  std::vector< long > offsets( counts.size() ), maxima( counts.size() ), totals( counts.size() );
1000  rval = create_dataset( counts.size(), &counts[0], &offsets[0], &maxima[0], &totals[0] );
1001  CHECK_MB( rval );
1002 
1003  // Copy values into local structs and if root then create tables
1004  size_t idx = 0;
1005  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++idx )
1006  {
1007  assert( idx < counts.size() );
1008  tag_iter->sparse_offset = offsets[idx];
1009  tag_iter->max_num_ents = maxima[idx];
1010  tag_iter->write_sparse = ( 0 != totals[idx] );
1011  int s;
1012  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) )
1013  {
1014  ++idx;
1015  assert( idx < counts.size() );
1016  tag_iter->var_data_offset = offsets[idx];
1017  tag_iter->max_num_vals = maxima[idx];
1018  }
1019  else
1020  {
1021  tag_iter->var_data_offset = 0;
1022  tag_iter->max_num_vals = 0;
1023  }
1024  }
1025 
1026  subState.end();
1027 
1028  // Create tag tables on root process
1029  if( 0 == myPcomm->proc_config().proc_rank() )
1030  {
1031  size_t iidx = 0;
1032  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++iidx )
1033  {
1034  assert( iidx < totals.size() );
1035  unsigned long num_ents = totals[iidx];
1036  unsigned long num_val = 0;
1037  int s;
1038  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) )
1039  {
1040  ++iidx;
1041  assert( iidx < totals.size() );
1042  num_val = totals[iidx];
1043  }
1044  dbgOut.printf( 2, "Writing tag description for tag 0x%lx with %lu values\n",
1045  (unsigned long)tag_iter->tag_id, num_val ? num_val : num_ents );
1046 
1047  rval = create_tag( *tag_iter, num_ents, num_val );
1048  if( MB_SUCCESS != rval ) return error( rval );
1049  }
1050  }
1051 
1052  if( dbgOut.get_verbosity() > 1 )
1053  {
1054  dbgOut.printf( 2, "Tags: %12s %8s %8s %8s %8s %8s\n", "Name", "Count", "Offset", "Var Off", "Max Ent",
1055  "Handle" );
1056 
1057  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
1058  {
1059  std::string name;
1060  iFace->tag_get_name( tag_iter->tag_id, name );
1061  size_t this_size;
1062  get_num_sparse_tagged_entities( *tag_iter, this_size );
1063  dbgOut.printf( 2, "%18s %8lu %8lu %8lu %8lu 0x%7lx\n", name.c_str(), (unsigned long)this_size,
1064  (unsigned long)tag_iter->sparse_offset, (unsigned long)tag_iter->var_data_offset,
1065  (unsigned long)tag_iter->max_num_ents, (unsigned long)tag_iter->tag_id );
1066  }
1067  }
1068 
1069  return MB_SUCCESS;
1070 }

References append_serial_tag_data(), moab::WriteHDF5::check_dense_format_tag(), CHECK_MB, CHECK_MPI, check_serial_tag_data(), create_dataset(), moab::WriteHDF5::create_tag(), moab::WriteHDF5::dbgOut, moab::error(), ErrorCode, moab::WriteHDF5::exportList, moab::get_bit(), moab::WriteHDF5::get_num_sparse_tagged_entities(), moab::WriteHDF5::get_sparse_tagged_entities(), moab::WriteHDF5::get_tag_data_length(), moab::DebugOutput::get_verbosity(), moab::WriteHDF5::iFace, MB_SET_ERR_CONT, MB_SUCCESS, MB_TAG_DENSE, MB_VARIABLE_DATA_LENGTH, moab::my_Gatherv(), myPcomm, moab::WriteHDF5::nodeSet, moab::DebugOutput::print(), moab::DebugOutput::printf(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::WriteHDF5::ExportSet::range, moab::set_bit(), moab::WriteHDF5::setSet, size, moab::Range::size(), moab::WriteHDF5::subState, moab::Interface::tag_get_default_value(), moab::Interface::tag_get_length(), moab::Interface::tag_get_name(), moab::Interface::tag_get_type(), moab::WriteHDF5::tagList, TagType, moab::DebugOutput::tprint(), and moab::WriteHDF5::writeTagDense.

Referenced by parallel_create_file().
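
The negotiation above is a reusable MPI idiom: each rank sets one bit per (tag, entity-group) combination that it could write in dense format, and a bitwise AND across all ranks keeps only the combinations every rank agrees on. A minimal standalone sketch of that idiom follows; the helper functions and sizes are illustrative, not MOAB's own.

#include <mpi.h>
#include <vector>

// Illustrative bit helpers; MOAB has its own set_bit()/get_bit().
inline void set_bit( int bit, unsigned char* bytes )
{
    bytes[bit / 8] |= (unsigned char)( 1 << ( bit % 8 ) );
}
inline bool get_bit( int bit, const unsigned char* bytes )
{
    return 0 != ( bytes[bit / 8] & ( 1 << ( bit % 8 ) ) );
}

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );

    const int num_tags = 3, num_groups = 5;            // hypothetical sizes
    const int bytes_per_tag = ( num_groups + 7 ) / 8;  // ceil(num_groups / 8)

    std::vector< unsigned char > mine( num_tags * bytes_per_tag, 0 );
    std::vector< unsigned char > agreed( mine.size(), 0 );

    // Each rank sets the bits for the (tag, group) combinations it could write densely.
    set_bit( 0, &mine[0] );  // e.g. tag 0 / group 0

    // Keep a combination only if *every* rank set its bit.
    MPI_Allreduce( &mine[0], &agreed[0], (int)mine.size(), MPI_UNSIGNED_CHAR, MPI_BAND,
                   MPI_COMM_WORLD );

    bool dense_0_0 = get_bit( 0, &agreed[0] );
    (void)dense_0_0;

    MPI_Finalize();
    return 0;
}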

◆ debug_barrier_line()

void moab::WriteHDF5Parallel::debug_barrier_line ( int  lineno)
protectedvirtual

Reimplemented from moab::WriteHDF5.

Definition at line 263 of file WriteHDF5Parallel.cpp.

264 {
265  const unsigned threshold = 2;
266  static unsigned long count = 0;
267  if( dbgOut.get_verbosity() >= threshold && myPcomm )
268  {
269  dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno );
270  MPI_Barrier( myPcomm->proc_config().proc_comm() );
271  }
272 }

References moab::WriteHDF5::dbgOut, moab::DebugOutput::get_verbosity(), myPcomm, moab::DebugOutput::printf(), moab::ProcConfig::proc_comm(), and moab::ParallelComm::proc_config().
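
debug_barrier_line() is intended to be reached through a macro that forwards the call site's line number; the debug_barrier referenced elsewhere on this page plays that role. A sketch of the idiom (the exact macro definition is assumed here, not quoted from the header):

#define debug_barrier() debug_barrier_line( __LINE__ )

// At a call site inside the writer:
//     debug_barrier();   // prints "Debug Barrier N (@<line>)" and synchronizes,
//                        // but only when verbosity >= 2 and a ParallelComm is set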

◆ exchange_file_ids()

ErrorCode moab::WriteHDF5Parallel::exchange_file_ids ( const Range non_local_ents)
protected

For entities that will be written by another processor but are referenced by entities on this processor, get the file Ids that will be assigned to those so they can be referenced by entities to be written on this processor.

Parameters
non_local_ents  List of entities that are not to be written by this processor but are referenced by other entities that are to be written.

Definition at line 2257 of file WriteHDF5Parallel.cpp.

2258 {
2259  ErrorCode rval;
2260 
2261  // For each entity owned on the interface, write its file id to
2262  // a tag. The sets of entities to be written should already contain
2263  // only owned entities so by intersecting with them we not only
2264  // filter by entities to be written, but also restrict to entities
2265  // owned by the proc
2266 
2267  // Get list of interface entities
2268  Range imesh, tmp;
2269  for( std::list< ExportSet >::reverse_iterator i = exportList.rbegin(); i != exportList.rend(); ++i )
2270  {
2271  tmp.clear();
2272  rval = myPcomm->filter_pstatus( i->range, PSTATUS_SHARED, PSTATUS_AND, -1, &tmp );
2273  if( MB_SUCCESS != rval ) return error( rval );
2274  imesh.merge( tmp );
2275  }
2276  tmp.clear();
2277  rval = myPcomm->filter_pstatus( nodeSet.range, PSTATUS_SHARED, PSTATUS_AND, -1, &tmp );
2278  if( MB_SUCCESS != rval ) return error( rval );
2279  imesh.merge( tmp );
2280 
2281  // Create tag to store file IDs
2282  EntityHandle default_val = 0;
2283  Tag file_id_tag = 0;
2284  rval = iFace->tag_get_handle( "__hdf5_ll_fileid", 1, MB_TYPE_HANDLE, file_id_tag, MB_TAG_DENSE | MB_TAG_CREAT,
2285  &default_val );
2286  if( MB_SUCCESS != rval ) return error( rval );
2287 
2288  // Copy file IDs into tag
2289  std::vector< EntityHandle > file_id_vect( imesh.size() );
2290  Range::const_iterator i;
2291  std::vector< EntityHandle >::iterator j = file_id_vect.begin();
2292  for( i = imesh.begin(); i != imesh.end(); ++i, ++j )
2293  {
2294  *j = idMap.find( *i );
2295  if( !*j )
2296  {
2297  iFace->tag_delete( file_id_tag );
2298  return error( MB_FAILURE );
2299  }
2300  }
2301  rval = iFace->tag_set_data( file_id_tag, imesh, &file_id_vect[0] );
2302  if( MB_SUCCESS != rval )
2303  {
2304  iFace->tag_delete( file_id_tag );
2305  return error( rval );
2306  }
2307 
2308  // Do communication
2309  rval = myPcomm->exchange_tags( file_id_tag, imesh );
2310  if( MB_SUCCESS != rval )
2311  {
2312  iFace->tag_delete( file_id_tag );
2313  return error( rval );
2314  }
2315 
2316  // Copy file IDs from tag into idMap for remote entities
2317  file_id_vect.resize( nonlocal.size() );
2318  rval = iFace->tag_get_data( file_id_tag, nonlocal, &file_id_vect[0] );
2319  if( MB_SUCCESS != rval )
2320  {
2321  iFace->tag_delete( file_id_tag );
2322  return error( rval );
2323  }
2324 
2325  j = file_id_vect.begin();
2326  for( i = nonlocal.begin(); i != nonlocal.end(); ++i, ++j )
2327  {
2328  if( *j == 0 )
2329  {
2330  int owner = -1;
2331  myPcomm->get_owner( *i, owner );
2332  const char* name = CN::EntityTypeName( TYPE_FROM_HANDLE( *i ) );
2333  int id = ID_FROM_HANDLE( *i );
2334  MB_SET_ERR_CONT( "Process " << myPcomm->proc_config().proc_rank()
2335  << " did not receive valid id handle for shared " << name << " " << id
2336  << " owned by process " << owner );
2337  dbgOut.printf( 1,
2338  "Did not receive valid remote id for "
2339  "shared %s %d owned by process %d",
2340  name, id, owner );
2341  iFace->tag_delete( file_id_tag );
2342  return error( MB_FAILURE );
2343  }
2344  else
2345  {
2346  if( !idMap.insert( *i, *j, 1 ).second )
2347  {
2348  iFace->tag_delete( file_id_tag );
2349  return error( MB_FAILURE );
2350  }
2351  }
2352  }
2353 
2354 #ifndef NDEBUG
2355  // Check that writer is correct with regard to which entities
2356  // it owns by verifying that the file ids we thought we were
2357  // sending were not overwritten by received values
2358  file_id_vect.resize( imesh.size() );
2359  rval = iFace->tag_get_data( file_id_tag, imesh, &file_id_vect[0] );
2360  if( MB_SUCCESS != rval )
2361  {
2362  iFace->tag_delete( file_id_tag );
2363  return error( rval );
2364  }
2365  int invalid_count = 0;
2366  j = file_id_vect.begin();
2367  for( i = imesh.begin(); i != imesh.end(); ++i, ++j )
2368  {
2369  EntityHandle h = idMap.find( *i );
2370  if( *j != h )
2371  {
2372  ++invalid_count;
2373  dbgOut.printf( 1, "Conflicting ownership for %s %ld\n", CN::EntityTypeName( TYPE_FROM_HANDLE( *i ) ),
2374  (long)ID_FROM_HANDLE( *i ) );
2375  }
2376  }
2377  if( invalid_count )
2378  {
2379  iFace->tag_delete( file_id_tag );
2380  MB_SET_ERR( MB_FAILURE, invalid_count << " entities with conflicting ownership found by process "
2381  << myPcomm->proc_config().proc_rank()
2382  << ". This will result in duplicate entities written to file" );
2383  }
2384 #endif
2385 
2386  return iFace->tag_delete( file_id_tag );
2387 }

References moab::Range::begin(), moab::Range::clear(), moab::WriteHDF5::dbgOut, moab::Range::end(), moab::CN::EntityTypeName(), moab::error(), ErrorCode, moab::ParallelComm::exchange_tags(), moab::WriteHDF5::exportList, moab::ParallelComm::filter_pstatus(), moab::RangeMap< KeyType, ValType, NullVal >::find(), moab::ParallelComm::get_owner(), moab::ID_FROM_HANDLE(), moab::WriteHDF5::idMap, moab::WriteHDF5::iFace, moab::RangeMap< KeyType, ValType, NullVal >::insert(), MB_SET_ERR, MB_SET_ERR_CONT, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_DENSE, MB_TYPE_HANDLE, moab::Range::merge(), myPcomm, moab::WriteHDF5::nodeSet, moab::DebugOutput::printf(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), PSTATUS_AND, PSTATUS_SHARED, moab::WriteHDF5::ExportSet::range, moab::Range::size(), moab::Interface::tag_delete(), moab::Interface::tag_get_data(), moab::Interface::tag_get_handle(), moab::Interface::tag_set_data(), and moab::TYPE_FROM_HANDLE().

Referenced by parallel_create_file().
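
Condensed, the pattern is: the owner of each shared entity publishes its assigned ID through a tag, ParallelComm::exchange_tags() propagates the values to the other copies, and each rank then reads the tag back on its non-owned copies. A sketch under the assumption of an existing Interface/ParallelComm pair and an already-computed owned/non-owned split; the tag name, function name, and parameters are illustrative:

#include <vector>
#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include "moab/Range.hpp"

using namespace moab;

// Publish locally assigned IDs on 'owned' shared entities and collect the
// IDs assigned elsewhere for 'remote' (non-owned) copies.
ErrorCode exchange_assigned_ids( Interface* mb, ParallelComm* pcomm,
                                 const Range& owned, const Range& remote,
                                 const std::vector< EntityHandle >& owned_ids,
                                 std::vector< EntityHandle >& remote_ids )
{
    EntityHandle zero = 0;
    Tag id_tag;
    ErrorCode rval = mb->tag_get_handle( "__example_fileid", 1, MB_TYPE_HANDLE, id_tag,
                                         MB_TAG_DENSE | MB_TAG_CREAT, &zero );
    if( MB_SUCCESS != rval ) return rval;

    rval = mb->tag_set_data( id_tag, owned, owned_ids.data() );  // owners publish their IDs
    if( MB_SUCCESS != rval ) return rval;

    rval = pcomm->exchange_tags( id_tag, owned );  // copies on other ranks receive the values
    if( MB_SUCCESS != rval ) return rval;

    remote_ids.resize( remote.size() );
    rval = mb->tag_get_data( id_tag, remote, remote_ids.data() );  // read remotely assigned IDs
    if( MB_SUCCESS != rval ) return rval;

    return mb->tag_delete( id_tag );
}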

◆ factory()

WriterIface * moab::WriteHDF5Parallel::factory ( Interface iface)
static

Definition at line 274 of file WriteHDF5Parallel.cpp.

275 {
276  return new WriteHDF5Parallel( iface );
277 }

References iface, and WriteHDF5Parallel().

Referenced by moab::ReaderWriterSet::ReaderWriterSet().

◆ gather_interface_meshes()

ErrorCode moab::WriteHDF5Parallel::gather_interface_meshes ( Range non_local_ents)
protected

Figure out which local mesh is duplicated on remote processors and which processor will write that mesh.

Parameters
non_local_ents  Output list of entities that are not to be written by this processor but are referenced by other entities that are to be written.

Definition at line 295 of file WriteHDF5Parallel.cpp.

296 {
297  ErrorCode result;
298 
299  // START_SERIAL;
300  dbgOut.print( 3, "Pre-interface mesh:\n" );
301  dbgOut.print( 3, nodeSet.range );
302  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
303  dbgOut.print( 3, eiter->range );
304  dbgOut.print( 3, setSet.range );
305 
306  // Move handles of non-owned entities from lists of entities
307  // that this processor will write to the 'nonowned' list.
308 
309  nonowned.clear();
310  result = myPcomm->filter_pstatus( nodeSet.range, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &nonowned );
311  if( MB_SUCCESS != result ) return error( result );
312  nodeSet.range = subtract( nodeSet.range, nonowned );
313 
314  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
315  {
316  Range tmpset;
317  result = myPcomm->filter_pstatus( eiter->range, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &tmpset );
318  if( MB_SUCCESS != result ) return error( result );
319  eiter->range = subtract( eiter->range, tmpset );
320  nonowned.merge( tmpset );
321  }
322 
323  dbgOut.print( 3, "Post-interface mesh:\n" );
324  dbgOut.print( 3, nodeSet.range );
325  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
326  dbgOut.print( 3, eiter->range );
327  dbgOut.print( 3, setSet.range );
328 
329  // END_SERIAL;
330 
331  return MB_SUCCESS;
332 }

References moab::Range::clear(), moab::WriteHDF5::dbgOut, moab::error(), ErrorCode, moab::WriteHDF5::exportList, moab::ParallelComm::filter_pstatus(), MB_SUCCESS, moab::Range::merge(), myPcomm, moab::WriteHDF5::nodeSet, moab::DebugOutput::print(), PSTATUS_AND, PSTATUS_NOT_OWNED, moab::WriteHDF5::ExportSet::range, moab::WriteHDF5::setSet, and moab::subtract().

Referenced by parallel_create_file().
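
The owned/non-owned split used above is a small reusable idiom built on ParallelComm::filter_pstatus() and moab::subtract(). A sketch, with an illustrative helper name:

#include "MBParallelConventions.h"
#include "moab/ParallelComm.hpp"
#include "moab/Range.hpp"

using namespace moab;

// Split 'all_ents' into the part owned by this rank and the part owned elsewhere.
ErrorCode split_by_ownership( ParallelComm* pcomm, const Range& all_ents,
                              Range& owned, Range& not_owned )
{
    Range ents = all_ents;  // filter_pstatus takes a non-const range
    not_owned.clear();
    ErrorCode rval = pcomm->filter_pstatus( ents, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &not_owned );
    if( MB_SUCCESS != rval ) return rval;
    owned = subtract( all_ents, not_owned );
    return MB_SUCCESS;
}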

◆ get_sharedset_tags()

ErrorCode moab::WriteHDF5Parallel::get_sharedset_tags ( )
protected

Get any existing tags which aren't excluded and add them to the shared set tags.

◆ negotiate_type_list()

ErrorCode moab::WriteHDF5Parallel::negotiate_type_list ( )
protected

Communicate with other processors to negotiate the types of elements that will be written (the union of the types defined on each proc.)

Definition at line 1228 of file WriteHDF5Parallel.cpp.

1229 {
1230  int result;
1231  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1232 
1233  exportList.sort();
1234  int num_types = exportList.size();
1235 
1236  // Get list of types on this processor
1237  typedef std::vector< std::pair< int, int > > typelist;
1238  typelist my_types( num_types );
1239  (void)VALGRIND_MAKE_VEC_UNDEFINED( my_types );
1240  typelist::iterator viter = my_types.begin();
1241  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
1242  {
1243  viter->first = eiter->type;
1244  viter->second = eiter->num_nodes;
1245  ++viter;
1246  }
1247 
1248  dbgOut.print( 2, "Local Element Types:\n" );
1249  for( viter = my_types.begin(); viter != my_types.end(); ++viter )
1250  {
1251  int type = viter->first;
1252  int count = viter->second;
1253  dbgOut.printf( 2, " %s : %d\n", CN::EntityTypeName( (EntityType)type ), count );
1254  }
1255 
1256  // Broadcast number of types from root to all nodes
1257  int num_types0 = num_types;
1258  result = MPI_Bcast( &num_types0, 1, MPI_INT, 0, comm );
1259  CHECK_MPI( result );
1260  // Broadcast type list from root to all nodes
1261  typelist root_types( num_types0 );
1262  if( 0 == myPcomm->proc_config().proc_rank() ) root_types = my_types;
1263  result = MPI_Bcast( (void*)&root_types[0], 2 * num_types0, MPI_INT, 0, comm );
1264  CHECK_MPI( result );
1265 
1266  // Build local list of any types that root did not know about
1267  typelist non_root_types;
1268  viter = root_types.begin();
1269  for( typelist::iterator iter = my_types.begin(); iter != my_types.end(); ++iter )
1270  {
1271  if( viter == root_types.end() || *viter != *iter )
1272  non_root_types.push_back( *iter );
1273  else
1274  ++viter;
1275  }
1276 
1277  // Determine if any process had types not defined on the root
1278  int non_root_count = non_root_types.size();
1279  int not_done;
1280  result = MPI_Allreduce( &non_root_count, &not_done, 1, MPI_INT, MPI_LOR, comm );
1281  CHECK_MPI( result );
1282  if( not_done )
1283  {
1284  // Get number of types each processor has that root does not
1285  std::vector< int > counts( myPcomm->proc_config().proc_size() );
1286  int two_count = 2 * non_root_count;
1287  result = MPI_Gather( &two_count, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, comm );
1288  CHECK_MPI( result );
1289 
1290  // Get list of types from each processor
1291  std::vector< int > displs( myPcomm->proc_config().proc_size() + 1 );
1292  (void)VALGRIND_MAKE_VEC_UNDEFINED( displs );
1293  displs[0] = 0;
1294  for( unsigned long i = 1; i <= myPcomm->proc_config().proc_size(); ++i )
1295  displs[i] = displs[i - 1] + counts[i - 1];
1296  int total = displs[myPcomm->proc_config().proc_size()];
1297  typelist alltypes( total / 2 );
1298  (void)VALGRIND_MAKE_VEC_UNDEFINED( alltypes );
1299  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &non_root_types[0], non_root_types.size() * sizeof( int ) );
1300  result = MPI_Gatherv( (void*)&non_root_types[0], 2 * non_root_count, MPI_INT, (int*)&alltypes[0], &counts[0],
1301  &displs[0], MPI_INT, 0, comm );
1302  CHECK_MPI( result );
1303 
1304  // Merge type lists.
1305  // Prefer O(n) insertions with O(ln n) search time because
1306  // we expect data from a potentially large number of processes,
1307  // but with a small total number of element types.
1308  if( 0 == myPcomm->proc_config().proc_rank() )
1309  {
1310  for( viter = alltypes.begin(); viter != alltypes.end(); ++viter )
1311  {
1312  typelist::iterator titer = std::lower_bound( my_types.begin(), my_types.end(), *viter );
1313  if( titer == my_types.end() || *titer != *viter ) my_types.insert( titer, *viter );
1314  }
1315 
1316  dbgOut.print( 2, "Global Element Types:\n" );
1317  for( viter = my_types.begin(); viter != my_types.end(); ++viter )
1318  dbgOut.printf( 2, " %s : %d\n", CN::EntityTypeName( (EntityType)viter->first ), viter->second );
1319  }
1320 
1321  // Send total number of types to each processor
1322  total = my_types.size();
1323  result = MPI_Bcast( &total, 1, MPI_INT, 0, comm );
1324  CHECK_MPI( result );
1325 
1326  // Send list of types to each processor
1327  my_types.resize( total );
1328  result = MPI_Bcast( (void*)&my_types[0], 2 * total, MPI_INT, 0, comm );
1329  CHECK_MPI( result );
1330  }
1331  else
1332  {
1333  // Special case: if the root had types that some subset of procs did not
1334  // have, but there are no types the root doesn't know about, then we
1335  // still need to update the processes that are missing types; the root's
1336  // list is already the complete list in that case.
1337  my_types.swap( root_types );
1338  }
1339 
1340  // Insert missing types into exportList, with an empty
1341  // range of entities to export.
1342  std::list< ExportSet >::iterator ex_iter = exportList.begin();
1343  for( viter = my_types.begin(); viter != my_types.end(); ++viter )
1344  {
1345  while( ex_iter != exportList.end() && *ex_iter < *viter )
1346  ++ex_iter;
1347 
1348  if( ex_iter == exportList.end() || !( *ex_iter == *viter ) )
1349  {
1350  ExportSet insert;
1351  insert.type = (EntityType)viter->first;
1352  insert.num_nodes = viter->second;
1353  insert.first_id = 0;
1354  insert.offset = 0;
1355  insert.adj_offset = 0;
1356  ex_iter = exportList.insert( ex_iter, insert );
1357  }
1358  }
1359 
1360  return MB_SUCCESS;
1361 }

References moab::WriteHDF5::ExportSet::adj_offset, CHECK_MPI, moab::WriteHDF5::dbgOut, moab::CN::EntityTypeName(), moab::WriteHDF5::exportList, moab::WriteHDF5::ExportSet::first_id, MB_SUCCESS, myPcomm, moab::WriteHDF5::ExportType::num_nodes, moab::WriteHDF5::ExportSet::offset, moab::DebugOutput::print(), moab::DebugOutput::printf(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::ProcConfig::proc_size(), moab::WriteHDF5::ExportType::type, VALGRIND_CHECK_MEM_IS_DEFINED, and moab::VALGRIND_MAKE_VEC_UNDEFINED().

Referenced by parallel_create_file().
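
The negotiation follows a broadcast/gather/merge/broadcast pattern that is independent of MOAB. A reduced, self-contained sketch of the same pattern, using plain integer type codes in place of (type, num_nodes) pairs; all names and sizes are illustrative:

#include <mpi.h>
#include <algorithm>
#include <vector>

// Return the union of each rank's sorted 'mine' list, known to every rank afterwards.
std::vector< int > negotiate_union( const std::vector< int >& mine, MPI_Comm comm )
{
    int rank, nproc;
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &nproc );

    // 1) Everyone learns the root's list.
    int root_len = (int)mine.size();
    MPI_Bcast( &root_len, 1, MPI_INT, 0, comm );
    std::vector< int > root_list( root_len );
    if( 0 == rank ) root_list = mine;
    MPI_Bcast( root_list.data(), root_len, MPI_INT, 0, comm );

    // 2) Gather to the root everything the root did not already have.
    std::vector< int > extra;
    for( size_t i = 0; i < mine.size(); ++i )
        if( !std::binary_search( root_list.begin(), root_list.end(), mine[i] ) ) extra.push_back( mine[i] );
    int nextra = (int)extra.size();
    std::vector< int > counts( nproc, 0 ), displs( nproc + 1, 0 );
    MPI_Gather( &nextra, 1, MPI_INT, counts.data(), 1, MPI_INT, 0, comm );
    for( int p = 0; p < nproc; ++p )
        displs[p + 1] = displs[p] + counts[p];
    std::vector< int > gathered( displs[nproc] );
    MPI_Gatherv( extra.data(), nextra, MPI_INT, gathered.data(), counts.data(), displs.data(), MPI_INT, 0,
                 comm );

    // 3) Root merges and broadcasts the final sorted union.
    std::vector< int > result = root_list;
    if( 0 == rank )
    {
        result.insert( result.end(), gathered.begin(), gathered.end() );
        std::sort( result.begin(), result.end() );
        result.erase( std::unique( result.begin(), result.end() ), result.end() );
    }
    int total = (int)result.size();
    MPI_Bcast( &total, 1, MPI_INT, 0, comm );
    result.resize( total );
    MPI_Bcast( result.data(), total, MPI_INT, 0, comm );
    return result;
}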

◆ pack_set()

ErrorCode moab::WriteHDF5Parallel::pack_set ( Range::const_iterator  set,
unsigned long *  set_data,
size_t  set_data_length 
)
protected

Pack set data for communication.

If set_data_length is insufficient for the set data, the length entries at indices 1, 2, and 3 of set_data will be set with the necessary lengths, but no data will be written to set_data beyond that.

Definition at line 1669 of file WriteHDF5Parallel.cpp.

1670 {
1671  ErrorCode rval;
1672  const EntityHandle* ptr;
1673  int len;
1674  unsigned char flags;
1675  std::vector< wid_t > tmp;
1676  size_t newlen;
1677 
1678  // Buffer must always contain at least flags and desired sizes
1679  assert( buffer_size >= 4 );
1680  buffer_size -= 4;
1681 
1682  Range::const_iterator nd = it;
1683  ++nd;
1684  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::CONTENTS, &len, &flags );
1685  CHECK_MB( rval );
1686 
1687  // Tag mattag;
1688  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
1689  // int block;
1690  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &*it, 1, &block))
1691  // block = 0;
1692  //
1693  // if (block) {
1694  // std::vector<int> ids;
1695  // get_global_ids(iFace, ptr, len, flags, ids);
1696  //}
1697 
1698  if( len && !( flags & MESHSET_ORDERED ) )
1699  {
1700  tmp.clear();
1701  bool blocked = false;
1702  assert( ( 0 == len % 2 ) );
1703  rval = range_to_blocked_list( ptr, len / 2, tmp, blocked );
1704  CHECK_MB( rval );
1705  if( blocked ) flags |= mhdf_SET_RANGE_BIT;
1706  }
1707  else
1708  {
1709  tmp.resize( len );
1710  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true );
1711  CHECK_MB( rval );
1712  tmp.resize( newlen );
1713  }
1714 
1715  buffer[0] = flags;
1716  buffer[1] = tmp.size();
1717  if( tmp.size() <= buffer_size ) std::copy( tmp.begin(), tmp.end(), buffer + 4 );
1718 
1719  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::CHILDREN, &len );
1720  CHECK_MB( rval );
1721  tmp.resize( len );
1722  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true );
1723  tmp.resize( newlen );
1724  buffer[2] = tmp.size();
1725  if( tmp.size() <= buffer_size - buffer[1] ) std::copy( tmp.begin(), tmp.end(), buffer + 4 + buffer[1] );
1726 
1727  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::PARENTS, &len );
1728  CHECK_MB( rval );
1729  tmp.resize( len );
1730  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true );
1731  tmp.resize( newlen );
1732  buffer[3] = tmp.size();
1733  if( tmp.size() <= buffer_size - buffer[1] - buffer[2] )
1734  std::copy( tmp.begin(), tmp.end(), buffer + 4 + buffer[1] + buffer[2] );
1735 
1736  return MB_SUCCESS;
1737 }

References buffer, CHECK_MB, moab::WriteUtilIface::CHILDREN, moab::WriteUtilIface::CONTENTS, ErrorCode, moab::WriteUtilIface::get_entity_list_pointers(), MB_SUCCESS, mhdf_SET_RANGE_BIT, moab::WriteUtilIface::PARENTS, moab::WriteHDF5::range_to_blocked_list(), moab::WriteHDF5::vector_to_id_list(), and moab::WriteHDF5::writeUtil.

Referenced by communicate_shared_set_data().
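
For reference, the packed layout produced by pack_set() and consumed by unpack_set() is four header words followed by the three ID lists. A sketch of a read-only view of that layout, based on the behavior documented above; the struct and function names are illustrative:

#include <cstddef>

// Layout (in unsigned longs):
//   [ flags, n_contents, n_children, n_parents, contents..., children..., parents... ]
struct PackedSetView
{
    unsigned long flags, n_contents, n_children, n_parents;
    const unsigned long* contents;
    const unsigned long* children;
    const unsigned long* parents;
    bool complete;  // false if the sender's buffer was too small to hold the lists
};

inline PackedSetView view_packed_set( const unsigned long* buffer, size_t buffer_size )
{
    PackedSetView v;
    v.flags      = buffer[0];
    v.n_contents = buffer[1];
    v.n_children = buffer[2];
    v.n_parents  = buffer[3];
    v.contents   = buffer + 4;
    v.children   = v.contents + v.n_contents;
    v.parents    = v.children + v.n_children;
    v.complete   = ( 4 + v.n_contents + v.n_children + v.n_parents <= buffer_size );
    return v;
}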

◆ parallel_create_file()

ErrorCode moab::WriteHDF5Parallel::parallel_create_file ( const char *  filename,
bool  overwrite,
const std::vector< std::string > &  qa_records,
const FileOptions opts,
const Tag user_tag_list = 0,
int  user_tag_count = 0,
int  dimension = 3,
double *  times = 0 
)
protectedvirtual

Called by normal (non-parallel) writer. Sets up necessary data for parallel write.

Reimplemented from moab::WriteHDF5.

Definition at line 334 of file WriteHDF5Parallel.cpp.

342 {
343  ErrorCode rval;
344  mhdf_Status status;
345 
346  int pcomm_no = 0;
347  opts.get_int_option( "PARALLEL_COMM", pcomm_no );
348 
349  myPcomm = ParallelComm::get_pcomm( iFace, pcomm_no );
350  if( 0 == myPcomm )
351  {
352  myPcomm = new ParallelComm( iFace, MPI_COMM_WORLD );
353  pcommAllocated = true;
354  }
355 
356  MPI_Info info = MPI_INFO_NULL;
357  std::string cb_size;
358  rval = opts.get_str_option( "CB_BUFFER_SIZE", cb_size );
359  if( MB_SUCCESS == rval )
360  {
361  MPI_Info_create( &info );
362  MPI_Info_set( info, const_cast< char* >( "cb_buffer_size" ), const_cast< char* >( cb_size.c_str() ) );
363  }
364 
367  Range nonlocal;
368  debug_barrier();
369  dbgOut.tprint( 1, "Gathering interface meshes\n" );
370  rval = gather_interface_meshes( nonlocal );
371  if( MB_SUCCESS != rval ) return error( rval );
372 
373  /**************** Get tag names for sets likely to be shared ***********/
374  // debug_barrier();
375  // dbgOut.tprint(1, "Getting shared entity sets\n");
376  // rval = get_sharedset_tags();
377  // if (MB_SUCCESS != rval) return error(rval);
378 
379  /**************** Create actual file and write meta info ***************/
380 
381  debug_barrier();
382  if( myPcomm->proc_config().proc_rank() == 0 )
383  {
384  dbgOut.tprintf( 1, "Creating file: %s\n", filename );
385 
386  // Create the file
387  const char* type_names[MBMAXTYPE];
388  memset( type_names, 0, MBMAXTYPE * sizeof( char* ) );
389  for( EntityType i = MBEDGE; i < MBENTITYSET; ++i )
390  type_names[i] = CN::EntityTypeName( i );
391 
392  dbgOut.tprint( 1, "call mhdf_createFile\n" );
393  filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status );
394  if( !filePtr )
395  {
396  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
397  }
398 
399  dbgOut.tprint( 1, "call write_qa\n" );
400  rval = write_qa( qa_records );
401  if( MB_SUCCESS != rval ) return error( rval );
402  }
403 
404  /**************** Create node coordinate table ***************/
405  CpuTimer timer;
406  debug_barrier();
407  dbgOut.tprint( 1, "creating node table\n" );
408  topState.start( "creating node table" );
409  rval = create_node_table( dimension );
410  topState.end( rval );
411  if( MB_SUCCESS != rval ) return error( rval );
412  if( times ) times[CREATE_NODE_TIME] = timer.time_elapsed();
413 
414  /**************** Create element tables ***************/
415 
416  debug_barrier();
417  dbgOut.tprint( 1, "negotiating element types\n" );
418  topState.start( "negotiating element types" );
419  rval = negotiate_type_list();
420  topState.end( rval );
421  if( MB_SUCCESS != rval ) return error( rval );
422  if( times ) times[NEGOTIATE_TYPES_TIME] = timer.time_elapsed();
423  dbgOut.tprint( 1, "creating element table\n" );
424  topState.start( "creating element tables" );
425  rval = create_element_tables();
426  topState.end( rval );
427  if( MB_SUCCESS != rval ) return error( rval );
428  if( times ) times[CREATE_ELEM_TIME] = timer.time_elapsed();
429 
430  /*************** Exchange file IDs *****************/
431 
432  debug_barrier();
433  dbgOut.tprint( 1, "communicating file ids\n" );
434  topState.start( "communicating file ids" );
435  rval = exchange_file_ids( nonlocal );
436  topState.end( rval );
437  if( MB_SUCCESS != rval ) return error( rval );
438  if( times ) times[FILEID_EXCHANGE_TIME] = timer.time_elapsed();
439 
440  /**************** Create meshset tables *********************/
441 
442  debug_barrier();
443  dbgOut.tprint( 1, "creating meshset table\n" );
444  topState.start( "creating meshset tables" );
445  rval = create_meshset_tables( times );
446  topState.end( rval );
447  if( MB_SUCCESS != rval ) return error( rval );
448  if( times ) times[CREATE_SET_TIME] = timer.time_elapsed();
449 
450  /**************** Create adjacency tables *********************/
451 
452  debug_barrier();
453  dbgOut.tprint( 1, "creating adjacency table\n" );
454  topState.start( "creating adjacency tables" );
455  rval = create_adjacency_tables();
456  topState.end( rval );
457  if( MB_SUCCESS != rval ) return error( rval );
458  if( times ) times[CREATE_ADJ_TIME] = timer.time_elapsed();
459 
460  /**************** Create tag data *********************/
461 
462  debug_barrier();
463  dbgOut.tprint( 1, "creating tag tables\n" );
464  topState.start( "creating tag tables" );
465  rval = gather_tags( user_tag_list, user_tag_count );
466  if( MB_SUCCESS != rval ) return error( rval );
467  rval = create_tag_tables();
468  topState.end( rval );
469  if( MB_SUCCESS != rval ) return error( rval );
470  if( times ) times[CREATE_TAG_TIME] = timer.time_elapsed();
471 
472  /************** Close serial file and reopen parallel *****************/
473 
474  if( 0 == myPcomm->proc_config().proc_rank() ) mhdf_closeFile( filePtr, &status );
475 
476  MPI_Barrier( myPcomm->proc_config().proc_comm() );
477  dbgOut.tprint( 1, "(re)opening file in parallel mode\n" );
478  unsigned long junk;
479  hid_t hdf_opt = H5Pcreate( H5P_FILE_ACCESS );
480  H5Pset_fapl_mpio( hdf_opt, myPcomm->proc_config().proc_comm(), info );
481  filePtr = mhdf_openFileWithOpt( filename, 1, &junk, id_type, hdf_opt, &status );
482  H5Pclose( hdf_opt );
483  if( !filePtr )
484  {
485  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
486  }
487 
488  if( collectiveIO )
489  {
490  dbgOut.print( 1, "USING COLLECTIVE IO\n" );
491  writeProp = H5Pcreate( H5P_DATASET_XFER );
492  H5Pset_dxpl_mpio( writeProp, H5FD_MPIO_COLLECTIVE );
493  }
494 
495  /* Test if we can use H5S_APPEND when selecting hyperslabs */
496  if( MB_SUCCESS != opts.get_null_option( "HYPERSLAB_OR" ) &&
497  ( MB_SUCCESS == opts.get_null_option( "HYPERSLAB_APPEND" ) || HDF5_can_append_hyperslabs() ) )
498  {
499  dbgOut.print( 1, "HDF5 library supports H5Sselect_hyperslab with H5S_SELECT_APPEND\n" );
500  hslabOp = H5S_SELECT_APPEND;
501  }
502 
503  dbgOut.tprint( 1, "Exiting parallel_create_file\n" );
504  return MB_SUCCESS;
505 }

References moab::WriteHDF5::collectiveIO, moab::WriteHDF5::CREATE_ADJ_TIME, create_adjacency_tables(), moab::WriteHDF5::CREATE_ELEM_TIME, create_element_tables(), create_meshset_tables(), create_node_table(), moab::WriteHDF5::CREATE_NODE_TIME, moab::WriteHDF5::CREATE_SET_TIME, create_tag_tables(), moab::WriteHDF5::CREATE_TAG_TIME, moab::WriteHDF5::dbgOut, debug_barrier, moab::CN::EntityTypeName(), moab::error(), ErrorCode, exchange_file_ids(), moab::WriteHDF5::FILEID_EXCHANGE_TIME, moab::WriteHDF5::filePtr, gather_interface_meshes(), moab::WriteHDF5::gather_tags(), moab::FileOptions::get_int_option(), moab::FileOptions::get_null_option(), moab::ParallelComm::get_pcomm(), moab::FileOptions::get_str_option(), moab::HDF5_can_append_hyperslabs(), hslabOp, moab::WriteHDF5::id_type, moab::WriteHDF5::iFace, moab::DebugOutput::limit_output_to_first_N_procs(), MB_SET_ERR, MB_SUCCESS, MBEDGE, MBENTITYSET, MBMAXTYPE, mhdf_closeFile(), mhdf_createFile(), mhdf_message(), mhdf_openFileWithOpt(), myPcomm, negotiate_type_list(), moab::WriteHDF5::NEGOTIATE_TYPES_TIME, pcommAllocated, moab::DebugOutput::print(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::DebugOutput::set_rank(), moab::CpuTimer::time_elapsed(), moab::WriteHDF5::topState, moab::DebugOutput::tprint(), moab::DebugOutput::tprintf(), moab::WriteHDF5::write_qa(), and moab::WriteHDF5::writeProp.
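
parallel_create_file() is protected; it is normally reached through Interface::write_file() when parallel output is requested in the file options (the PARALLEL_COMM option read near the top of this function selects the ParallelComm index). A usage sketch, assuming MOAB was built with parallel HDF5 support; the file name, option string, and mesh setup are placeholders:

#include <mpi.h>
#include "moab/Core.hpp"

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    {
        moab::Core mb;
        // ... load or create a partitioned mesh on each rank ...
        moab::ErrorCode rval =
            mb.write_file( "out.h5m", 0, "PARALLEL=WRITE_PART;PARALLEL_COMM=0" );
        if( moab::MB_SUCCESS != rval ) MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    MPI_Finalize();
    return 0;
}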

◆ print_set_sharing_data()

void moab::WriteHDF5Parallel::print_set_sharing_data ( const Range range,
const char *  label,
Tag  idt 
)
protected

Definition at line 1453 of file WriteHDF5Parallel.cpp.

1454 {
1455  dbgOut.printf( SSVB, "set\tid\towner\t%-*s\tfid\tshared\n", (int)( sizeof( EntityHandle ) * 2 ), "handle" );
1456  for( Range::iterator it = range.begin(); it != range.end(); ++it )
1457  {
1458  int id;
1459  iFace->tag_get_data( idt, &*it, 1, &id );
1460  EntityHandle handle = 0;
1461  unsigned owner = 0;
1462  wid_t file_id = 0;
1463  myPcomm->get_entityset_owner( *it, owner, &handle );
1464  if( !idMap.find( *it, file_id ) ) file_id = 0;
1465  dbgOut.printf( SSVB, "%s\t%d\t%u\t%lx\t%lu\t", label, id, owner, (unsigned long)handle,
1466  (unsigned long)file_id );
1467  std::vector< unsigned > procs;
1468  myPcomm->get_entityset_procs( *it, procs );
1469  if( procs.empty() )
1470  dbgOut.print( SSVB, "<none>\n" );
1471  else
1472  {
1473  for( unsigned i = 0; i < procs.size() - 1; ++i )
1474  dbgOut.printf( SSVB, "%u,", procs[i] );
1475  dbgOut.printf( SSVB, "%u\n", procs.back() );
1476  }
1477  }
1478 }

References moab::Range::begin(), moab::WriteHDF5::dbgOut, moab::Range::end(), moab::RangeMap< KeyType, ValType, NullVal >::find(), moab::ParallelComm::get_entityset_owner(), moab::ParallelComm::get_entityset_procs(), moab::WriteHDF5::idMap, moab::WriteHDF5::iFace, myPcomm, moab::DebugOutput::print(), moab::DebugOutput::printf(), moab::SSVB, and moab::Interface::tag_get_data().

Referenced by print_shared_sets().

◆ print_shared_sets()

void moab::WriteHDF5Parallel::print_shared_sets ( )
protected

Definition at line 1480 of file WriteHDF5Parallel.cpp.

1481 {
1482  const char* tag_names[][2] = { { MATERIAL_SET_TAG_NAME, "block" },
1483  { DIRICHLET_SET_TAG_NAME, "nodeset" },
1484  { NEUMANN_SET_TAG_NAME, "sideset" },
1485  { 0, 0 } };
1486 
1487  for( int i = 0; tag_names[i][0]; ++i )
1488  {
1489  Tag tag;
1490  if( MB_SUCCESS != iFace->tag_get_handle( tag_names[i][0], 1, MB_TYPE_INTEGER, tag ) ) continue;
1491 
1492  Range tagged;
1493  iFace->get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, tagged );
1494  print_set_sharing_data( tagged, tag_names[i][1], tag );
1495  }
1496 
1497  Tag geom, id;
1499  id = iFace->globalId_tag();
1500 
1501  const char* geom_names[] = { "vertex", "curve", "surface", "volume" };
1502  for( int d = 0; d <= 3; ++d )
1503  {
1504  Range tagged;
1505  const void* vals[] = { &d };
1506  iFace->get_entities_by_type_and_tag( 0, MBENTITYSET, &geom, vals, 1, tagged );
1507  print_set_sharing_data( tagged, geom_names[d], id );
1508  }
1509 }

References DIRICHLET_SET_TAG_NAME, GEOM_DIMENSION_TAG_NAME, moab::Interface::get_entities_by_type_and_tag(), moab::Interface::globalId_tag(), moab::WriteHDF5::iFace, MATERIAL_SET_TAG_NAME, MB_SUCCESS, MB_TYPE_INTEGER, MBENTITYSET, NEUMANN_SET_TAG_NAME, print_set_sharing_data(), and moab::Interface::tag_get_handle().

Referenced by communicate_shared_set_ids().

◆ print_times()

void moab::WriteHDF5Parallel::print_times ( const double *  times) const
protectedvirtual

Definition at line 2389 of file WriteHDF5Parallel.cpp.

2390 {
2391  if( !myPcomm )
2392  {
2393  WriteHDF5::print_times( times );
2394  }
2395  else
2396  {
2397  double recv[NUM_TIMES];
2398  MPI_Reduce( (void*)times, recv, NUM_TIMES, MPI_DOUBLE, MPI_MAX, 0, myPcomm->proc_config().proc_comm() );
2399  if( 0 == myPcomm->proc_config().proc_rank() ) WriteHDF5::print_times( recv );
2400  }
2401 }

References myPcomm, moab::WriteHDF5::NUM_TIMES, moab::WriteHDF5::print_times(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), and moab::ProcConfig::proc_rank().

◆ remove_remote_entities() [1/2]

void moab::WriteHDF5Parallel::remove_remote_entities ( EntityHandle  relative,
Range range 
)
protected

Remove any remote mesh entities from the passed range.

Definition at line 2194 of file WriteHDF5Parallel.cpp.

2195 {
2196  Range result;
2197  result.merge( intersect( range, nodeSet.range ) );
2198  result.merge( intersect( range, setSet.range ) );
2199  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
2200  result.merge( intersect( range, eiter->range ) );
2201 
2202  // result.merge(intersect(range, myParallelSets));
2203  Range sets;
2204  int junk;
2205  sets.merge( Range::lower_bound( range.begin(), range.end(), CREATE_HANDLE( MBENTITYSET, 0, junk ) ), range.end() );
2206  remove_remote_sets( relative, sets );
2207  result.merge( sets );
2208  range.swap( result );
2209 }

References moab::Range::begin(), moab::CREATE_HANDLE(), moab::Range::end(), moab::WriteHDF5::exportList, moab::intersect(), moab::Range::lower_bound(), MBENTITYSET, moab::Range::merge(), moab::WriteHDF5::nodeSet, moab::WriteHDF5::ExportSet::range, remove_remote_sets(), moab::WriteHDF5::setSet, and moab::Range::swap().

Referenced by remove_remote_entities().

◆ remove_remote_entities() [2/2]

void moab::WriteHDF5Parallel::remove_remote_entities ( EntityHandle  relative,
std::vector< EntityHandle > &  vect 
)
protected

Definition at line 2219 of file WriteHDF5Parallel.cpp.

2220 {
2221  Range intrsct;
2222  for( std::vector< EntityHandle >::const_iterator iter = vect.begin(); iter != vect.end(); ++iter )
2223  intrsct.insert( *iter );
2224  remove_remote_entities( relative, intrsct );
2225 
2226  unsigned int read, write;
2227  for( read = write = 0; read < vect.size(); ++read )
2228  {
2229  if( intrsct.find( vect[read] ) != intrsct.end() )
2230  {
2231  if( read != write ) vect[write] = vect[read];
2232  ++write;
2233  }
2234  }
2235  if( write != vect.size() ) vect.resize( write );
2236 }

References moab::Range::end(), moab::Range::find(), moab::Range::insert(), and remove_remote_entities().
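
The read/write compaction loop above (repeated in the std::vector overload of remove_remote_sets() below) is the standard erase-remove idiom written out by hand; an equivalent sketch using std::remove_if, with an illustrative helper name:

#include <algorithm>
#include <vector>
#include "moab/Range.hpp"

// Keep only the handles that survived the Range-based filter, preserving order.
inline void keep_only( std::vector< moab::EntityHandle >& vect, const moab::Range& keep )
{
    vect.erase( std::remove_if( vect.begin(), vect.end(),
                                [&keep]( moab::EntityHandle h ) { return keep.find( h ) == keep.end(); } ),
                vect.end() );
}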

◆ remove_remote_sets() [1/2]

void moab::WriteHDF5Parallel::remove_remote_sets ( EntityHandle  relative,
Range range 
)
protected

Definition at line 2211 of file WriteHDF5Parallel.cpp.

2212 {
2213  Range result( intersect( range, setSet.range ) );
2214  // Store the non-intersecting entities separately if needed
2215  // Range remaining(subtract(range, result));
2216  range.swap( result );
2217 }

References moab::intersect(), moab::WriteHDF5::ExportSet::range, moab::WriteHDF5::setSet, and moab::Range::swap().

Referenced by remove_remote_entities(), and remove_remote_sets().

◆ remove_remote_sets() [2/2]

void moab::WriteHDF5Parallel::remove_remote_sets ( EntityHandle  relative,
std::vector< EntityHandle > &  vect 
)
protected

Definition at line 2238 of file WriteHDF5Parallel.cpp.

2239 {
2240  Range intrsct;
2241  for( std::vector< EntityHandle >::const_iterator iter = vect.begin(); iter != vect.end(); ++iter )
2242  intrsct.insert( *iter );
2243  remove_remote_sets( relative, intrsct );
2244 
2245  unsigned int read, write;
2246  for( read = write = 0; read < vect.size(); ++read )
2247  {
2248  if( intrsct.find( vect[read] ) != intrsct.end() )
2249  {
2250  if( read != write ) vect[write] = vect[read];
2251  ++write;
2252  }
2253  }
2254  if( write != vect.size() ) vect.resize( write );
2255 }

References moab::Range::end(), moab::Range::find(), moab::Range::insert(), and remove_remote_sets().

◆ unpack_set()

ErrorCode moab::WriteHDF5Parallel::unpack_set ( EntityHandle  set,
const unsigned long *  set_data,
size_t  set_data_length 
)
protected

Unpack set data from communication.

Definition at line 1792 of file WriteHDF5Parallel.cpp.

1793 {
1794  // Use local variables for readability
1795  assert( buffer_size >= 4 );
1796  assert( buffer[1] + buffer[2] + buffer[3] <= buffer_size );
1797  const unsigned long flags = buffer[0];
1798  unsigned long num_content = buffer[1];
1799  const unsigned long num_child = buffer[2];
1800  const unsigned long num_parent = buffer[3];
1801  const unsigned long* contents = buffer + 4;
1802  const unsigned long* children = contents + num_content;
1803  const unsigned long* parents = children + num_child;
1804 
1805  SpecialSetData* data = find_set_data( set );
1806  assert( NULL != data );
1807  if( NULL == data ) return MB_FAILURE;
1808 
1809  // Tag mattag;
1810  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
1811  // int block;
1812  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &set, 1, &block))
1813  // block = 0;
1814 
1815  // If either the current data or the new data is in ranged format,
1816  // then change the other to ranged format if it isn't already
1817  // in both cases when they differ, the data will end up "compressed range"
1818  std::vector< wid_t > tmp;
1819  if( ( flags & mhdf_SET_RANGE_BIT ) != ( data->setFlags & mhdf_SET_RANGE_BIT ) )
1820  {
1821  if( flags & mhdf_SET_RANGE_BIT )
1822  {
1823  tmp = data->contentIds;
1824  convert_to_ranged_ids( &tmp[0], tmp.size(), data->contentIds );
1825  data->setFlags |= mhdf_SET_RANGE_BIT;
1826  }
1827  else
1828  {
1829  tmp.clear();
1830  convert_to_ranged_ids( contents, num_content, tmp );
1831  num_content = tmp.size();
1832  if( sizeof( wid_t ) < sizeof( long ) )
1833  {
1834  size_t old_size = tmp.size();
1835  tmp.resize( sizeof( long ) * old_size / sizeof( wid_t ) );
1836  unsigned long* array = reinterpret_cast< unsigned long* >( &tmp[0] );
1837  for( long i = ( (long)old_size ) - 1; i >= 0; --i )
1838  array[i] = tmp[i];
1839  contents = array;
1840  }
1841  else if( sizeof( wid_t ) > sizeof( long ) )
1842  {
1843  unsigned long* array = reinterpret_cast< unsigned long* >( &tmp[0] );
1844  std::copy( tmp.begin(), tmp.end(), array );
1845  }
1846  contents = reinterpret_cast< unsigned long* >( &tmp[0] );
1847  }
1848  }
1849 
1850  if( data->setFlags & mhdf_SET_RANGE_BIT )
1851  merge_ranged_ids( contents, num_content, data->contentIds );
1852  else
1853  merge_vector_ids( contents, num_content, data->contentIds );
1854 
1855  merge_vector_ids( children, num_child, data->childIds );
1856  merge_vector_ids( parents, num_parent, data->parentIds );
1857  return MB_SUCCESS;
1858 }

References buffer, moab::WriteHDF5::SpecialSetData::childIds, children, moab::WriteHDF5::SpecialSetData::contentIds, moab::convert_to_ranged_ids(), moab::WriteHDF5::find_set_data(), MB_SUCCESS, moab::merge_ranged_ids(), moab::merge_vector_ids(), mhdf_SET_RANGE_BIT, moab::WriteHDF5::SpecialSetData::parentIds, and moab::WriteHDF5::SpecialSetData::setFlags.

Referenced by communicate_shared_set_data().

Member Data Documentation

◆ hslabOp

H5S_seloper_t moab::WriteHDF5Parallel::hslabOp
private

Operation to use to append hyperslab selections.

Definition at line 186 of file WriteHDF5Parallel.hpp.

Referenced by parallel_create_file().

◆ myPcomm

◆ pcommAllocated

bool moab::WriteHDF5Parallel::pcommAllocated
private

whether this instance allocated (and dtor should delete) the pcomm

Definition at line 183 of file WriteHDF5Parallel.hpp.

Referenced by parallel_create_file(), and ~WriteHDF5Parallel().


The documentation for this class was generated from the following files:

WriteHDF5Parallel.hpp
WriteHDF5Parallel.cpp