Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
WriteHDF5Parallel.cpp
1 #undef DEBUG
2 #undef TIME_DEBUG
3 
4 #include <cstdarg>
5 #include <ctime>
6 #include <cstdlib>
7 
8 #include <cstring>
9 #include <cassert>
10 
11 #include <vector>
12 #include <set>
13 #include <map>
14 #include <utility>
15 #include <iostream>
16 #include <sstream>
17 #include <string>
18 
19 #include "moab/Interface.hpp"
20 #include "Internals.hpp"
21 #include "MBTagConventions.hpp"
22 #include "MBParallelConventions.h"
23 #include "moab/ParallelComm.hpp"
24 #include "moab/CN.hpp"
25 #include "moab/Range.hpp"
26 #include "moab/CpuTimer.hpp"
27 
28 #include "WriteHDF5Parallel.hpp"
29 
30 #ifndef MOAB_HAVE_HDF5
31 #error Attempt to compile WriteHDF5Parallel with HDF5 support disabled
32 #endif
33 
34 #include <H5Tpublic.h>
35 #include <H5Ppublic.h>
36 #include <H5FDmpi.h>
37 #include <H5FDmpio.h>
38 
39 #include "mhdf.h"
40 
41 #include "IODebugTrack.hpp"
42 #include "moab/FileOptions.hpp"
43 
44 namespace
45 {
46 template < bool Condition >
47 struct STATIC_ASSERTION;
48 template <>
49 struct STATIC_ASSERTION< true >
50 {
51 };
52 } // namespace
53 
54 #define PP_CAT_( a, b ) a##b
55 #define PP_CAT( a, b ) PP_CAT_( a, b )
56 #define STATIC_ASSERT( Condition ) \
57  enum \
58  { \
59  PP_CAT( dummy, __LINE__ ) = sizeof( ::STATIC_ASSERTION< (bool)( Condition ) > ) \
60  }
61 
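A minimal sketch (not part of the listed source) of the compile-time check that STATIC_ASSERT implements: sizeof can only be applied to the complete specialization STATIC_ASSERTION<true>, so a false condition fails to compile, and the enum merely gives the sizeof expression a legal place to appear. With C++11 or later the same effect is spelled static_assert.

    // Sketch only -- same trick with hypothetical names, not code from this file.
    template < bool Condition >
    struct EXAMPLE_ASSERTION;                 // left incomplete for 'false'
    template <>
    struct EXAMPLE_ASSERTION< true >
    {
    };

    enum
    {
        example_ok = sizeof( EXAMPLE_ASSERTION< sizeof( long ) >= sizeof( int ) > )
        // example_bad = sizeof( EXAMPLE_ASSERTION< sizeof( char ) == 2 > )  // would not compile
    };
    // C++11 equivalent: static_assert( sizeof( unsigned long ) >= sizeof( moab::EntityHandle ), "" );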
62 namespace moab
63 {
64 
65 #ifndef _WIN32 // problematic for windows
66 // Need an MPI type that we can put handles in
67 STATIC_ASSERT( sizeof( unsigned long ) >= sizeof( EntityHandle ) );
68 
69 // Need an MPI type that we can put file IDs in
70 STATIC_ASSERT( sizeof( unsigned long ) >= sizeof( WriteHDF5::wid_t ) );
71 #endif
72 
73 // This function doesn't do anything useful. It's just a nice
74 // place to set a break point to determine why the writer fails.
75 static inline ErrorCode error( ErrorCode rval )
76 {
77  return rval;
78 }
79 
80 const char* mpi_err_str( int errorcode )
81 {
82  static char buffer[2048];
83  int len = sizeof( buffer );
84  MPI_Error_string( errorcode, buffer, &len );
85  buffer[std::min( (size_t)len, sizeof( buffer ) - 1 )] = '\0';
86  return buffer;
87 }
88 
89 #define MPI_FAILURE_MSG( A ) \
90  "MPI Failure at " __FILE__ ":%d : (Code %d) %s\n", __LINE__, (int)( A ), mpi_err_str( ( A ) )
91 
92 #define CHECK_MPI( A ) \
93  do \
94  { \
95  if( MPI_SUCCESS != ( A ) ) \
96  { \
97  MB_SET_ERR_CONT( "MPI Failure : (Code " << (int)( A ) << ") " << mpi_err_str( ( A ) ) ); \
98  dbgOut.printf( 1, MPI_FAILURE_MSG( ( A ) ) ); \
99  return error( MB_FAILURE ); \
100  } \
101  } while( false )
102 
103 #define MB_FAILURE_MSG( A ) "MOAB_Failure at " __FILE__ ":%d : %s (%d)\n", __LINE__, ErrorCodeStr[( A )], (int)( A )
104 
105 #define CHECK_MB( A ) \
106  do \
107  { \
108  if( MB_SUCCESS != ( A ) ) \
109  { \
110  MB_SET_ERR_CONT( "MOAB Failure : " << ErrorCodeStr[( A )] ); \
111  dbgOut.printf( 1, MB_FAILURE_MSG( ( A ) ) ); \
112  return error( A ); \
113  } \
114  } while( false )
115 
116 #define HDF_FAILURE_MSG( A ) "MHDF Failure at " __FILE__ ":%d : %s\n", __LINE__, mhdf_message( &( A ) )
117 
118 #define CHECK_HDF( A ) \
119  do \
120  { \
121  if( mhdf_isError( &( A ) ) ) \
122  { \
123  MB_SET_ERR_CONT( "MHDF Failure : " << mhdf_message( &( A ) ) ); \
124  dbgOut.printf( 1, HDF_FAILURE_MSG( ( A ) ) ); \
125  return error( MB_FAILURE ); \
126  } \
127  } while( false )
128 
129 #define CHECK_HDFN( A ) \
130  do \
131  { \
132  if( mhdf_isError( &( A ) ) ) \
133  { \
134  MB_SET_ERR_CONT( "MHDF Failure : " << mhdf_message( &( A ) ) ); \
135  return error( MB_FAILURE ); \
136  } \
137  } while( false )
138 
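A usage sketch (not part of the listed source) of the CHECK_* macros defined above. They assume they expand inside a WriteHDF5Parallel member, where dbgOut and error() are in scope; 'handle' below is a hypothetical, previously opened mhdf data handle. The do { ... } while( false ) wrapper makes each macro a single statement, so it stays safe in unbraced if/else branches.

    // Sketch only:
    // ErrorCode WriteHDF5Parallel::some_step( hid_t handle )
    // {
    //     int err = MPI_Barrier( myPcomm->proc_config().proc_comm() );
    //     CHECK_MPI( err );            // on failure: logs, prints, returns error(MB_FAILURE)
    //
    //     mhdf_Status status;
    //     mhdf_closeData( filePtr, handle, &status );
    //     CHECK_HDF( status );         // returns error(MB_FAILURE) if mhdf flagged an error
    //     return MB_SUCCESS;
    // }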
139 #ifdef VALGRIND
140 #include <valgrind/memcheck.h>
141 
142 template < typename T >
143 inline void VALGRIND_MAKE_VEC_UNDEFINED( std::vector< T >& v )
144 {
145  if( v.size() )
146  {
147  }
148  (void)VALGRIND_MAKE_MEM_UNDEFINED( &v[0], v.size() * sizeof( T ) );
149 }
150 
151 #else
152 #ifndef VALGRIND_CHECK_MEM_IS_DEFINED
153 #define VALGRIND_CHECK_MEM_IS_DEFINED( a, b ) ( (void)0 )
154 #endif
155 #ifndef VALGRIND_CHECK_MEM_IS_ADDRESSABLE
156 #define VALGRIND_CHECK_MEM_IS_ADDRESSABLE( a, b ) ( (void)0 )
157 #endif
158 #ifndef VALGRIND_MAKE_MEM_UNDEFINED
159 #define VALGRIND_MAKE_MEM_UNDEFINED( a, b ) ( (void)0 )
160 #endif
161 
162 template < typename T >
163 inline void VALGRIND_MAKE_VEC_UNDEFINED( std::vector< T >& )
164 {
165  /* Nothing to do */
166 }
167 
168 #endif
169 
170 #ifndef NDEBUG
171 #define START_SERIAL \
172  for( unsigned _x = 0; _x < myPcomm->proc_config().proc_size(); ++_x ) \
173  { \
174  MPI_Barrier( myPcomm->proc_config().proc_comm() ); \
175  if( _x != myPcomm->proc_config().proc_rank() ) continue
176 #define END_SERIAL \
177  } \
178  MPI_Barrier( myPcomm->proc_config().proc_comm() )
179 #else
180 #define START_SERIAL
181 #define END_SERIAL
182 #endif
183 
184 static int my_Gatherv( void* sendbuf,
185  int sendcount,
186  MPI_Datatype sendtype,
187  std::vector< unsigned char >& recvbuf,
188  std::vector< int >& recvcounts,
189  int root,
190  MPI_Comm comm )
191 {
192  int nproc, rank, bytes, err;
193  MPI_Comm_size( comm, &nproc );
194  MPI_Comm_rank( comm, &rank );
195  MPI_Type_size( sendtype, &bytes );
196 
197  recvcounts.resize( rank == root ? nproc : 0 );
198  err = MPI_Gather( &sendcount, 1, MPI_INT, &recvcounts[0], 1, MPI_INT, root, comm );
199  if( MPI_SUCCESS != err ) return err;
200 
201  std::vector< int > disp( recvcounts.size() );
202  if( root == rank )
203  {
204  disp[0] = 0;
205  for( int i = 1; i < nproc; ++i )
206  disp[i] = disp[i - 1] + recvcounts[i - 1];
207  recvbuf.resize( bytes * ( disp.back() + recvcounts.back() ) );
208  }
209 
210  return MPI_Gatherv( sendbuf, sendcount, sendtype, &recvbuf[0], &recvcounts[0], &disp[0], sendtype, root, comm );
211 }
212 
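A usage sketch (not part of the listed source) of my_Gatherv above, mirroring how the tag-negotiation code calls it later in this file: every rank contributes a byte buffer of arbitrary length and only the root receives the concatenated data plus the per-rank counts. 'serialize_local_data' and 'comm' are stand-ins for whatever the caller has.

    // Sketch only:
    std::vector< unsigned char > sendbuf = serialize_local_data();  // hypothetical helper
    std::vector< unsigned char > recvbuf;  // resized and filled on the root only
    std::vector< int > counts;             // per-rank byte counts, root only
    int err = my_Gatherv( &sendbuf[0], sendbuf.size(), MPI_UNSIGNED_CHAR, recvbuf, counts, 0, comm );
    if( MPI_SUCCESS != err )
    {
        // handle the MPI failure
    }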
213 static void print_type_sets( Interface* iFace, DebugOutput* str, Range& sets )
214 {
215  const unsigned VB = 2;
216  if( str->get_verbosity() < VB ) return;
217 
218  Tag gid, did, bid, sid, nid;
219  gid = iFace->globalId_tag();
220  iFace->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, did );
221  iFace->tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bid );
222  iFace->tag_get_handle( DIRICHLET_SET_TAG_NAME, 1, MB_TYPE_INTEGER, nid );
223  iFace->tag_get_handle( NEUMANN_SET_TAG_NAME, 1, MB_TYPE_INTEGER, sid );
224  Range typesets[10];
225  const char* typenames[] = { "Block ", "Sideset ", "NodeSet", "Vertex", "Curve",
226  "Surface", "Volume", "Body", "Other" };
227  for( Range::iterator riter = sets.begin(); riter != sets.end(); ++riter )
228  {
229  unsigned dim, id; //, oldsize;
230  if( MB_SUCCESS == iFace->tag_get_data( bid, &*riter, 1, &id ) )
231  dim = 0;
232  else if( MB_SUCCESS == iFace->tag_get_data( sid, &*riter, 1, &id ) )
233  dim = 1;
234  else if( MB_SUCCESS == iFace->tag_get_data( nid, &*riter, 1, &id ) )
235  dim = 2;
236  else if( MB_SUCCESS == iFace->tag_get_data( did, &*riter, 1, &dim ) )
237  {
238  id = 0;
239  iFace->tag_get_data( gid, &*riter, 1, &id );
240  dim += 3;
241  }
242  else
243  {
244  id = *riter;
245  dim = 9;
246  }
247 
248  // oldsize = typesets[dim].size();
249  typesets[dim].insert( id );
250  // assert(typesets[dim].size() - oldsize == 1);
251  }
252  for( int ii = 0; ii < 9; ++ii )
253  {
254  char tmp[64];
255  snprintf( tmp, 64, "%s (%lu) ", typenames[ii], (unsigned long)typesets[ii].size() );
256  str->print( VB, tmp, typesets[ii] );
257  }
258  str->printf( VB, "Total: %lu\n", (unsigned long)sets.size() );
259 }
260 
261 #define debug_barrier() debug_barrier_line( __LINE__ )
262 
263 void WriteHDF5Parallel::debug_barrier_line( int lineno )
264 {
265  const unsigned threshold = 2;
266  static unsigned long count = 0;
267  if( dbgOut.get_verbosity() >= threshold && myPcomm )
268  {
269  dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno );
270  MPI_Barrier( myPcomm->proc_config().proc_comm() );
271  }
272 }
273 
274 WriterIface* WriteHDF5Parallel::factory( Interface* iface )
275 {
276  return new WriteHDF5Parallel( iface );
277 }
278 
279 WriteHDF5Parallel::WriteHDF5Parallel( Interface* iface )
280  : WriteHDF5( iface ), myPcomm( NULL ), pcommAllocated( false ), hslabOp( H5S_SELECT_OR )
281 {
282 }
283 
284 WriteHDF5Parallel::~WriteHDF5Parallel()
285 {
286  if( pcommAllocated && myPcomm ) delete myPcomm;
287 }
288 
289 // The parent WriteHDF5 class has ExportSet structs that are
290 // populated with the entities to be written, grouped by type
291 // (and for elements, connectivity length). This function:
292 // o determines which entities are to be written by a remote processor
293 // o removes those entities from the ExportSet structs in WriteMesh
294 // o passes them back in a Range
295 ErrorCode WriteHDF5Parallel::gather_interface_meshes( Range& nonowned )
296 {
297  ErrorCode result;
298 
299  // START_SERIAL;
300  dbgOut.print( 3, "Pre-interface mesh:\n" );
301  dbgOut.print( 3, nodeSet.range );
302  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
303  dbgOut.print( 3, eiter->range );
304  dbgOut.print( 3, setSet.range );
305 
306  // Move handles of non-owned entities from lists of entities
307  // that this processor will write to the 'nonowned' list.
308 
309  nonowned.clear();
310  result = myPcomm->filter_pstatus( nodeSet.range, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &nonowned );
311  if( MB_SUCCESS != result ) return error( result );
312  nodeSet.range = subtract( nodeSet.range, nonowned );
313 
314  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
315  {
316  Range tmpset;
317  result = myPcomm->filter_pstatus( eiter->range, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &tmpset );
318  if( MB_SUCCESS != result ) return error( result );
319  eiter->range = subtract( eiter->range, tmpset );
320  nonowned.merge( tmpset );
321  }
322 
323  dbgOut.print( 3, "Post-interface mesh:\n" );
324  dbgOut.print( 3, nodeSet.range );
325  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
326  dbgOut.print( 3, eiter->range );
327  dbgOut.print( 3, setSet.range );
328 
329  // END_SERIAL;
330 
331  return MB_SUCCESS;
332 }
333 
334 ErrorCode WriteHDF5Parallel::parallel_create_file( const char* filename,
335  bool overwrite,
336  const std::vector< std::string >& qa_records,
337  const FileOptions& opts,
338  const Tag* user_tag_list,
339  int user_tag_count,
340  int dimension,
341  double* times )
342 {
343  ErrorCode rval;
344  mhdf_Status status;
345 
346  int pcomm_no = 0;
347  opts.get_int_option( "PARALLEL_COMM", pcomm_no );
348 
349  myPcomm = ParallelComm::get_pcomm( iFace, pcomm_no );
350  if( 0 == myPcomm )
351  {
352  myPcomm = new ParallelComm( iFace, MPI_COMM_WORLD );
353  pcommAllocated = true;
354  }
355 
356  MPI_Info info = MPI_INFO_NULL;
357  std::string cb_size;
358  rval = opts.get_str_option( "CB_BUFFER_SIZE", cb_size );
359  if( MB_SUCCESS == rval )
360  {
361  MPI_Info_create( &info );
362  MPI_Info_set( info, const_cast< char* >( "cb_buffer_size" ), const_cast< char* >( cb_size.c_str() ) );
363  }
364 
367  Range nonlocal;
368  debug_barrier();
369  dbgOut.tprint( 1, "Gathering interface meshes\n" );
370  rval = gather_interface_meshes( nonlocal );
371  if( MB_SUCCESS != rval ) return error( rval );
372 
373  /**************** Get tag names for sets likely to be shared ***********/
374  // debug_barrier();
375  // dbgOut.tprint(1, "Getting shared entity sets\n");
376  // rval = get_sharedset_tags();
377  // if (MB_SUCCESS != rval) return error(rval);
378 
379  /**************** Create actual file and write meta info ***************/
380 
381  debug_barrier();
382  if( myPcomm->proc_config().proc_rank() == 0 )
383  {
384  dbgOut.tprintf( 1, "Creating file: %s\n", filename );
385 
386  // Create the file
387  const char* type_names[MBMAXTYPE];
388  memset( type_names, 0, MBMAXTYPE * sizeof( char* ) );
389  for( EntityType i = MBEDGE; i < MBENTITYSET; ++i )
390  type_names[i] = CN::EntityTypeName( i );
391 
392  dbgOut.tprint( 1, "call mhdf_createFile\n" );
393  filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status );
394  if( !filePtr )
395  {
396  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
397  }
398 
399  dbgOut.tprint( 1, "call write_qa\n" );
400  rval = write_qa( qa_records );
401  if( MB_SUCCESS != rval ) return error( rval );
402  }
403 
404  /**************** Create node coordinate table ***************/
405  CpuTimer timer;
406  debug_barrier();
407  dbgOut.tprint( 1, "creating node table\n" );
408  topState.start( "creating node table" );
409  rval = create_node_table( dimension );
410  topState.end( rval );
411  if( MB_SUCCESS != rval ) return error( rval );
412  if( times ) times[CREATE_NODE_TIME] = timer.time_elapsed();
413 
414  /**************** Create element tables ***************/
415 
416  debug_barrier();
417  dbgOut.tprint( 1, "negotiating element types\n" );
418  topState.start( "negotiating element types" );
419  rval = negotiate_type_list();
420  topState.end( rval );
421  if( MB_SUCCESS != rval ) return error( rval );
422  if( times ) times[NEGOTIATE_TYPES_TIME] = timer.time_elapsed();
423  dbgOut.tprint( 1, "creating element table\n" );
424  topState.start( "creating element tables" );
425  rval = create_element_tables();
426  topState.end( rval );
427  if( MB_SUCCESS != rval ) return error( rval );
428  if( times ) times[CREATE_ELEM_TIME] = timer.time_elapsed();
429 
430  /*************** Exchange file IDs *****************/
431 
432  debug_barrier();
433  dbgOut.tprint( 1, "communicating file ids\n" );
434  topState.start( "communicating file ids" );
435  rval = exchange_file_ids( nonlocal );
436  topState.end( rval );
437  if( MB_SUCCESS != rval ) return error( rval );
438  if( times ) times[FILEID_EXCHANGE_TIME] = timer.time_elapsed();
439 
440  /**************** Create meshset tables *********************/
441 
442  debug_barrier();
443  dbgOut.tprint( 1, "creating meshset table\n" );
444  topState.start( "creating meshset tables" );
445  rval = create_meshset_tables( times );
446  topState.end( rval );
447  if( MB_SUCCESS != rval ) return error( rval );
448  if( times ) times[CREATE_SET_TIME] = timer.time_elapsed();
449 
450  /**************** Create adjacency tables *********************/
451 
452  debug_barrier();
453  dbgOut.tprint( 1, "creating adjacency table\n" );
454  topState.start( "creating adjacency tables" );
455  rval = create_adjacency_tables();
456  topState.end( rval );
457  if( MB_SUCCESS != rval ) return error( rval );
458  if( times ) times[CREATE_ADJ_TIME] = timer.time_elapsed();
459 
460  /**************** Create tag data *********************/
461 
462  debug_barrier();
463  dbgOut.tprint( 1, "creating tag tables\n" );
464  topState.start( "creating tag tables" );
465  rval = gather_tags( user_tag_list, user_tag_count );
466  if( MB_SUCCESS != rval ) return error( rval );
467  rval = create_tag_tables();
468  topState.end( rval );
469  if( MB_SUCCESS != rval ) return error( rval );
470  if( times ) times[CREATE_TAG_TIME] = timer.time_elapsed();
471 
472  /************** Close serial file and reopen parallel *****************/
473 
474  if( 0 == myPcomm->proc_config().proc_rank() ) mhdf_closeFile( filePtr, &status );
475 
476  MPI_Barrier( myPcomm->proc_config().proc_comm() );
477  dbgOut.tprint( 1, "(re)opening file in parallel mode\n" );
478  unsigned long junk;
479  hid_t hdf_opt = H5Pcreate( H5P_FILE_ACCESS );
480  H5Pset_fapl_mpio( hdf_opt, myPcomm->proc_config().proc_comm(), info );
481  filePtr = mhdf_openFileWithOpt( filename, 1, &junk, id_type, hdf_opt, &status );
482  H5Pclose( hdf_opt );
483  if( !filePtr )
484  {
485  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
486  }
487 
488  if( collectiveIO )
489  {
490  dbgOut.print( 1, "USING COLLECTIVE IO\n" );
491  writeProp = H5Pcreate( H5P_DATASET_XFER );
492  H5Pset_dxpl_mpio( writeProp, H5FD_MPIO_COLLECTIVE );
493  }
494 
495  /* Test if we can use H5S_APPEND when selecting hyperslabs */
496  if( MB_SUCCESS != opts.get_null_option( "HYPERSLAB_OR" ) &&
497  ( MB_SUCCESS == opts.get_null_option( "HYPERSLAB_APPEND" ) || HDF5_can_append_hyperslabs() ) )
498  {
499  dbgOut.print( 1, "HDF5 library supports H5Sselect_hyperslab with H5S_SELECT_APPEND\n" );
500  hslabOp = H5S_SELECT_APPEND;
501  }
502 
503  dbgOut.tprint( 1, "Exiting parallel_create_file\n" );
504  return MB_SUCCESS;
505 }
506 
507 class TagNameCompare
508 {
509  Interface* iFace;
510  std::string name1, name2;
511 
512  public:
513  TagNameCompare( Interface* iface ) : iFace( iface ) {}
514  bool operator()( const WriteHDF5::TagDesc& t1, const WriteHDF5::TagDesc& t2 );
515 };
516 
517 bool TagNameCompare::operator()( const WriteHDF5::TagDesc& t1, const WriteHDF5::TagDesc& t2 )
518 {
519  iFace->tag_get_name( t1.tag_id, name1 );
520  iFace->tag_get_name( t2.tag_id, name2 );
521  return name1 < name2;
522 }
523 
524 struct serial_tag_data
525 {
526  TagType storage;
527  DataType type;
528  int size;
529  int name_len;
530  int def_val_len;
531  char name[sizeof( unsigned long )];
532 
533  static size_t pad( size_t len )
534  {
535  if( len % sizeof( unsigned long ) )
536  return len + sizeof( unsigned long ) - len % sizeof( unsigned long );
537  else
538  return len;
539  }
540 
541  static size_t def_val_bytes( int def_val_len, DataType type )
542  {
543  switch( type )
544  {
545  case MB_TYPE_BIT:
546  return def_val_len ? 1 : 0;
547  case MB_TYPE_OPAQUE:
548  return def_val_len;
549  case MB_TYPE_INTEGER:
550  return def_val_len * sizeof( int );
551  case MB_TYPE_DOUBLE:
552  return def_val_len * sizeof( double );
553  case MB_TYPE_HANDLE:
554  return def_val_len * sizeof( EntityHandle );
555  }
556  return 0;
557  }
558 
559  static size_t len( int name_len, int def_val_len, DataType type )
560  {
561  return sizeof( serial_tag_data ) + pad( name_len + def_val_bytes( def_val_len, type ) ) -
562  sizeof( unsigned long );
563  }
564  size_t len() const
565  {
566  return len( name_len, def_val_len, type );
567  }
568  void* default_value()
569  {
570  return def_val_len ? name + name_len : 0;
571  }
572  const void* default_value() const
573  {
574  return const_cast< serial_tag_data* >( this )->default_value();
575  }
576  void set_default_value( const void* val )
577  {
578  memcpy( default_value(), val, def_val_bytes( def_val_len, type ) );
579  }
580 };
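A worked example (not part of the listed source) of how serial_tag_data records are packed: each record is variable length, and len() pads the trailing name-plus-default-value bytes up to a multiple of sizeof(unsigned long), so records can sit back to back in a std::vector<unsigned char> and be walked with diter += ptr->len(), as check_serial_tag_data() does below.

    // Sketch only: one record for a tag named "DENSITY" (8 bytes including the
    // terminating null) with no default value, on a 64-bit build:
    //   name_len      = 8
    //   def_val bytes = 0
    //   len           = sizeof( serial_tag_data ) + pad( 8 + 0 ) - sizeof( unsigned long )
    //                 = sizeof( serial_tag_data )   // pad(8) == 8 == sizeof(unsigned long)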
581 
582 ErrorCode WriteHDF5Parallel::append_serial_tag_data( std::vector< unsigned char >& buffer,
583  const WriteHDF5::TagDesc& tag )
584 {
585  ErrorCode rval;
586 
587  std::string name;
588  rval = iFace->tag_get_name( tag.tag_id, name );
589  if( MB_SUCCESS != rval ) return error( rval );
590 
591  // Get name length, including space for null char
592  size_t name_len = name.size() + 1;
593  if( name_len == 1 ) return MB_SUCCESS; // Skip tags with no name
594 
595  DataType data_type;
596  rval = iFace->tag_get_data_type( tag.tag_id, data_type );
597  if( MB_SUCCESS != rval ) return error( rval );
598 
599  // Get default value
600  int def_val_len;
601  const void* def_val;
602  if( MB_SUCCESS != iFace->tag_get_default_value( tag.tag_id, def_val, def_val_len ) )
603  {
604  def_val_len = 0;
605  def_val = 0;
606  }
607 
608  // Allocate struct within buffer
609  size_t init_size = buffer.size();
610  buffer.resize( init_size + serial_tag_data::len( name_len, def_val_len, data_type ) );
611  serial_tag_data* ptr = reinterpret_cast< serial_tag_data* >( &buffer[init_size] );
612 
613  // Populate struct
614  rval = iFace->tag_get_type( tag.tag_id, ptr->storage );
615  if( MB_SUCCESS != rval ) return error( rval );
616  ptr->type = data_type;
617  rval = iFace->tag_get_length( tag.tag_id, ptr->size );
618  if( MB_VARIABLE_DATA_LENGTH == rval )
619  ptr->size = MB_VARIABLE_LENGTH;
620  else if( MB_SUCCESS != rval )
621  return error( rval );
622  ptr->name_len = name_len;
623  Range range;
624  memset( ptr->name, 0, ptr->name_len );
625  memcpy( ptr->name, name.data(), name.size() );
626  ptr->def_val_len = def_val_len;
627  ptr->set_default_value( def_val );
628 
629  return MB_SUCCESS;
630 }
631 
632 ErrorCode WriteHDF5Parallel::check_serial_tag_data( const std::vector< unsigned char >& buffer,
633  std::vector< TagDesc* >* missing,
634  std::vector< TagDesc* >* newlist )
635 {
636  ErrorCode rval;
637 
638  // Use 'write_sparse' field as a 'visited' mark
639  std::list< TagDesc >::iterator tag_iter;
640  if( missing )
641  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
642  tag_iter->write_sparse = true;
643 
644  // Use a set as a temporary for what will ultimately go in
645  // newlist because we need to pass back newlist in the order
646  // of the tagList member.
647  std::set< TagDesc* > newset;
648 
649  // Iterate over the received data, updating the local list of tags.
650  // Be careful to keep tagList sorted such that in the end all
651  // procs have the same list in the same order.
652  std::vector< unsigned char >::const_iterator diter = buffer.begin();
653  tag_iter = tagList.begin();
654  while( diter < buffer.end() )
655  {
656  // Get struct from buffer
657  const serial_tag_data* ptr = reinterpret_cast< const serial_tag_data* >( &*diter );
658 
659  // Find local struct for tag
660  std::string name( ptr->name );
661  std::string n;
662  iFace->tag_get_name( tag_iter->tag_id, n ); // Second time we've called, so shouldn't fail
663  if( n > name )
664  {
665  tag_iter = tagList.begin(); // New proc, start search from beginning
666  }
667  iFace->tag_get_name( tag_iter->tag_id, n );
668  while( n < name )
669  {
670  ++tag_iter;
671  if( tag_iter == tagList.end() ) break;
672  iFace->tag_get_name( tag_iter->tag_id, n );
673  }
674  if( tag_iter == tagList.end() || n != name )
675  { // New tag
676  TagDesc newtag;
677 
678  if( ptr->size == MB_VARIABLE_LENGTH )
679  rval = iFace->tag_get_handle( name.c_str(), ptr->def_val_len, ptr->type, newtag.tag_id,
680  MB_TAG_VARLEN | MB_TAG_CREAT | ptr->storage, ptr->default_value() );
681  else
682  rval = iFace->tag_get_handle( name.c_str(), ptr->size, ptr->type, newtag.tag_id,
683  MB_TAG_CREAT | ptr->storage, ptr->default_value() );
684  if( MB_SUCCESS != rval ) return error( rval );
685 
686  newtag.sparse_offset = 0;
687  newtag.var_data_offset = 0;
688  newtag.write_sparse = false;
689  newtag.max_num_ents = 0;
690  newtag.max_num_vals = 0;
691 
692  tag_iter = tagList.insert( tag_iter, newtag );
693  if( newlist ) newset.insert( &*tag_iter );
694  }
695  else
696  { // Check that tag is as expected
697  DataType type;
698  iFace->tag_get_data_type( tag_iter->tag_id, type );
699  if( type != ptr->type )
700  {
701  MB_SET_ERR( MB_FAILURE, "Processes have inconsistent data type for tag \"" << name << "\"" );
702  }
703  int size;
704  iFace->tag_get_length( tag_iter->tag_id, size );
705  if( size != ptr->size )
706  {
707  MB_SET_ERR( MB_FAILURE, "Processes have inconsistent size for tag \"" << name << "\"" );
708  }
709  tag_iter->write_sparse = false;
710  }
711 
712  // Step to next variable-length struct.
713  diter += ptr->len();
714  }
715 
716  // Now pass back any local tags that weren't in the buffer
717  if( missing )
718  {
719  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
720  {
721  if( tag_iter->write_sparse )
722  {
723  tag_iter->write_sparse = false;
724  missing->push_back( &*tag_iter );
725  }
726  }
727  }
728 
729  // Be careful to populate newlist in the same, sorted, order as tagList
730  if( newlist )
731  {
732  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
733  if( newset.find( &*tag_iter ) != newset.end() ) newlist->push_back( &*tag_iter );
734  }
735 
736  return MB_SUCCESS;
737 }
738 
739 static void set_bit( int position, unsigned char* bytes )
740 {
741  int byte = position / 8;
742  int bit = position % 8;
743  bytes[byte] |= ( ( (unsigned char)1 ) << bit );
744 }
745 
746 static bool get_bit( int position, const unsigned char* bytes )
747 {
748  int byte = position / 8;
749  int bit = position % 8;
750  return 0 != ( bytes[byte] & ( ( (unsigned char)1 ) << bit ) );
751 }
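A small worked example (not part of the listed source) of the bit table that the dense-tag negotiation below builds with set_bit()/get_bit(): one row of bytes_per_tag bytes per tag, one column per entity group, with nodes in the first column, the element ExportSets next, and sets last.

    // Sketch only: with 4 element groups there are 4 + 2 = 6 columns, so
    // bytes_per_tag = (6 + 7) / 8 = 1, and column i lives at bit i % 8 of byte i / 8.
    unsigned char row[1] = { 0 };
    set_bit( 0, row );                       // can write dense over nodes
    set_bit( 5, row );                       // can write dense over sets
    bool nodes_dense = get_bit( 0, row );    // true
    bool first_elems = get_bit( 1, row );    // false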
752 
753 ErrorCode WriteHDF5Parallel::create_tag_tables()
754 {
755  std::list< TagDesc >::iterator tag_iter;
756  ErrorCode rval;
757  int err;
758  const int rank = myPcomm->proc_config().proc_rank();
759  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
760 
761  subState.start( "negotiating tag list" );
762 
763  dbgOut.tprint( 1, "communicating tag metadata\n" );
764 
765  dbgOut.printf( 2, "Exchanging tag data for %d tags.\n", (int)tagList.size() );
766 
767  // Sort tagList contents in alphabetical order by tag name
768  tagList.sort( TagNameCompare( iFace ) );
769 
770  // Negotiate total list of tags to write
771 
772  // Build concatenated list of all tag data
773  std::vector< unsigned char > tag_buffer;
774  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
775  {
776  rval = append_serial_tag_data( tag_buffer, *tag_iter );
777  CHECK_MB( rval );
778  }
779 
780  // Broadcast list from root to all other procs
781  unsigned long size = tag_buffer.size();
782  err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, comm );
783  CHECK_MPI( err );
784  tag_buffer.resize( size );
785  err = MPI_Bcast( &tag_buffer[0], size, MPI_UNSIGNED_CHAR, 0, comm );
786  CHECK_MPI( err );
787 
788  // Update local tag list
789  std::vector< TagDesc* > missing;
790  rval = check_serial_tag_data( tag_buffer, &missing, 0 );
791  CHECK_MB( rval );
792 
793  // Check if we're done (0->done, 1->more, 2+->error)
794  int code, lcode = ( MB_SUCCESS != rval ) ? rval + 2 : missing.empty() ? 0 : 1;
795  err = MPI_Allreduce( &lcode, &code, 1, MPI_INT, MPI_MAX, comm );
796  CHECK_MPI( err );
797  if( code > 1 )
798  {
799  MB_SET_ERR_CONT( "Inconsistent tag definitions between procs" );
800  return error( (ErrorCode)( code - 2 ) );
801  }
802 
803  // If not done...
804  if( code )
805  {
806  dbgOut.print( 1, "Not all procs had same tag definitions, negotiating...\n" );
807 
808  // Get tags defined on this proc but not on root proc
809  tag_buffer.clear();
810  for( size_t i = 0; i < missing.size(); ++i )
811  {
812  rval = append_serial_tag_data( tag_buffer, *missing[i] );
813  CHECK_MB( rval );
814  }
815 
816  // Gather extra tag definitions on root processor
817  std::vector< int > junk; // don't care how many from each proc
818  assert( rank || tag_buffer.empty() ); // must be empty on root
819  err = my_Gatherv( &tag_buffer[0], tag_buffer.size(), MPI_UNSIGNED_CHAR, tag_buffer, junk, 0, comm );
820  CHECK_MPI( err );
821 
822  // Process serialized tag descriptions on root, and
823  rval = MB_SUCCESS;
824  if( 0 == rank )
825  {
826  // Process serialized tag descriptions on root, and
827  std::vector< TagDesc* > newlist;
828  rval = check_serial_tag_data( tag_buffer, 0, &newlist );
829  tag_buffer.clear();
830  // re-serialize a unique list of new tag definitions
831  for( size_t i = 0; MB_SUCCESS == rval && i != newlist.size(); ++i )
832  {
833  rval = append_serial_tag_data( tag_buffer, *newlist[i] );
834  CHECK_MB( rval );
835  }
836  }
837 
838  // Broadcast any new tag definitions from root to other procs
839  long this_size = tag_buffer.size();
840  if( MB_SUCCESS != rval ) this_size = -rval;
841  err = MPI_Bcast( &this_size, 1, MPI_LONG, 0, comm );
842  CHECK_MPI( err );
843  if( this_size < 0 )
844  {
845  MB_SET_ERR_CONT( "Inconsistent tag definitions between procs" );
846  return error( (ErrorCode)-this_size );
847  }
848  tag_buffer.resize( this_size );
849  err = MPI_Bcast( &tag_buffer[0], this_size, MPI_UNSIGNED_CHAR, 0, comm );
850  CHECK_MPI( err );
851 
852  // Process new tag definitions
853  rval = check_serial_tag_data( tag_buffer, 0, 0 );
854  CHECK_MB( rval );
855  }
856 
857  subState.end();
858  subState.start( "negotiate which element/tag combinations are dense" );
859 
860  // Figure out for which tag/element combinations we can
861  // write dense tag data.
862 
863  // Construct a table of bits,
864  // where each row of the table corresponds to a tag
865  // and each column to an element group.
866 
867  // Two extra, because first is nodes and last is sets.
868  // (n+7)/8 is ceil(n/8)
869  const int bytes_per_tag = ( exportList.size() + 9 ) / 8;
870  std::vector< unsigned char > data( bytes_per_tag * tagList.size(), 0 );
871  std::vector< unsigned char > recv( data.size(), 0 );
872  unsigned char* iter = &data[0];
873  if( writeTagDense && !data.empty() )
874  {
875  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, iter += bytes_per_tag )
876  {
877 
878  Range tagged;
879  rval = get_sparse_tagged_entities( *tag_iter, tagged );
880  CHECK_MB( rval );
881 
882  int s;
883  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) ) continue;
884 
885  std::string n;
886  iFace->tag_get_name( tag_iter->tag_id,
887  n ); // Second time we've called, so shouldn't fail
888 
889  // Check if we want to write this tag in dense format even if not
890  // all of the entities have a tag value. The criterion of this
891  // is that the tag be dense, have a default value, and have at
892  // least 2/3 of the entities tagged.
893  bool prefer_dense = false;
894  TagType type;
895  rval = iFace->tag_get_type( tag_iter->tag_id, type );
896  CHECK_MB( rval );
897  if( MB_TAG_DENSE == type )
898  {
899  const void* defval = 0;
900  rval = iFace->tag_get_default_value( tag_iter->tag_id, defval, s );
901  if( MB_SUCCESS == rval ) prefer_dense = true;
902  }
903 
904  int i = 0;
905  if( check_dense_format_tag( nodeSet, tagged, prefer_dense ) )
906  {
907  set_bit( i, iter );
908  dbgOut.printf( 2, "Can write dense data for \"%s\"/Nodes\n", n.c_str() );
909  }
910  std::list< ExportSet >::const_iterator ex_iter = exportList.begin();
911  for( ++i; ex_iter != exportList.end(); ++i, ++ex_iter )
912  // When writing in parallel, some of these element ranges might be empty on
913  // some partitions. Do not mark this tag as sparse just because of that;
914  // leave it dense if we prefer dense.
915  // if we prefer dense
916  if( ( prefer_dense && ex_iter->range.empty() ) ||
917  check_dense_format_tag( *ex_iter, tagged, prefer_dense ) )
918  {
919  set_bit( i, iter );
920  dbgOut.printf( 2, "Can write dense data for \"%s\"/%s\n", n.c_str(), ex_iter->name() );
921  }
922  }
923  if( check_dense_format_tag( setSet, tagged, prefer_dense ) )
924  {
925  set_bit( i, iter );
926  dbgOut.printf( 2, "Can write dense data for \"%s\"/Sets\n", n.c_str() );
927  }
928  }
929 
930  // Do bit-wise AND of list over all processors (only write dense format
931  // if all processes want dense format for this group of entities).
932  err = MPI_Allreduce( &data[0], &recv[0], data.size(), MPI_UNSIGNED_CHAR, MPI_BAND,
933  myPcomm->proc_config().proc_comm() );
934  CHECK_MPI( err );
935  } // if (writeTagDense)
936 
937  // Store initial counts for sparse-formatted tag data.
938  // The total number of values to send and receive will be the number of
939  // tags plus the number of var-len tags because we need to negotiate
940  // offsets into two different tables for the var-len tags.
941  std::vector< long > counts;
942 
943  // Record dense tag/element combinations
944  iter = &recv[0];
945  const unsigned char* iter2 = &data[0];
946  for( tag_iter = tagList.begin(); tag_iter != tagList.end();
947  ++tag_iter, iter += bytes_per_tag, iter2 += bytes_per_tag )
948  {
949 
950  Range tagged;
951  rval = get_sparse_tagged_entities( *tag_iter, tagged );
952  CHECK_MB( rval );
953 
954  std::string n;
955  iFace->tag_get_name( tag_iter->tag_id, n ); // Second time we've called, so shouldn't fail
956 
957  int i = 0;
958  if( get_bit( i, iter ) )
959  {
960  assert( get_bit( i, iter2 ) );
961  tag_iter->dense_list.push_back( nodeSet );
962  tagged -= nodeSet.range;
963  dbgOut.printf( 2, "Will write dense data for \"%s\"/Nodes\n", n.c_str() );
964  }
965  std::list< ExportSet >::const_iterator ex_iter = exportList.begin();
966  for( ++i; ex_iter != exportList.end(); ++i, ++ex_iter )
967  {
968  if( get_bit( i, iter ) )
969  {
970  assert( get_bit( i, iter2 ) );
971  tag_iter->dense_list.push_back( *ex_iter );
972  dbgOut.printf( 2, "Will write dense data for \"%s\"/%s\n", n.c_str(), ex_iter->name() );
973  tagged -= ex_iter->range;
974  }
975  }
976  if( get_bit( i, iter ) )
977  {
978  assert( get_bit( i, iter2 ) );
979  tag_iter->dense_list.push_back( setSet );
980  dbgOut.printf( 2, "Will write dense data for \"%s\"/Sets\n", n.c_str() );
981  tagged -= setSet.range;
982  }
983 
984  counts.push_back( tagged.size() );
985 
986  int s;
987  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) )
988  {
989  unsigned long data_len;
990  rval = get_tag_data_length( *tag_iter, tagged, data_len );
991  CHECK_MB( rval );
992  counts.push_back( data_len );
993  }
994  }
995 
996  subState.end();
997  subState.start( "Negotiate offsets for sparse tag info" );
998 
999  std::vector< long > offsets( counts.size() ), maxima( counts.size() ), totals( counts.size() );
1000  rval = create_dataset( counts.size(), &counts[0], &offsets[0], &maxima[0], &totals[0] );
1001  CHECK_MB( rval );
1002 
1003  // Copy values into local structs and if root then create tables
1004  size_t idx = 0;
1005  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++idx )
1006  {
1007  assert( idx < counts.size() );
1008  tag_iter->sparse_offset = offsets[idx];
1009  tag_iter->max_num_ents = maxima[idx];
1010  tag_iter->write_sparse = ( 0 != totals[idx] );
1011  int s;
1012  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) )
1013  {
1014  ++idx;
1015  assert( idx < counts.size() );
1016  tag_iter->var_data_offset = offsets[idx];
1017  tag_iter->max_num_vals = maxima[idx];
1018  }
1019  else
1020  {
1021  tag_iter->var_data_offset = 0;
1022  tag_iter->max_num_vals = 0;
1023  }
1024  }
1025 
1026  subState.end();
1027 
1028  // Create tag tables on root process
1029  if( 0 == myPcomm->proc_config().proc_rank() )
1030  {
1031  size_t iidx = 0;
1032  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++iidx )
1033  {
1034  assert( iidx < totals.size() );
1035  unsigned long num_ents = totals[iidx];
1036  unsigned long num_val = 0;
1037  int s;
1038  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) )
1039  {
1040  ++iidx;
1041  assert( iidx < totals.size() );
1042  num_val = totals[iidx];
1043  }
1044  dbgOut.printf( 2, "Writing tag description for tag 0x%lx with %lu values\n",
1045  (unsigned long)tag_iter->tag_id, num_val ? num_val : num_ents );
1046 
1047  rval = create_tag( *tag_iter, num_ents, num_val );
1048  if( MB_SUCCESS != rval ) return error( rval );
1049  }
1050  }
1051 
1052  if( dbgOut.get_verbosity() > 1 )
1053  {
1054  dbgOut.printf( 2, "Tags: %12s %8s %8s %8s %8s %8s\n", "Name", "Count", "Offset", "Var Off", "Max Ent",
1055  "Handle" );
1056 
1057  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
1058  {
1059  std::string name;
1060  iFace->tag_get_name( tag_iter->tag_id, name );
1061  size_t this_size;
1062  get_num_sparse_tagged_entities( *tag_iter, this_size );
1063  dbgOut.printf( 2, "%18s %8lu %8lu %8lu %8lu 0x%7lx\n", name.c_str(), (unsigned long)this_size,
1064  (unsigned long)tag_iter->sparse_offset, (unsigned long)tag_iter->var_data_offset,
1065  (unsigned long)tag_iter->max_num_ents, (unsigned long)tag_iter->tag_id );
1066  }
1067  }
1068 
1069  return MB_SUCCESS;
1070 }
1071 
1072 struct DatasetVals
1073 {
1074  long start_id;
1075  long max_count;
1076  long total;
1077 };
1078 STATIC_ASSERT( ( sizeof( DatasetVals ) == 3 * sizeof( long ) ) );
1079 
1080 ErrorCode WriteHDF5Parallel::create_dataset( int num_datasets,
1081  const long* num_owned,
1082  long* offsets_out,
1083  long* max_proc_entities,
1084  long* total_entities,
1085  const DataSetCreator& creator,
1086  ExportSet* groups[],
1087  wid_t* first_ids_out )
1088 {
1089  int result;
1090  ErrorCode rval;
1091  const unsigned rank = myPcomm->proc_config().proc_rank();
1092  const unsigned nproc = myPcomm->proc_config().proc_size();
1093  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1094 
1095  // Gather entity counts for each processor on root
1096  std::vector< long > counts( rank ? 0 : nproc * num_datasets );
1097  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &num_owned, sizeof( long ) );
1098  result = MPI_Gather( const_cast< long* >( num_owned ), num_datasets, MPI_LONG, &counts[0], num_datasets, MPI_LONG,
1099  0, comm );
1100  CHECK_MPI( result );
1101 
1102  // Create node data in file
1103  DatasetVals zero_val = { 0, 0, 0 };
1104  std::vector< DatasetVals > cumulative( num_datasets, zero_val );
1105  if( rank == 0 )
1106  {
1107  for( unsigned i = 0; i < nproc; i++ )
1108  {
1109  const long* proc_data = &counts[i * num_datasets];
1110  for( int index = 0; index < num_datasets; ++index )
1111  {
1112  cumulative[index].total += proc_data[index];
1113  if( proc_data[index] > cumulative[index].max_count ) cumulative[index].max_count = proc_data[index];
1114  }
1115  }
1116 
1117  for( int index = 0; index < num_datasets; ++index )
1118  {
1119  if( cumulative[index].total )
1120  {
1121  rval = creator( this, cumulative[index].total, groups ? groups[index] : 0, cumulative[index].start_id );
1122  CHECK_MB( rval );
1123  }
1124  else
1125  {
1126  cumulative[index].start_id = -1;
1127  }
1128  }
1129  }
1130 
1131  // Send id offset to every proc
1132  result = MPI_Bcast( (void*)&cumulative[0], 3 * num_datasets, MPI_LONG, 0, comm );
1133  CHECK_MPI( result );
1134  for( int index = 0; index < num_datasets; ++index )
1135  {
1136  if( first_ids_out ) first_ids_out[index] = (wid_t)cumulative[index].start_id;
1137  max_proc_entities[index] = cumulative[index].max_count;
1138  total_entities[index] = cumulative[index].total;
1139  }
1140 
1141  // Convert array of per-process counts to per-process offsets
1142  if( rank == 0 )
1143  {
1144  // Initialize prev_size with data sizes for root process
1145  std::vector< long > prev_size( counts.begin(), counts.begin() + num_datasets );
1146  // Root process gets offset zero
1147  std::fill( counts.begin(), counts.begin() + num_datasets, 0L );
1148  // For each proc other than this one (root)
1149  for( unsigned i = 1; i < nproc; ++i )
1150  {
1151  // Get pointer to offsets for previous process in list
1152  long* prev_data = &counts[( i - 1 ) * num_datasets];
1153  // Get pointer to offsets for this process in list
1154  long* proc_data = &counts[i * num_datasets];
1155  // For each data set
1156  for( int j = 0; j < num_datasets; ++j )
1157  {
1158  // Get size of data in dataset from process i
1159  long mysize = proc_data[j];
1160  // Offset for process i is offset of previous process plus
1161  // number of values previous process will write
1162  proc_data[j] = prev_data[j] + prev_size[j];
1163  // Store my size, as it is no longer available in 'counts'
1164  prev_size[j] = mysize;
1165  }
1166  }
1167  }
1168 
1169  // Send each proc its offset in the table
1170  if( rank == 0 )
1171  {
1172  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &counts[0], num_datasets * nproc * sizeof( long ) );
1173  }
1174  result = MPI_Scatter( &counts[0], num_datasets, MPI_LONG, offsets_out, num_datasets, MPI_LONG, 0, comm );
1175  CHECK_MPI( result );
1176 
1177  return MB_SUCCESS;
1178 }
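A worked example (not part of the listed source) of the offset computation in create_dataset() above: the gathered per-rank counts are converted, per dataset, into an exclusive prefix sum before being scattered back.

    // Sketch only, one dataset and three ranks:
    //   gathered counts on root : { 5, 2, 4 }
    //   offsets after the loop  : { 0, 5, 7 }   (rank i gets offset[i-1] + count[i-1])
    //   total_entities = 11, max_proc_entities = 5
    // MPI_Scatter then delivers each rank its own entry of offsets_out.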
1179 
1180 ErrorCode WriteHDF5Parallel::create_node_table( int dimension )
1181 {
1182  nodeSet.num_nodes = dimension; // Put it here so NodeSetCreator can access it
1183  struct NodeSetCreator : public DataSetCreator
1184  {
1185  ErrorCode operator()( WriteHDF5* file, long count, const ExportSet* group, long& start_id ) const
1186  {
1187  mhdf_Status status;
1188  hid_t handle = mhdf_createNodeCoords( file->file_ptr(), group->num_nodes, count, &start_id, &status );
1189  CHECK_HDFN( status );
1190  mhdf_closeData( file->file_ptr(), handle, &status );
1191  CHECK_HDFN( status );
1192  return MB_SUCCESS;
1193  }
1194  };
1195 
1196  const long count = nodeSet.range.size();
1197  ExportSet* array[] = { &nodeSet };
1198  ErrorCode rval = create_dataset( 1, &count, &nodeSet.offset, &nodeSet.max_num_ents, &nodeSet.total_num_ents,
1199  NodeSetCreator(), array, &nodeSet.first_id );
1200  CHECK_MB( rval );
1201  return assign_ids( nodeSet.range, nodeSet.first_id + nodeSet.offset );
1202 }
1203 
1204 struct elemtype
1205 {
1206  int mbtype;
1207  int numnode;
1208 
1209  elemtype( int vals[2] ) : mbtype( vals[0] ), numnode( vals[1] ) {}
1210  elemtype( int t, int n ) : mbtype( t ), numnode( n ) {}
1211 
1212  bool operator==( const elemtype& other ) const
1213  {
1214  return mbtype == other.mbtype && ( mbtype == MBENTITYSET || numnode == other.numnode );
1215  }
1216  bool operator<( const elemtype& other ) const
1217  {
1218  if( mbtype > other.mbtype ) return false;
1219 
1220  return mbtype < other.mbtype || ( mbtype != MBENTITYSET && numnode < other.numnode );
1221  }
1222  bool operator!=( const elemtype& other ) const
1223  {
1224  return !this->operator==( other );
1225  }
1226 };
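A brief example (not part of the listed source) of the ordering elemtype defines, which mirrors how element groups are kept sorted in this file: by MOAB entity type first, then by node count, with set entries ignoring the node count.

    // Sketch only:
    //   elemtype( MBTRI, 3 )  <  elemtype( MBTRI, 6 )    // same type, fewer nodes first
    //   elemtype( MBTRI, 6 )  <  elemtype( MBQUAD, 4 )   // type compares first
    //   elemtype( MBENTITYSET, 0 ) == elemtype( MBENTITYSET, 5 )  // node count ignored for sets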
1227 
1228 ErrorCode WriteHDF5Parallel::negotiate_type_list()
1229 {
1230  int result;
1231  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1232 
1233  exportList.sort();
1234  int num_types = exportList.size();
1235 
1236  // Get list of types on this processor
1237  typedef std::vector< std::pair< int, int > > typelist;
1238  typelist my_types( num_types );
1239  (void)VALGRIND_MAKE_VEC_UNDEFINED( my_types );
1240  typelist::iterator viter = my_types.begin();
1241  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
1242  {
1243  viter->first = eiter->type;
1244  viter->second = eiter->num_nodes;
1245  ++viter;
1246  }
1247 
1248  dbgOut.print( 2, "Local Element Types:\n" );
1249  for( viter = my_types.begin(); viter != my_types.end(); ++viter )
1250  {
1251  int type = viter->first;
1252  int count = viter->second;
1253  dbgOut.printf( 2, " %s : %d\n", CN::EntityTypeName( (EntityType)type ), count );
1254  }
1255 
1256  // Broadcast number of types from root to all nodes
1257  int num_types0 = num_types;
1258  result = MPI_Bcast( &num_types0, 1, MPI_INT, 0, comm );
1259  CHECK_MPI( result );
1260  // Broadcast type list from root to all nodes
1261  typelist root_types( num_types0 );
1262  if( 0 == myPcomm->proc_config().proc_rank() ) root_types = my_types;
1263  result = MPI_Bcast( (void*)&root_types[0], 2 * num_types0, MPI_INT, 0, comm );
1264  CHECK_MPI( result );
1265 
1266  // Build local list of any types that root did not know about
1267  typelist non_root_types;
1268  viter = root_types.begin();
1269  for( typelist::iterator iter = my_types.begin(); iter != my_types.end(); ++iter )
1270  {
1271  if( viter == root_types.end() || *viter != *iter )
1272  non_root_types.push_back( *iter );
1273  else
1274  ++viter;
1275  }
1276 
1277  // Determine if any process had types not defined on the root
1278  int non_root_count = non_root_types.size();
1279  int not_done;
1280  result = MPI_Allreduce( &non_root_count, &not_done, 1, MPI_INT, MPI_LOR, comm );
1281  CHECK_MPI( result );
1282  if( not_done )
1283  {
1284  // Get number of types each processor has that root does not
1285  std::vector< int > counts( myPcomm->proc_config().proc_size() );
1286  int two_count = 2 * non_root_count;
1287  result = MPI_Gather( &two_count, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, comm );
1288  CHECK_MPI( result );
1289 
1290  // Get list of types from each processor
1291  std::vector< int > displs( myPcomm->proc_config().proc_size() + 1 );
1292  (void)VALGRIND_MAKE_VEC_UNDEFINED( displs );
1293  displs[0] = 0;
1294  for( unsigned long i = 1; i <= myPcomm->proc_config().proc_size(); ++i )
1295  displs[i] = displs[i - 1] + counts[i - 1];
1296  int total = displs[myPcomm->proc_config().proc_size()];
1297  typelist alltypes( total / 2 );
1298  (void)VALGRIND_MAKE_VEC_UNDEFINED( alltypes );
1299  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &non_root_types[0], non_root_types.size() * sizeof( int ) );
1300  result = MPI_Gatherv( (void*)&non_root_types[0], 2 * non_root_count, MPI_INT, (int*)&alltypes[0], &counts[0],
1301  &displs[0], MPI_INT, 0, comm );
1302  CHECK_MPI( result );
1303 
1304  // Merge type lists.
1305  // Prefer O(n) insertions with O(ln n) search time because
1306  // we expect data from a potentially large number of processes,
1307  // but with a small total number of element types.
1308  if( 0 == myPcomm->proc_config().proc_rank() )
1309  {
1310  for( viter = alltypes.begin(); viter != alltypes.end(); ++viter )
1311  {
1312  typelist::iterator titer = std::lower_bound( my_types.begin(), my_types.end(), *viter );
1313  if( titer == my_types.end() || *titer != *viter ) my_types.insert( titer, *viter );
1314  }
1315 
1316  dbgOut.print( 2, "Global Element Types:\n" );
1317  for( viter = my_types.begin(); viter != my_types.end(); ++viter )
1318  dbgOut.printf( 2, " %s : %d\n", CN::EntityTypeName( (EntityType)viter->first ), viter->second );
1319  }
1320 
1321  // Send total number of types to each processor
1322  total = my_types.size();
1323  result = MPI_Bcast( &total, 1, MPI_INT, 0, comm );
1324  CHECK_MPI( result );
1325 
1326  // Send list of types to each processor
1327  my_types.resize( total );
1328  result = MPI_Bcast( (void*)&my_types[0], 2 * total, MPI_INT, 0, comm );
1329  CHECK_MPI( result );
1330  }
1331  else
1332  {
1333  // Special case: if root had types but some subset of procs did not
1334  // have those types, but there are no types that the root doesn't
1335  // know about then we still need to update processes that are missing
1336  // types.
1337  my_types.swap( root_types );
1338  }
1339 
1340  // Insert missing types into exportList, with an empty
1341  // range of entities to export.
1342  std::list< ExportSet >::iterator ex_iter = exportList.begin();
1343  for( viter = my_types.begin(); viter != my_types.end(); ++viter )
1344  {
1345  while( ex_iter != exportList.end() && *ex_iter < *viter )
1346  ++ex_iter;
1347 
1348  if( ex_iter == exportList.end() || !( *ex_iter == *viter ) )
1349  {
1350  ExportSet insert;
1351  insert.type = (EntityType)viter->first;
1352  insert.num_nodes = viter->second;
1353  insert.first_id = 0;
1354  insert.offset = 0;
1355  insert.adj_offset = 0;
1356  ex_iter = exportList.insert( ex_iter, insert );
1357  }
1358  }
1359 
1360  return MB_SUCCESS;
1361 }
1362 
1363 ErrorCode WriteHDF5Parallel::create_element_tables()
1364 {
1365  struct ElemSetCreator : public DataSetCreator
1366  {
1367  ErrorCode operator()( WriteHDF5* file, long size, const ExportSet* ex, long& start_id ) const
1368  {
1369  return file->create_elem_table( *ex, size, start_id );
1370  }
1371  };
1372 
1373  const int numtypes = exportList.size();
1374  if (numtypes > 0)
1375  {
1376  std::vector< ExportSet* > groups( numtypes );
1377  std::vector< long > counts( numtypes ), offsets( numtypes ), max_ents( numtypes ), total_ents( numtypes );
1378  std::vector< wid_t > start_ids( numtypes );
1379 
1380  size_t idx = 0;
1381  std::list< ExportSet >::iterator ex_iter;
1382  for( ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter, ++idx )
1383  {
1384  groups[idx] = &*ex_iter;
1385  counts[idx] = ex_iter->range.size();
1386  }
1387  ErrorCode rval = create_dataset( numtypes, &counts[0], &offsets[0], &max_ents[0], &total_ents[0], ElemSetCreator(),
1388  &groups[0], &start_ids[0] );
1389  CHECK_MB( rval );
1390 
1391  for( idx = 0, ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter, ++idx )
1392  {
1393  ex_iter->first_id = start_ids[idx];
1394  ex_iter->offset = offsets[idx];
1395  ex_iter->max_num_ents = max_ents[idx];
1396  ex_iter->total_num_ents = total_ents[idx];
1397  rval = assign_ids( ex_iter->range, ex_iter->first_id + ex_iter->offset );
1398  CHECK_MB( rval );
1399  }
1400  }
1401 
1402  return MB_SUCCESS;
1403 }
1404 
1405 ErrorCode WriteHDF5Parallel::create_adjacency_tables()
1406 {
1407  struct AdjSetCreator : public DataSetCreator
1408  {
1409  ErrorCode operator()( WriteHDF5* file, long size, const ExportSet* ex, long& start_id ) const
1410  {
1411  mhdf_Status status;
1412  hid_t handle = mhdf_createAdjacency( file->file_ptr(), ex->name(), size, &status );
1413  CHECK_HDFN( status );
1414  mhdf_closeData( file->file_ptr(), handle, &status );
1415  CHECK_HDFN( status );
1416  start_id = -1;
1417  return MB_SUCCESS;
1418  }
1419  };
1420 
1421  std::vector< ExportSet* > groups;
1422 #ifdef WRITE_NODE_ADJACENCIES
1423  groups.push_back( &nodeSet );
1424 #endif
1425  for( std::list< ExportSet >::iterator ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter )
1426  groups.push_back( &*ex_iter );
1427 
1428  ErrorCode rval;
1429  const int numtypes = groups.size();
1430  if (numtypes > 0)
1431  {
1432  std::vector< long > counts( numtypes );
1433  std::vector< long > offsets( numtypes );
1434  std::vector< long > max_ents( numtypes );
1435  std::vector< long > totals( numtypes );
1436  for( int i = 0; i < numtypes; ++i )
1437  {
1438  wid_t count;
1439  rval = count_adjacencies( groups[i]->range, count );
1440  CHECK_MB( rval );
1441  counts[i] = count;
1442  }
1443 
1444  rval = create_dataset( numtypes, &counts[0], &offsets[0], &max_ents[0], &totals[0], AdjSetCreator(), &groups[0] );
1445  CHECK_MB( rval );
1446 
1447  // Cppcheck warning (false positive): variable groups is assigned a value that is never used
1448  for( int i = 0; i < numtypes; ++i )
1449  {
1450  groups[i]->max_num_adjs = max_ents[i];
1451  groups[i]->adj_offset = offsets[i];
1452  }
1453  }
1454  return MB_SUCCESS;
1455 }
1456 
1457 const unsigned SSVB = 3;
1458 
1459 void WriteHDF5Parallel::print_set_sharing_data( const Range& range, const char* label, Tag idt )
1460 {
1461  dbgOut.printf( SSVB, "set\tid\towner\t%-*s\tfid\tshared\n", (int)( sizeof( EntityHandle ) * 2 ), "handle" );
1462  for( Range::iterator it = range.begin(); it != range.end(); ++it )
1463  {
1464  int id;
1465  iFace->tag_get_data( idt, &*it, 1, &id );
1466  EntityHandle handle = 0;
1467  unsigned owner = 0;
1468  wid_t file_id = 0;
1469  myPcomm->get_entityset_owner( *it, owner, &handle );
1470  if( !idMap.find( *it, file_id ) ) file_id = 0;
1471  dbgOut.printf( SSVB, "%s\t%d\t%u\t%lx\t%lu\t", label, id, owner, (unsigned long)handle,
1472  (unsigned long)file_id );
1473  std::vector< unsigned > procs;
1474  myPcomm->get_entityset_procs( *it, procs );
1475  if( procs.empty() )
1476  dbgOut.print( SSVB, "<none>\n" );
1477  else
1478  {
1479  for( unsigned i = 0; i < procs.size() - 1; ++i )
1480  dbgOut.printf( SSVB, "%u,", procs[i] );
1481  dbgOut.printf( SSVB, "%u\n", procs.back() );
1482  }
1483  }
1484 }
1485 
1486 void WriteHDF5Parallel::print_shared_sets()
1487 {
1488  const char* tag_names[][2] = { { MATERIAL_SET_TAG_NAME, "block" },
1489  { DIRICHLET_SET_TAG_NAME, "nodeset" },
1490  { NEUMANN_SET_TAG_NAME, "sideset" },
1491  { 0, 0 } };
1492 
1493  for( int i = 0; tag_names[i][0]; ++i )
1494  {
1495  Tag tag;
1496  if( MB_SUCCESS != iFace->tag_get_handle( tag_names[i][0], 1, MB_TYPE_INTEGER, tag ) ) continue;
1497 
1498  Range tagged;
1499  iFace->get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, tagged );
1500  print_set_sharing_data( tagged, tag_names[i][1], tag );
1501  }
1502 
1503  Tag geom, id;
1504  if( MB_SUCCESS != iFace->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom ) ) return;
1505  id = iFace->globalId_tag();
1506 
1507  const char* geom_names[] = { "vertex", "curve", "surface", "volume" };
1508  for( int d = 0; d <= 3; ++d )
1509  {
1510  Range tagged;
1511  const void* vals[] = { &d };
1512  iFace->get_entities_by_type_and_tag( 0, MBENTITYSET, &geom, vals, 1, tagged );
1513  print_set_sharing_data( tagged, geom_names[d], id );
1514  }
1515 }
1516 
1517 ErrorCode WriteHDF5Parallel::communicate_shared_set_ids( const Range& owned, const Range& remote )
1518 {
1519  ErrorCode rval;
1520  int mperr;
1521  const int TAG = 0xD0E;
1522  // const unsigned rank = myPcomm->proc_config().proc_rank();
1523  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1524 
1525  dbgOut.tprint( 1, "COMMUNICATING SHARED SET IDS\n" );
1526  dbgOut.print( 6, "Owned, shared sets: ", owned );
1527 
1528  // Post receive buffers for all procs for which we share sets
1529 
1530  std::vector< unsigned > procs;
1531  rval = myPcomm->get_entityset_owners( procs );
1532  CHECK_MB( rval );
1533  std::vector< unsigned >::iterator it = std::find( procs.begin(), procs.end(), myPcomm->proc_config().proc_rank() );
1534  if( it != procs.end() ) procs.erase( it );
1535 
1536  std::vector< MPI_Request > recv_req( procs.size(), MPI_REQUEST_NULL );
1537  std::vector< std::vector< unsigned long > > recv_buf( procs.size() );
1538 
1539  size_t recv_count = 0;
1540  for( size_t i = 0; i < procs.size(); ++i )
1541  {
1542  Range tmp;
1543  rval = myPcomm->get_owned_sets( procs[i], tmp );
1544  CHECK_MB( rval );
1545  size_t count =
1546  intersect( tmp, remote ).size(); // Necessary because we might not be writing all of the database
1547  if( count )
1548  {
1549  dbgOut.printf( 6, "Sets owned by proc %u (remote handles): ", procs[i] );
1550  if( dbgOut.get_verbosity() >= 6 )
1551  {
1552  Range remote_handles;
1553  tmp = intersect( tmp, remote );
1554  for( Range::iterator j = tmp.begin(); j != tmp.end(); ++j )
1555  {
1556  unsigned r;
1557  EntityHandle h;
1558  myPcomm->get_entityset_owner( *j, r, &h );
1559  assert( r == procs[i] );
1560  remote_handles.insert( h );
1561  }
1562  dbgOut.print( 6, remote_handles );
1563  }
1564  recv_count++;
1565  recv_buf[i].resize( 2 * count + 1 );
1566  dbgOut.printf( 5, "Posting receive buffer of size %lu for proc %u (%lu of %lu owned sets)\n",
1567  (unsigned long)recv_buf[i].size(), procs[i], count, tmp.size() );
1568  mperr =
1569  MPI_Irecv( &recv_buf[i][0], recv_buf[i].size(), MPI_UNSIGNED_LONG, procs[i], TAG, comm, &recv_req[i] );
1570  CHECK_MPI( mperr );
1571  }
1572  }
1573 
1574  // Send set ids to all procs with which we share them
1575 
1576  // First build per-process lists of sets for which we need to send data
1577  std::map< unsigned, Range > send_sets;
1578  std::vector< unsigned > set_procs;
1579  for( Range::reverse_iterator i = owned.rbegin(); i != owned.rend(); ++i )
1580  {
1581  set_procs.clear();
1582  rval = myPcomm->get_entityset_procs( *i, set_procs );
1583  CHECK_MB( rval );
1584  for( size_t j = 0; j < set_procs.size(); ++j )
1585  if( set_procs[j] != myPcomm->proc_config().proc_rank() ) send_sets[set_procs[j]].insert( *i );
1586  }
1587  assert( send_sets.find( myPcomm->proc_config().proc_rank() ) == send_sets.end() );
1588 
1589  // Now send the data
1590  std::vector< std::vector< unsigned long > > send_buf( send_sets.size() );
1591  std::vector< MPI_Request > send_req( send_sets.size() );
1592  std::map< unsigned, Range >::iterator si = send_sets.begin();
1593  for( size_t i = 0; si != send_sets.end(); ++si, ++i )
1594  {
1595  dbgOut.printf( 6, "Sending data for shared sets to proc %u: ", si->first );
1596  dbgOut.print( 6, si->second );
1597 
1598  send_buf[i].reserve( 2 * si->second.size() + 1 );
1599  send_buf[i].push_back( si->second.size() );
1600  for( Range::iterator j = si->second.begin(); j != si->second.end(); ++j )
1601  {
1602  send_buf[i].push_back( *j );
1603  send_buf[i].push_back( idMap.find( *j ) );
1604  }
1605  dbgOut.printf( 5, "Sending buffer of size %lu to proc %u (%lu of %lu owned sets)\n",
1606  (unsigned long)send_buf[i].size(), si->first, si->second.size(), owned.size() );
1607  mperr = MPI_Isend( &send_buf[i][0], send_buf[i].size(), MPI_UNSIGNED_LONG, si->first, TAG, comm, &send_req[i] );
1608  }
1609 
1610  // Process received data
1611  MPI_Status status;
1612  int idx;
1613  while( recv_count-- )
1614  {
1615  mperr = MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status );
1616  CHECK_MPI( mperr );
1617 
1618  assert( (unsigned)status.MPI_SOURCE == procs[idx] );
1619  assert( 2 * recv_buf[idx].front() + 1 == recv_buf[idx].size() );
1620  const size_t n = std::min< size_t >( recv_buf[idx].front(), ( recv_buf[idx].size() - 1 ) / 2 );
1621  dbgOut.printf( 5, "Received buffer of size %lu from proc %d\n", (unsigned long)( 2 * n + 1 ),
1622  (int)status.MPI_SOURCE );
1623 
1624  for( size_t i = 0; i < n; ++i )
1625  {
1626  EntityHandle handle = 0;
1627  rval = myPcomm->get_entityset_local_handle( procs[idx], recv_buf[idx][2 * i + 1], handle );
1628  CHECK_MB( rval );
1629  assert( handle != 0 );
1630  if( !idMap.insert( handle, recv_buf[idx][2 * i + 2], 1 ).second )
1631  error( MB_FAILURE ); // Conflicting IDs??????
1632  }
1633 
1634  recv_req[idx] = MPI_REQUEST_NULL;
1635  }
1636  assert( MPI_SUCCESS == MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status ) &&
1637  MPI_UNDEFINED == idx ); // Check that we got them all
1638 
1639  // Wait for all sends to complete before we release send
1640  // buffers (implicitly releases when we return from this function)
1641 
1642  std::vector< MPI_Status > stats( send_req.size() );
1643  mperr = MPI_Waitall( send_req.size(), &send_req[0], &stats[0] );
1644  CHECK_MPI( mperr );
1645 
1647 
1648  return MB_SUCCESS;
1649 }
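A sketch (not part of the listed source) of the message format exchanged above: each owning process sends, per sharing process, a count followed by (owner handle, file id) pairs, which is why the receive side asserts that 2 * recv_buf[idx].front() + 1 == recv_buf[idx].size().

    // Sketch only: buffer describing two shared sets
    //   { 2,  owner_handle_A, file_id_A,  owner_handle_B, file_id_B }
    // The receiver translates each owner handle to its local set handle with
    // ParallelComm::get_entityset_local_handle() and records the file id in idMap.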
1650 
1651 // void get_global_ids(Interface* iFace, const unsigned long* ptr,
1652 // size_t len, unsigned flags,
1653 // std::vector<int>& ids)
1654 //{
1655 // Tag idtag;
1656 // iFace->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, idtag);
1657 // for (size_t i = 0; i < len; ++i) {
1658 // if (flags & MESHSET_ORDERED) {
1659 // int tmp;
1660 // iFace->tag_get_data(idtag, ptr + i, 1, &tmp);
1661 // ids.push_back(tmp);
1662 // continue;
1663 // }
1664 //
1665 // EntityHandle s = ptr[i];
1666 // EntityHandle e = ptr[++i];
1667 // for (; s <= e; ++s) {
1668 // int tmp;
1669 // iFace->tag_get_data(idtag, &s, 1, &tmp);
1670 // ids.push_back(tmp);
1671 // }
1672 // }
1673 //}
1674 
1675 ErrorCode WriteHDF5Parallel::pack_set( Range::const_iterator it, unsigned long* buffer, size_t buffer_size )
1676 {
1677  ErrorCode rval;
1678  const EntityHandle* ptr;
1679  int len;
1680  unsigned char flags;
1681  std::vector< wid_t > tmp;
1682  size_t newlen;
1683 
1684  // Buffer must always contain at least flags and desired sizes
1685  assert( buffer_size >= 4 );
1686  buffer_size -= 4;
1687 
1688  Range::const_iterator nd = it;
1689  ++nd;
1690  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::CONTENTS, &len, &flags );
1691  CHECK_MB( rval );
1692 
1693  // Tag mattag;
1694  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
1695  // int block;
1696  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &*it, 1, &block))
1697  // block = 0;
1698  //
1699  // if (block) {
1700  // std::vector<int> ids;
1701  // get_global_ids(iFace, ptr, len, flags, ids);
1702  //}
1703 
1704  if( len && !( flags & MESHSET_ORDERED ) )
1705  {
1706  tmp.clear();
1707  bool blocked = false;
1708  assert( ( 0 == len % 2 ) );
1709  rval = range_to_blocked_list( ptr, len / 2, tmp, blocked );
1710  CHECK_MB( rval );
1711  if( blocked ) flags |= mhdf_SET_RANGE_BIT;
1712  }
1713  else
1714  {
1715  tmp.resize( len );
1716  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true );
1717  CHECK_MB( rval );
1718  tmp.resize( newlen );
1719  }
1720 
1721  buffer[0] = flags;
1722  buffer[1] = tmp.size();
1723  if( tmp.size() <= buffer_size ) std::copy( tmp.begin(), tmp.end(), buffer + 4 );
1724 
1725  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::CHILDREN, &len );
1726  CHECK_MB( rval );
1727  tmp.resize( len );
1728  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true );
1729  tmp.resize( newlen );
1730  buffer[2] = tmp.size();
1731  if( buffer[1] <= buffer_size && tmp.size() <= buffer_size - buffer[1] ) std::copy( tmp.begin(), tmp.end(), buffer + 4 + buffer[1] ); // guard against unsigned underflow when the contents did not fit
1732 
1733  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::PARENTS, &len );
1734  CHECK_MB( rval );
1735  tmp.resize( len );
1736  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true );
1737  tmp.resize( newlen );
1738  buffer[3] = tmp.size();
1739  if( buffer[1] + buffer[2] <= buffer_size && tmp.size() <= buffer_size - buffer[1] - buffer[2] ) // guard against unsigned underflow
1740  std::copy( tmp.begin(), tmp.end(), buffer + 4 + buffer[1] + buffer[2] );
1741 
1742  return MB_SUCCESS;
1743 }
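// Layout of the buffer produced by pack_set() (all entries are unsigned long):
//   buffer[0] = set flags (with mhdf_SET_RANGE_BIT added if the contents are ranged)
//   buffer[1] = number of content IDs, buffer[2] = number of child IDs, buffer[3] = number of parent IDs
//   buffer[4...] = content IDs, then child IDs, then parent IDs; each list is
//                  written only if it fits in the remaining space, but the
//                  counts are always written so the receiver can size a retry.
// Hypothetical example: an unordered set whose contents compress to the
// (start,count) pairs (7,3) and (12,1), with no children or parents, packs as
//   { flags | mhdf_SET_RANGE_BIT, 4, 0, 0, 7, 3, 12, 1 }.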
1744 
1745 template < typename TYPE >
1746 static void convert_to_ranged_ids( const TYPE* buffer, size_t len, std::vector< WriteHDF5::wid_t >& result )
1747 {
1748  if( !len )
1749  {
1750  result.clear();
1751  return;
1752  }
1753 
1754  result.resize( len * 2 );
1755  Range tmp;
1756  for( size_t i = 0; i < len; i++ )
1757  tmp.insert( (EntityHandle)buffer[i] );
1758  result.resize( tmp.psize() * 2 );
1759  int j = 0;
1760  for( Range::const_pair_iterator pit = tmp.const_pair_begin(); pit != tmp.const_pair_end(); ++pit, j++ )
1761  {
1762  result[2 * j] = pit->first;
1763  result[2 * j + 1] = pit->second - pit->first + 1;
1764  }
1765 }
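// Example: convert_to_ranged_ids() turns the ID list { 3, 4, 5, 10, 11 } into
// the compressed-range pairs { 3, 3, 10, 2 }, i.e. (start, count) for each
// contiguous block of IDs.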
1766 
1767 static void merge_ranged_ids( const unsigned long* range_list, size_t len, std::vector< WriteHDF5::wid_t >& result )
1768 {
1769  typedef WriteHDF5::wid_t wid_t;
1770  assert( 0 == len % 2 );
1771  assert( 0 == result.size() % 2 );
1772  STATIC_ASSERT( sizeof( std::pair< wid_t, wid_t > ) == 2 * sizeof( wid_t ) );
1773 
1774  result.insert( result.end(), range_list, range_list + len );
1775  size_t plen = result.size() / 2;
1776  Range tmp;
1777  for( size_t i = 0; i < plen; i++ )
1778  {
1779  EntityHandle starth = (EntityHandle)result[2 * i];
1780  EntityHandle endh = (EntityHandle)result[2 * i] + (wid_t)result[2 * i + 1] - 1; // id + count - 1
1781  tmp.insert( starth, endh );
1782  }
1783  // Now convert back to std::vector<WriteHDF5::wid_t>, compressed range format
1784  result.resize( tmp.psize() * 2 );
1785  int j = 0;
1786  for( Range::const_pair_iterator pit = tmp.const_pair_begin(); pit != tmp.const_pair_end(); ++pit, j++ )
1787  {
1788  result[2 * j] = pit->first;
1789  result[2 * j + 1] = pit->second - pit->first + 1;
1790  }
1791 }
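// Example: merging the incoming range list { 3, 2 } (IDs 3-4) into an existing
// result of { 5, 3 } (IDs 5-7) yields the single pair { 3, 5 } (IDs 3-7),
// because the intermediate Range collapses adjacent blocks.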
1792 
1793 static void merge_vector_ids( const unsigned long* list, size_t len, std::vector< WriteHDF5::wid_t >& result )
1794 {
1795  result.insert( result.end(), list, list + len );
1796 }
1797 
1798 ErrorCode WriteHDF5Parallel::unpack_set( EntityHandle set, const unsigned long* buffer, size_t buffer_size )
1799 {
1800  // Use local variables for readability
1801  assert( buffer_size >= 4 );
1802  assert( buffer[1] + buffer[2] + buffer[3] <= buffer_size );
1803  const unsigned long flags = buffer[0];
1804  unsigned long num_content = buffer[1];
1805  const unsigned long num_child = buffer[2];
1806  const unsigned long num_parent = buffer[3];
1807  const unsigned long* contents = buffer + 4;
1808  const unsigned long* children = contents + num_content;
1809  const unsigned long* parents = children + num_child;
1810 
1811  SpecialSetData* data = find_set_data( set );
1812  assert( NULL != data );
1813  if( NULL == data ) return MB_FAILURE;
1814 
1815  // Tag mattag;
1816  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
1817  // int block;
1818  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &set, 1, &block))
1819  // block = 0;
1820 
1821  // If the accumulated data and the incoming data disagree on whether the
1822  // contents are in ranged format, convert whichever one is not ranged, so
1823  // that both end up in the compressed-range format (example after this function).
1824  std::vector< wid_t > tmp;
1825  if( ( flags & mhdf_SET_RANGE_BIT ) != ( data->setFlags & mhdf_SET_RANGE_BIT ) )
1826  {
1827  if( flags & mhdf_SET_RANGE_BIT )
1828  {
1829  tmp = data->contentIds;
1830  convert_to_ranged_ids( &tmp[0], tmp.size(), data->contentIds );
1831  data->setFlags |= mhdf_SET_RANGE_BIT;
1832  }
1833  else
1834  {
1835  tmp.clear();
1836  convert_to_ranged_ids( contents, num_content, tmp );
1837  num_content = tmp.size();
1838  if( sizeof( wid_t ) < sizeof( long ) )
1839  {
1840  size_t old_size = tmp.size();
1841  tmp.resize( sizeof( long ) * old_size / sizeof( wid_t ) );
1842  unsigned long* array = reinterpret_cast< unsigned long* >( &tmp[0] );
1843  for( long i = ( (long)old_size ) - 1; i >= 0; --i )
1844  array[i] = tmp[i];
1845  contents = array;
1846  }
1847  else if( sizeof( wid_t ) > sizeof( long ) )
1848  {
1849  unsigned long* array = reinterpret_cast< unsigned long* >( &tmp[0] );
1850  std::copy( tmp.begin(), tmp.end(), array );
1851  }
1852  contents = reinterpret_cast< unsigned long* >( &tmp[0] );
1853  }
1854  }
1855 
1856  if( data->setFlags & mhdf_SET_RANGE_BIT )
1857  merge_ranged_ids( contents, num_content, data->contentIds );
1858  else
1859  merge_vector_ids( contents, num_content, data->contentIds );
1860 
1861  merge_vector_ids( children, num_child, data->childIds );
1862  merge_vector_ids( parents, num_parent, data->parentIds );
1863  return MB_SUCCESS;
1864 }
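// Example of the reconciliation above: if the accumulated contents are the
// plain ID vector { 4, 5, 6 } and the incoming data is ranged, the vector is
// first converted to the pair (4,3); merging it with an incoming { 10, 2 }
// then yields the ranged list { 4, 3, 10, 2 }.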
1865 
1866 ErrorCode WriteHDF5Parallel::communicate_shared_set_data( const Range& owned, const Range& remote )
1867 {
1868  ErrorCode rval;
1869  int mperr;
1870  const unsigned rank = myPcomm->proc_config().proc_rank();
1871  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1872 
1873  dbgOut.tprintf( 1, "COMMUNICATING SHARED SET DATA (%lu owned & %lu remote)\n", (unsigned long)owned.size(),
1874  (unsigned long)remote.size() );
1875 
1876  // Calculate the total number of messages to be in transit (send and receive)
1877  size_t nummess = 0;
1878  std::vector< unsigned > procs;
1879
1880  Range shared( owned );
1881  shared.merge( remote );
1882  for( Range::iterator i = shared.begin(); i != shared.end(); ++i )
1883  {
1884  procs.clear();
1885  rval = myPcomm->get_entityset_procs( *i, procs );
1886  CHECK_MB( rval );
1887  nummess += procs.size();
1888  }
1889 
1890  // Choose a receive buffer size. We need at least 4*sizeof(long), but that
1891  // is nearly useless, so use 16*sizeof(long) as the minimum instead. Choose
1892  // an upper limit such that the total allocation does not exceed 32 MB
1893  // (unless we must exceed it to satisfy the minimum). Also cap the initial
1894  // buffer at INIT_BUFFER_SIZE longs (see the worked example below).
1895  const size_t MAX_BUFFER_MEM = 32 * 1024 * 1024 / sizeof( long );
1896  // const size_t INIT_BUFFER_SIZE = 128;
1897  const size_t INIT_BUFFER_SIZE = 1024;
1898  const size_t MIN_BUFFER_SIZE = 16;
1899  size_t init_buff_size = INIT_BUFFER_SIZE;
1900  if( init_buff_size * nummess > MAX_BUFFER_MEM ) init_buff_size = MAX_BUFFER_MEM / nummess;
1901  if( init_buff_size < MIN_BUFFER_SIZE ) init_buff_size = MIN_BUFFER_SIZE;
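 // Worked example (assuming an 8-byte long): MAX_BUFFER_MEM is
 // 32*1024*1024/8 = 4,194,304 longs. With nummess = 500,000 messages the
 // per-message budget is 4,194,304/500,000 = 8 longs, which is below
 // MIN_BUFFER_SIZE and is therefore clamped up to 16. With only 1,000
 // messages the initial size stays at INIT_BUFFER_SIZE = 1024 longs.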
1902 
1903  dbgOut.printf( 2, "Using buffer size of %lu for an expected message count of %lu\n", (unsigned long)init_buff_size,
1904  (unsigned long)nummess );
1905 
1906  // Count number of recvs
1907  size_t numrecv = 0;
1908  for( Range::iterator i = owned.begin(); i != owned.end(); ++i )
1909  {
1910  procs.clear();
1911  rval = myPcomm->get_entityset_procs( *i, procs );
1912  CHECK_MB( rval );
1913  numrecv += procs.size();
1914  if( std::find( procs.begin(), procs.end(), rank ) != procs.end() ) --numrecv;
1915  }
1916 
1917  // Post receive buffers for all owned sets for all sharing procs
1918  std::vector< MPI_Request > recv_req( numrecv, MPI_REQUEST_NULL );
1919  std::vector< MPI_Request > lrecv_req( numrecv, MPI_REQUEST_NULL );
1920 
1921  std::vector< std::vector< unsigned long > > recv_buf( numrecv, std::vector< unsigned long >( init_buff_size ) );
1922  int idx = 0;
1923  for( Range::iterator i = owned.begin(); i != owned.end(); ++i )
1924  {
1925  procs.clear();
1926  rval = myPcomm->get_entityset_procs( *i, procs );
1927  CHECK_MB( rval );
1928  for( size_t j = 0; j < procs.size(); ++j )
1929  {
1930  if( procs[j] == rank ) continue;
1931  int tag = ID_FROM_HANDLE( *i );
1932  if( *i != CREATE_HANDLE( MBENTITYSET, tag ) )
1933  {
1934 #ifndef NDEBUG
1935  abort();
1936 #endif
1937  CHECK_MB( MB_FAILURE );
1938  }
1939  dbgOut.printf( 5, "Posting buffer to receive set %d from proc %u\n", tag, procs[j] );
1940  mperr =
1941  MPI_Irecv( &recv_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, procs[j], tag, comm, &recv_req[idx] );
1942  CHECK_MPI( mperr );
1943  ++idx;
1944  }
1945  }
1946  assert( (size_t)idx == numrecv );
1947 
1948  // Now send set data for all remote sets that I know about
1949  std::vector< MPI_Request > send_req( remote.size() );
1950  std::vector< std::vector< unsigned long > > send_buf( remote.size() );
1951  idx = 0;
1952  for( Range::iterator i = remote.begin(); i != remote.end(); ++i, ++idx )
1953  {
1954  send_buf[idx].resize( init_buff_size );
1955  rval = pack_set( i, &send_buf[idx][0], init_buff_size );
1956  CHECK_MB( rval );
1957  EntityHandle remote_handle;
1958  unsigned owner;
1959  rval = myPcomm->get_entityset_owner( *i, owner, &remote_handle );
1960  CHECK_MB( rval );
1961 
1962  int tag = ID_FROM_HANDLE( remote_handle );
1963  assert( remote_handle == CREATE_HANDLE( MBENTITYSET, tag ) );
1964  dbgOut.printf( 5, "Sending %lu values for set %d to proc %u\n",
1965  send_buf[idx][1] + send_buf[idx][2] + send_buf[idx][3] + 4, tag, owner );
1966  mperr = MPI_Isend( &send_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, owner, tag, comm, &send_req[idx] );
1967  CHECK_MPI( mperr );
1968  }
1969 
1970  // Tag mattag;
1971  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
1972 
1973  // Now initialize local data for managing contents of owned, shared sets
1974  assert( specialSets.empty() );
1975  specialSets.clear();
1976  specialSets.reserve( owned.size() );
1977  for( Range::iterator i = owned.begin(); i != owned.end(); ++i )
1978  {
1979  // int block;
1980  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &*i, 1, &block))
1981  // block = 0;
1982  // std::vector<int> ids;
1983 
1984  SpecialSetData data;
1985  data.setHandle = *i;
1986  rval = iFace->get_meshset_options( *i, data.setFlags );
1987  CHECK_MB( rval );
1988  specialSets.push_back( data );
1989  std::vector< EntityHandle > list;
1990  if( data.setFlags & MESHSET_ORDERED )
1991  {
1992  list.clear();
1993  rval = iFace->get_entities_by_handle( *i, list );
1994  CHECK_MB( rval );
1995  rval = vector_to_id_list( list, specialSets.back().contentIds, true );
1996  CHECK_MB( rval );
1997  // if (block)
1998  // get_global_ids(iFace, &list[0], list.size(), MESHSET_ORDERED, ids);
1999  }
2000  else
2001  {
2002  Range range;
2003  rval = iFace->get_entities_by_handle( *i, range );
2004  CHECK_MB( rval );
2005  bool ranged;
2006  rval = range_to_blocked_list( range, specialSets.back().contentIds, ranged );
2007  if( ranged ) specialSets.back().setFlags |= mhdf_SET_RANGE_BIT;
2008  // if (block) {
2009  // std::vector<EntityHandle> tmp;
2010  // for (Range::const_pair_iterator pi = range.const_pair_begin(); pi !=
2011  // range.const_pair_end(); ++pi) {
2012  // tmp.push_back(pi->first);
2013  // tmp.push_back(pi->second);
2014  // }
2015  // get_global_ids(iFace, &tmp[0], tmp.size(), ranged ? 0 : MESHSET_ORDERED, ids);
2016  //}
2017  }
2018 
2019  list.clear();
2020  rval = iFace->get_parent_meshsets( *i, list );
2021  CHECK_MB( rval );
2022  rval = vector_to_id_list( list, specialSets.back().parentIds, true );
2023  CHECK_MB( rval );
2024  rval = iFace->get_child_meshsets( *i, list );
2025  CHECK_MB( rval );
2026  rval = vector_to_id_list( list, specialSets.back().childIds, true );
2027  CHECK_MB( rval );
2028  }
2029 
2030  // Process received buffers, repost larger buffers where necessary
2031  size_t remaining = numrecv;
2032  numrecv = 0;
2033  while( remaining-- )
2034  {
2035  std::vector< unsigned long > dead;
2036  MPI_Status status;
2037  mperr = MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status );
2038  CHECK_MPI( mperr );
2039  EntityHandle handle = CREATE_HANDLE( MBENTITYSET, status.MPI_TAG );
2040  std::vector< unsigned long >& buff = recv_buf[idx];
2041  size_t size = buff[1] + buff[2] + buff[3] + 4;
2042  dbgOut.printf( 5, "Received %lu values for set %d from proc %d\n", (unsigned long)size, status.MPI_TAG,
2043  status.MPI_SOURCE );
2044  if( size <= init_buff_size )
2045  {
2046  rval = unpack_set( handle, &buff[0], init_buff_size );
2047  CHECK_MB( rval );
2048  dead.swap( buff ); // Release memory
2049  }
2050  else
2051  {
2052  // Data was too big for init_buff_size
2053  // repost with larger buffer
2054  buff.resize( size );
2055  dbgOut.printf( 5, "Re-Posting buffer to receive set %d from proc %d with size %lu\n", status.MPI_TAG,
2056  status.MPI_SOURCE, (unsigned long)size );
2057  mperr = MPI_Irecv( &buff[0], size, MPI_UNSIGNED_LONG, status.MPI_SOURCE, status.MPI_TAG, comm,
2058  &lrecv_req[idx] );
2059  CHECK_MPI( mperr );
2060  ++numrecv;
2061  }
2062  recv_req[idx] = MPI_REQUEST_NULL;
2063  }
2064 
2065  // Wait for sends to complete
2066  MPI_Waitall( send_req.size(), &send_req[0], MPI_STATUSES_IGNORE );
2067 
2068  // Re-send any sets whose data did not fit in the initial buffer size
2069  idx = 0;
2070  for( Range::iterator i = remote.begin(); i != remote.end(); ++i, ++idx )
2071  {
2072  std::vector< unsigned long >& buff = send_buf[idx];
2073  size_t size = buff[1] + buff[2] + buff[3] + 4;
2074  if( size <= init_buff_size ) continue;
2075 
2076  buff.resize( size );
2077  rval = pack_set( i, &buff[0], size );
2078  CHECK_MB( rval );
2079  EntityHandle remote_handle;
2080  unsigned owner;
2081  rval = myPcomm->get_entityset_owner( *i, owner, &remote_handle );
2082  CHECK_MB( rval );
2083 
2084  int tag = ID_FROM_HANDLE( remote_handle );
2085  assert( remote_handle == CREATE_HANDLE( MBENTITYSET, tag ) );
2086  dbgOut.printf( 5, "Sending %lu values for set %d to proc %u\n", (unsigned long)size, tag, owner );
2087  mperr = MPI_Isend( &buff[0], size, MPI_UNSIGNED_LONG, owner, tag, comm, &send_req[idx] );
2088  CHECK_MPI( mperr );
2089  }
2090 
2091  // Process received buffers
2092  remaining = numrecv;
2093  while( remaining-- )
2094  {
2095  std::vector< unsigned long > dead;
2096  MPI_Status status;
2097  mperr = MPI_Waitany( lrecv_req.size(), &lrecv_req[0], &idx, &status );
2098  CHECK_MPI( mperr );
2099  EntityHandle handle = CREATE_HANDLE( MBENTITYSET, status.MPI_TAG );
2100  std::vector< unsigned long >& buff = recv_buf[idx];
2101  dbgOut.printf( 5, "Received %lu values for set %d from proc %d\n", 4 + buff[1] + buff[2] + buff[3],
2102  status.MPI_TAG, status.MPI_SOURCE );
2103  rval = unpack_set( handle, &buff[0], buff.size() );
2104  CHECK_MB( rval );
2105  dead.swap( buff ); // Release memory
2106 
2107  lrecv_req[idx] = MPI_REQUEST_NULL;
2108  }
2109 
2110  // Wait for sends to complete
2111  MPI_Waitall( send_req.size(), &send_req[0], MPI_STATUSES_IGNORE );
2112 
2113  return MB_SUCCESS;
2114 }
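// Summary of the exchange above: one fixed-size receive is posted per
// (owned shared set, remote sharing proc) pair, and one fixed-size send is
// made per remote set, tagged with the owning proc's set ID. The first
// message always carries the four header values, so the receiver can compute
// the true packed size and repost a correctly sized buffer, while the sender
// independently re-packs and re-sends any set that did not fit in init_buff_size.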
2115 
2116 ErrorCode WriteHDF5Parallel::create_meshset_tables( double* times )
2117 {
2118  Range::const_iterator riter;
2119  const unsigned rank = myPcomm->proc_config().proc_rank();
2120 
2121  START_SERIAL;
2123  END_SERIAL;
2124  CpuTimer timer;
2125 
2126  // Remove remote sets from setSet.range
2127  Range shared, owned, remote;
2128  ErrorCode rval = myPcomm->get_shared_sets( shared );
2129  CHECK_MB( rval );
2130  shared = intersect( shared, setSet.range );
2131  rval = myPcomm->get_owned_sets( rank, owned );
2132  CHECK_MB( rval );
2133  owned = intersect( owned, setSet.range );
2134  remote = subtract( shared, owned );
2135  setSet.range = subtract( setSet.range, remote );
2136 
2137  // Create set meta table
2138  struct SetDescCreator : public DataSetCreator
2139  {
2140  ErrorCode operator()( WriteHDF5* writer, long size, const ExportSet*, long& start_id ) const
2141  {
2142  return writer->create_set_meta( size, start_id );
2143  }
2144  };
2145  long count = setSet.range.size();
2146  rval = create_dataset( 1, &count, &setSet.offset, &setSet.max_num_ents, &setSet.total_num_ents, SetDescCreator(),
2147  NULL, &setSet.first_id );
2148  CHECK_MB( rval );
2150 
2152  CHECK_MB( rval );
2153  if( times ) times[SET_OFFSET_TIME] = timer.time_elapsed();
2154 
2155  // Exchange file IDS for sets between all procs
2156  rval = communicate_shared_set_ids( owned, remote );
2157  CHECK_MB( rval );
2158  if( times ) times[SHARED_SET_IDS] = timer.time_elapsed();
2159 
2160  // Communicate remote set contents, children, etc.
2161  rval = communicate_shared_set_data( owned, remote );
2162  CHECK_MB( rval );
2163  if( times ) times[SHARED_SET_CONTENTS] = timer.time_elapsed();
2164 
2165  // Communicate counts for owned sets
2166  long data_counts[3]; // { #contents, #children, #parents }
2167  rval = count_set_size( setSet.range, data_counts[0], data_counts[1], data_counts[2] );
2168  CHECK_MB( rval );
2169  if( times ) times[SET_OFFSET_TIME] += timer.time_elapsed();
2170 
2171  long offsets[3], max_counts[3], totals[3];
2172  rval = create_dataset( 3, data_counts, offsets, max_counts, totals );
2173  CHECK_MB( rval );
2174 
2175  // Create the datasets
2176  if( 0 == myPcomm->proc_config().proc_rank() )
2177  {
2178  rval = create_set_tables( totals[0], totals[1], totals[2] );
2179  CHECK_MB( rval );
2180  }
2181 
2182  // Store communicated global data
2183  setContentsOffset = offsets[0];
2184  setChildrenOffset = offsets[1];
2185  setParentsOffset = offsets[2];
2186  maxNumSetContents = max_counts[0];
2187  maxNumSetChildren = max_counts[1];
2188  maxNumSetParents = max_counts[2];
2189  writeSetContents = totals[0] > 0;
2190  writeSetChildren = totals[1] > 0;
2191  writeSetParents = totals[2] > 0;
2192 
2193  dbgOut.printf( 2, "set contents: %ld local, %ld global, offset = %ld\n", data_counts[0], totals[0], offsets[0] );
2194  dbgOut.printf( 2, "set children: %ld local, %ld global, offset = %ld\n", data_counts[1], totals[1], offsets[1] );
2195  dbgOut.printf( 2, "set parents: %ld local, %ld global, offset = %ld\n", data_counts[2], totals[2], offsets[2] );
2196 
2197  return MB_SUCCESS;
2198 }
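// Note on create_dataset() as used above: the offsets behave like a per-rank
// exclusive prefix sum and the totals like a global sum, consistent with how
// the results are consumed in this function. Hypothetical two-rank example:
// if rank 0 has data_counts = {10, 2, 0} and rank 1 has {5, 0, 3}, then
// rank 0 gets offsets {0, 0, 0}, rank 1 gets offsets {10, 2, 0}, and both
// ranks see totals {15, 2, 3} and max_counts {10, 2, 3}.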
2199 
2200 void WriteHDF5Parallel::remove_remote_entities( EntityHandle relative, Range& range )
2201 {
2202  Range result;
2203  result.merge( intersect( range, nodeSet.range ) );
2204  result.merge( intersect( range, setSet.range ) );
2205  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
2206  result.merge( intersect( range, eiter->range ) );
2207 
2208  // result.merge(intersect(range, myParallelSets));
2209  Range sets;
2210  int junk;
2211  sets.merge( Range::lower_bound( range.begin(), range.end(), CREATE_HANDLE( MBENTITYSET, 0, junk ) ), range.end() );
2212  remove_remote_sets( relative, sets );
2213  result.merge( sets );
2214  range.swap( result );
2215 }
2216 
2217 void WriteHDF5Parallel::remove_remote_sets( EntityHandle relative, Range& range )
2218 {
2219  Range result( intersect( range, setSet.range ) );
2220  // Store the non-intersecting entities separately if needed
2221  // Range remaining(subtract(range, result));
2222  range.swap( result );
2223 }
2224 
2225 void WriteHDF5Parallel::remove_remote_entities( EntityHandle relative, std::vector< EntityHandle >& vect )
2226 {
2227  Range intrsct;
2228  for( std::vector< EntityHandle >::const_iterator iter = vect.begin(); iter != vect.end(); ++iter )
2229  intrsct.insert( *iter );
2230  remove_remote_entities( relative, intrsct );
2231 
2232  unsigned int read, write;
2233  for( read = write = 0; read < vect.size(); ++read )
2234  {
2235  if( intrsct.find( vect[read] ) != intrsct.end() )
2236  {
2237  if( read != write ) vect[write] = vect[read];
2238  ++write;
2239  }
2240  }
2241  if( write != vect.size() ) vect.resize( write );
2242 }
2243 
2244 void WriteHDF5Parallel::remove_remote_sets( EntityHandle relative, std::vector< EntityHandle >& vect )
2245 {
2246  Range intrsct;
2247  for( std::vector< EntityHandle >::const_iterator iter = vect.begin(); iter != vect.end(); ++iter )
2248  intrsct.insert( *iter );
2249  remove_remote_sets( relative, intrsct );
2250 
2251  unsigned int read, write;
2252  for( read = write = 0; read < vect.size(); ++read )
2253  {
2254  if( intrsct.find( vect[read] ) != intrsct.end() )
2255  {
2256  if( read != write ) vect[write] = vect[read];
2257  ++write;
2258  }
2259  }
2260  if( write != vect.size() ) vect.resize( write );
2261 }
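// The read/write compaction loops in the two overloads above are the
// erase-remove idiom written out by hand; a roughly equivalent sketch
// (assuming <algorithm> and C++11 lambdas are acceptable here) would be:
//   vect.erase( std::remove_if( vect.begin(), vect.end(),
//                               [&intrsct]( EntityHandle h ) { return intrsct.find( h ) == intrsct.end(); } ),
//               vect.end() );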
2262 
2263 ErrorCode WriteHDF5Parallel::exchange_file_ids( const Range& nonlocal )
2264 {
2265  ErrorCode rval;
2266 
2267  // For each entity owned on the interface, write its file ID to a tag.
2268  // The sets of entities to be written should already contain only owned
2269  // entities, so by intersecting with them we not only restrict to the
2270  // entities being written but also to entities owned by this proc.
2271  // (The overall exchange is summarized after this function.)
2272 
2273  // Get list of interface entities
2274  Range imesh, tmp;
2275  for( std::list< ExportSet >::reverse_iterator i = exportList.rbegin(); i != exportList.rend(); ++i )
2276  {
2277  tmp.clear();
2278  rval = myPcomm->filter_pstatus( i->range, PSTATUS_SHARED, PSTATUS_AND, -1, &tmp );
2279  if( MB_SUCCESS != rval ) return error( rval );
2280  imesh.merge( tmp );
2281  }
2282  tmp.clear();
2283  rval = myPcomm->filter_pstatus( nodeSet.range, PSTATUS_SHARED, PSTATUS_AND, -1, &tmp );
2284  if( MB_SUCCESS != rval ) return error( rval );
2285  imesh.merge( tmp );
2286 
2287  // Create tag to store file IDs
2288  EntityHandle default_val = 0;
2289  Tag file_id_tag = 0;
2290  rval = iFace->tag_get_handle( "__hdf5_ll_fileid", 1, MB_TYPE_HANDLE, file_id_tag, MB_TAG_DENSE | MB_TAG_CREAT,
2291  &default_val );
2292  if( MB_SUCCESS != rval ) return error( rval );
2293 
2294  // Copy file IDs into tag
2295  std::vector< EntityHandle > file_id_vect( imesh.size() );
2296  Range::const_iterator i;
2297  std::vector< EntityHandle >::iterator j = file_id_vect.begin();
2298  for( i = imesh.begin(); i != imesh.end(); ++i, ++j )
2299  {
2300  *j = idMap.find( *i );
2301  if( !*j )
2302  {
2303  iFace->tag_delete( file_id_tag );
2304  return error( MB_FAILURE );
2305  }
2306  }
2307  rval = iFace->tag_set_data( file_id_tag, imesh, &file_id_vect[0] );
2308  if( MB_SUCCESS != rval )
2309  {
2310  iFace->tag_delete( file_id_tag );
2311  return error( rval );
2312  }
2313 
2314  // Do communication
2315  rval = myPcomm->exchange_tags( file_id_tag, imesh );
2316  if( MB_SUCCESS != rval )
2317  {
2318  iFace->tag_delete( file_id_tag );
2319  return error( rval );
2320  }
2321 
2322  // Copy file IDs from tag into idMap for remote entities
2323  file_id_vect.resize( nonlocal.size() );
2324  rval = iFace->tag_get_data( file_id_tag, nonlocal, &file_id_vect[0] );
2325  if( MB_SUCCESS != rval )
2326  {
2327  iFace->tag_delete( file_id_tag );
2328  return error( rval );
2329  }
2330 
2331  j = file_id_vect.begin();
2332  for( i = nonlocal.begin(); i != nonlocal.end(); ++i, ++j )
2333  {
2334  if( *j == 0 )
2335  {
2336  int owner = -1;
2337  myPcomm->get_owner( *i, owner );
2338  const char* name = CN::EntityTypeName( TYPE_FROM_HANDLE( *i ) );
2339  int id = ID_FROM_HANDLE( *i );
2340  MB_SET_ERR_CONT( "Process " << myPcomm->proc_config().proc_rank()
2341  << " did not receive valid id handle for shared " << name << " " << id
2342  << " owned by process " << owner );
2343  dbgOut.printf( 1,
2344  "Did not receive valid remote id for "
2345  "shared %s %d owned by process %d",
2346  name, id, owner );
2347  iFace->tag_delete( file_id_tag );
2348  return error( MB_FAILURE );
2349  }
2350  else
2351  {
2352  if( !idMap.insert( *i, *j, 1 ).second )
2353  {
2354  iFace->tag_delete( file_id_tag );
2355  return error( MB_FAILURE );
2356  }
2357  }
2358  }
2359 
2360 #ifndef NDEBUG
2361  // Check that the writer is consistent about which entities it owns by
2362  // verifying that the file IDs we thought we were sending were not
2363  // overwritten by received values instead.
2364  file_id_vect.resize( imesh.size() );
2365  rval = iFace->tag_get_data( file_id_tag, imesh, &file_id_vect[0] );
2366  if( MB_SUCCESS != rval )
2367  {
2368  iFace->tag_delete( file_id_tag );
2369  return error( rval );
2370  }
2371  int invalid_count = 0;
2372  j = file_id_vect.begin();
2373  for( i = imesh.begin(); i != imesh.end(); ++i, ++j )
2374  {
2375  EntityHandle h = idMap.find( *i );
2376  if( *j != h )
2377  {
2378  ++invalid_count;
2379  dbgOut.printf( 1, "Conflicting ownership for %s %ld\n", CN::EntityTypeName( TYPE_FROM_HANDLE( *i ) ),
2380  (long)ID_FROM_HANDLE( *i ) );
2381  }
2382  }
2383  if( invalid_count )
2384  {
2385  iFace->tag_delete( file_id_tag );
2386  MB_SET_ERR( MB_FAILURE, invalid_count << " entities with conflicting ownership found by process "
2387  << myPcomm->proc_config().proc_rank()
2388  << ". This will result in duplicate entities written to file" );
2389  }
2390 #endif
2391 
2392  return iFace->tag_delete( file_id_tag );
2393 }
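// Summary of exchange_file_ids(): each proc stores the file IDs of its owned
// interface entities in the temporary dense handle tag "__hdf5_ll_fileid",
// ParallelComm::exchange_tags() propagates the tag values to the sharing
// procs, and the received values are copied into idMap for the non-owned
// (remote) entities. A zero tag value after the exchange means the owner
// never assigned a file ID, which is reported as an error.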
2394 
2395 void WriteHDF5Parallel::print_times( const double* times ) const
2396 {
2397  if( !myPcomm )
2398  {
2399  WriteHDF5::print_times( times );
2400  }
2401  else
2402  {
2403  double recv[NUM_TIMES];
2404  MPI_Reduce( (void*)times, recv, NUM_TIMES, MPI_DOUBLE, MPI_MAX, 0, myPcomm->proc_config().proc_comm() );
2405  if( 0 == myPcomm->proc_config().proc_rank() ) WriteHDF5::print_times( recv );
2406  }
2407 }
2408 
2409 } // namespace moab