Mesh Oriented datABase  (version 5.5.0)
An array-based unstructured mesh library
WriteHDF5Parallel.cpp
1 #undef DEBUG
2 #undef TIME_DEBUG
3 
4 #include <cstdarg>
5 #include <ctime>
6 #include <cstdlib>
7 
8 #include <cstring>
9 #include <cassert>
10 
11 #include <vector>
12 #include <set>
13 #include <map>
14 #include <utility>
15 #include <iostream>
16 #include <sstream>
17 #include <string>
18 
19 #include "moab/Interface.hpp"
20 #include "Internals.hpp"
21 #include "MBTagConventions.hpp"
22 #include "MBParallelConventions.h"
23 #include "moab/ParallelComm.hpp"
24 #include "moab/CN.hpp"
25 #include "moab/Range.hpp"
26 #include "moab/CpuTimer.hpp"
27 
28 #include "WriteHDF5Parallel.hpp"
29 
30 #ifndef MOAB_HAVE_HDF5
31 #error Attempt to compile WriteHDF5Parallel with HDF5 support disabled
32 #endif
33 
34 #include <H5Tpublic.h>
35 #include <H5Ppublic.h>
36 #include <H5FDmpi.h>
37 #include <H5FDmpio.h>
38 
39 #include "mhdf.h"
40 
41 #include "IODebugTrack.hpp"
42 #include "moab/FileOptions.hpp"
43 
44 namespace
45 {
46 template < bool Condition >
47 struct STATIC_ASSERTION;
48 template <>
49 struct STATIC_ASSERTION< true >
50 {
51 };
52 } // namespace
53 
54 #define PP_CAT_( a, b ) a##b
55 #define PP_CAT( a, b ) PP_CAT_( a, b )
56 #define STATIC_ASSERT( Condition ) \
57  enum \
58  { \
59  PP_CAT( dummy, __LINE__ ) = sizeof( ::STATIC_ASSERTION< (bool)( Condition ) > ) \
60  }
61 
62 namespace moab
63 {
64 
65 #ifndef _WIN32 // problematic for windows
66 // Need an MPI type that we can put handles in
67 STATIC_ASSERT( sizeof( unsigned long ) >= sizeof( EntityHandle ) );
68 
69 // Need an MPI type that we can put file IDs in
70 STATIC_ASSERT( sizeof( unsigned long ) >= sizeof( WriteHDF5::wid_t ) );
71 #endif
72 
73 // This function doesn't do anything useful. It's just a nice
74 // place to set a break point to determine why the writer fails.
75 static inline ErrorCode error( ErrorCode rval )
76 {
77  return rval;
78 }
79 
80 const char* mpi_err_str( int errorcode )
81 {
82  static char buffer[2048];
83  int len = sizeof( buffer );
84  MPI_Error_string( errorcode, buffer, &len );
85  buffer[std::min( (size_t)len, sizeof( buffer ) - 1 )] = '\0';
86  return buffer;
87 }
88 
89 #define MPI_FAILURE_MSG( A ) \
90  "MPI Failure at " __FILE__ ":%d : (Code %d) %s\n", __LINE__, (int)( A ), mpi_err_str( ( A ) )
91 
92 #define CHECK_MPI( A ) \
93  do \
94  { \
95  if( MPI_SUCCESS != ( A ) ) \
96  { \
97  MB_SET_ERR_CONT( "MPI Failure : (Code " << (int)( A ) << ") " << mpi_err_str( ( A ) ) ); \
98  dbgOut.printf( 1, MPI_FAILURE_MSG( ( A ) ) ); \
99  return error( MB_FAILURE ); \
100  } \
101  } while( false )
102 
103 #define MB_FAILURE_MSG( A ) "MOAB_Failure at " __FILE__ ":%d : %s (%d)\n", __LINE__, ErrorCodeStr[( A )], (int)( A )
104 
105 #define CHECK_MB( A ) \
106  do \
107  { \
108  if( MB_SUCCESS != ( A ) ) \
109  { \
110  MB_SET_ERR_CONT( "MOAB Failure : " << ErrorCodeStr[( A )] ); \
111  dbgOut.printf( 1, MB_FAILURE_MSG( ( A ) ) ); \
112  return error( A ); \
113  } \
114  } while( false )
115 
116 #define HDF_FAILURE_MSG( A ) "MHDF Failure at " __FILE__ ":%d : %s\n", __LINE__, mhdf_message( &( A ) )
117 
118 #define CHECK_HDF( A ) \
119  do \
120  { \
121  if( mhdf_isError( &( A ) ) ) \
122  { \
123  MB_SET_ERR_CONT( "MHDF Failure : " << mhdf_message( &( A ) ) ); \
124  dbgOut.printf( 1, HDF_FAILURE_MSG( ( A ) ) ); \
125  return error( MB_FAILURE ); \
126  } \
127  } while( false )
128 
129 #define CHECK_HDFN( A ) \
130  do \
131  { \
132  if( mhdf_isError( &( A ) ) ) \
133  { \
134  MB_SET_ERR_CONT( "MHDF Failure : " << mhdf_message( &( A ) ) ); \
135  return error( MB_FAILURE ); \
136  } \
137  } while( false )
138 
139 #ifdef VALGRIND
140 #include <valgrind/memcheck.h>
141 
142 template < typename T >
143 inline void VALGRIND_MAKE_VEC_UNDEFINED( std::vector< T >& v )
144 {
145  if( v.size() )
146  {
147  }
148  (void)VALGRIND_MAKE_MEM_UNDEFINED( &v[0], v.size() * sizeof( T ) );
149 }
150 
151 #else
152 #ifndef VALGRIND_CHECK_MEM_IS_DEFINED
153 #define VALGRIND_CHECK_MEM_IS_DEFINED( a, b ) ( (void)0 )
154 #endif
155 #ifndef VALGRIND_CHECK_MEM_IS_ADDRESSABLE
156 #define VALGRIND_CHECK_MEM_IS_ADDRESSABLE( a, b ) ( (void)0 )
157 #endif
158 #ifndef VALGRIND_MAKE_MEM_UNDEFINED
159 #define VALGRIND_MAKE_MEM_UNDEFINED( a, b ) ( (void)0 )
160 #endif
161 
162 template < typename T >
163 inline void VALGRIND_MAKE_VEC_UNDEFINED( std::vector< T >& )
164 {
165  /* Nothing to do */
166 }
167 
168 #endif
169 
170 #ifndef NDEBUG
171 #define START_SERIAL \
172  for( unsigned _x = 0; _x < myPcomm->proc_config().proc_size(); ++_x ) \
173  { \
174  MPI_Barrier( myPcomm->proc_config().proc_comm() ); \
175  if( _x != myPcomm->proc_config().proc_rank() ) continue
176 #define END_SERIAL \
177  } \
178  MPI_Barrier( myPcomm->proc_config().proc_comm() )
179 #else
180 #define START_SERIAL
181 #define END_SERIAL
182 #endif
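As an aside, a minimal usage sketch of these macros (illustrative only, not part of this file): the body runs on one rank at a time in rank order, separated by barriers; with NDEBUG defined the serialization disappears but the body still executes.

    // Sketch: print from each rank in turn (assumes myPcomm has been initialized).
    START_SERIAL;
    std::cout << "rank " << myPcomm->proc_config().proc_rank() << " checking in" << std::endl;
    END_SERIAL;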
183 
184 static int my_Gatherv( void* sendbuf,
185  int sendcount,
186  MPI_Datatype sendtype,
187  std::vector< unsigned char >& recvbuf,
188  std::vector< int >& recvcounts,
189  int root,
190  MPI_Comm comm )
191 {
192  int nproc, rank, bytes, err;
193  MPI_Comm_size( comm, &nproc );
194  MPI_Comm_rank( comm, &rank );
195  MPI_Type_size( sendtype, &bytes );
196 
197  recvcounts.resize( rank == root ? nproc : 0 );
198  err = MPI_Gather( &sendcount, 1, MPI_INT, &recvcounts[0], 1, MPI_INT, root, comm );
199  if( MPI_SUCCESS != err ) return err;
200 
201  std::vector< int > disp( recvcounts.size() );
202  if( root == rank )
203  {
204  disp[0] = 0;
205  for( int i = 1; i < nproc; ++i )
206  disp[i] = disp[i - 1] + recvcounts[i - 1];
207  recvbuf.resize( bytes * ( disp.back() + recvcounts.back() ) );
208  }
209 
210  return MPI_Gatherv( sendbuf, sendcount, sendtype, &recvbuf[0], &recvcounts[0], &disp[0], sendtype, root, comm );
211 }
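A hedged usage sketch of this helper, in the spirit of how it is exercised later in create_tag_tables(): every rank contributes a variable-length byte buffer and only the root needs the concatenation. The variables below (comm, local, gathered, counts) are illustrative assumptions, not part of this file.

    // Sketch: gather per-rank byte buffers onto rank 0.
    int rank;
    MPI_Comm_rank( comm, &rank );
    std::vector< unsigned char > local( 10 + rank, (unsigned char)rank ); // any per-rank payload
    std::vector< unsigned char > gathered;  // concatenated data, valid on root only
    std::vector< int > counts;              // bytes contributed by each rank, root only
    int err = my_Gatherv( &local[0], (int)local.size(), MPI_UNSIGNED_CHAR, gathered, counts, 0, comm );
    // On rank 0, gathered.size() equals the sum of counts[]; other ranks receive nothing.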
212 
213 static void print_type_sets( Interface* iFace, DebugOutput* str, Range& sets )
214 {
215  const unsigned VB = 2;
216  if( str->get_verbosity() < VB ) return;
217 
218  Tag gid, did, bid, sid, nid;
219  gid = iFace->globalId_tag();
220  iFace->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, did );
221  iFace->tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bid );
222  iFace->tag_get_handle( NEUMANN_SET_TAG_NAME, 1, MB_TYPE_INTEGER, sid );
223  iFace->tag_get_handle( DIRICHLET_SET_TAG_NAME, 1, MB_TYPE_INTEGER, nid );
224  Range typesets[10];
225  const char* typenames[] = { "Block ", "Sideset ", "NodeSet", "Vertex", "Curve",
226  "Surface", "Volume", "Body", "Other" };
227  for( Range::iterator riter = sets.begin(); riter != sets.end(); ++riter )
228  {
229  unsigned dim, id; //, oldsize;
230  if( MB_SUCCESS == iFace->tag_get_data( bid, &*riter, 1, &id ) )
231  dim = 0;
232  else if( MB_SUCCESS == iFace->tag_get_data( sid, &*riter, 1, &id ) )
233  dim = 1;
234  else if( MB_SUCCESS == iFace->tag_get_data( nid, &*riter, 1, &id ) )
235  dim = 2;
236  else if( MB_SUCCESS == iFace->tag_get_data( did, &*riter, 1, &dim ) )
237  {
238  id = 0;
239  iFace->tag_get_data( gid, &*riter, 1, &id );
240  dim += 3;
241  }
242  else
243  {
244  id = *riter;
245  dim = 9;
246  }
247 
248  // oldsize = typesets[dim].size();
249  typesets[dim].insert( id );
250  // assert(typesets[dim].size() - oldsize == 1);
251  }
252  for( int ii = 0; ii < 9; ++ii )
253  {
254  char tmp[64];
255  sprintf( tmp, "%s (%lu) ", typenames[ii], (unsigned long)typesets[ii].size() );
256  str->print( VB, tmp, typesets[ii] );
257  }
258  str->printf( VB, "Total: %lu\n", (unsigned long)sets.size() );
259 }
260 
261 #define debug_barrier() debug_barrier_line( __LINE__ )
262 
263 void WriteHDF5Parallel::debug_barrier_line( int lineno )
264 {
265  const unsigned threshold = 2;
266  static unsigned long count = 0;
267  if( dbgOut.get_verbosity() >= threshold && myPcomm )
268  {
269  dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno );
270  MPI_Barrier( myPcomm->proc_config().proc_comm() );
271  }
272 }
273 
274 WriterIface* WriteHDF5Parallel::factory( Interface* iface )
275 {
276  return new WriteHDF5Parallel( iface );
277 }
278 
279 WriteHDF5Parallel::WriteHDF5Parallel( Interface* iface )
280  : WriteHDF5( iface ), myPcomm( NULL ), pcommAllocated( false ), hslabOp( H5S_SELECT_OR )
281 {
282 }
283 
284 WriteHDF5Parallel::~WriteHDF5Parallel()
285 {
286  if( pcommAllocated && myPcomm ) delete myPcomm;
287 }
288 
289 // The parent WriteHDF5 class has ExportSet structs that are
290 // populated with the entities to be written, grouped by type
291 // (and for elements, connectivity length). This function:
292 // o determines which entities are to be written by a remote processor
293 // o removes those entities from the ExportSet structs in WriteMesh
294 // o passes them back in a Range
295 ErrorCode WriteHDF5Parallel::gather_interface_meshes( Range& nonowned )
296 {
297  ErrorCode result;
298 
299  // START_SERIAL;
300  dbgOut.print( 3, "Pre-interface mesh:\n" );
301  dbgOut.print( 3, nodeSet.range );
302  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
303  dbgOut.print( 3, eiter->range );
304  dbgOut.print( 3, setSet.range );
305 
306  // Move handles of non-owned entities from lists of entities
307  // that this processor will write to the 'nonowned' list.
308 
309  nonowned.clear();
310  result = myPcomm->filter_pstatus( nodeSet.range, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &nonowned );
311  if( MB_SUCCESS != result ) return error( result );
312  nodeSet.range = subtract( nodeSet.range, nonowned );
313 
314  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
315  {
316  Range tmpset;
317  result = myPcomm->filter_pstatus( eiter->range, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &tmpset );
318  if( MB_SUCCESS != result ) return error( result );
319  eiter->range = subtract( eiter->range, tmpset );
320  nonowned.merge( tmpset );
321  }
322 
323  dbgOut.print( 3, "Post-interface mesh:\n" );
324  dbgOut.print( 3, nodeSet.range );
325  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
326  dbgOut.print( 3, eiter->range );
327  dbgOut.print( 3, setSet.range );
328 
329  // END_SERIAL;
330 
331  return MB_SUCCESS;
332 }
333 
334 ErrorCode WriteHDF5Parallel::parallel_create_file( const char* filename,
335  bool overwrite,
336  const std::vector< std::string >& qa_records,
337  const FileOptions& opts,
338  const Tag* user_tag_list,
339  int user_tag_count,
340  int dimension,
341  double* times )
342 {
343  ErrorCode rval;
344  mhdf_Status status;
345 
346  int pcomm_no = 0;
347  opts.get_int_option( "PARALLEL_COMM", pcomm_no );
348 
349  myPcomm = ParallelComm::get_pcomm( iFace, pcomm_no );
350  if( 0 == myPcomm )
351  {
352  myPcomm = new ParallelComm( iFace, MPI_COMM_WORLD );
353  pcommAllocated = true;
354  }
355 
356  MPI_Info info = MPI_INFO_NULL;
357  std::string cb_size;
358  rval = opts.get_str_option( "CB_BUFFER_SIZE", cb_size );
359  if( MB_SUCCESS == rval )
360  {
361  MPI_Info_create( &info );
362  MPI_Info_set( info, const_cast< char* >( "cb_buffer_size" ), const_cast< char* >( cb_size.c_str() ) );
363  }
364 
365  dbgOut.set_rank( myPcomm->proc_config().proc_rank() );
366  dbgOut.limit_output_to_first_N_procs( 32 );
367  Range nonlocal;
368  debug_barrier();
369  dbgOut.tprint( 1, "Gathering interface meshes\n" );
370  rval = gather_interface_meshes( nonlocal );
371  if( MB_SUCCESS != rval ) return error( rval );
372 
373  /**************** Get tag names for sets likely to be shared ***********/
374  // debug_barrier();
375  // dbgOut.tprint(1, "Getting shared entity sets\n");
376  // rval = get_sharedset_tags();
377  // if (MB_SUCCESS != rval) return error(rval);
378 
379  /**************** Create actual file and write meta info ***************/
380 
381  debug_barrier();
382  if( myPcomm->proc_config().proc_rank() == 0 )
383  {
384  dbgOut.tprintf( 1, "Creating file: %s\n", filename );
385 
386  // Create the file
387  const char* type_names[MBMAXTYPE];
388  memset( type_names, 0, MBMAXTYPE * sizeof( char* ) );
389  for( EntityType i = MBEDGE; i < MBENTITYSET; ++i )
390  type_names[i] = CN::EntityTypeName( i );
391 
392  dbgOut.tprint( 1, "call mhdf_createFile\n" );
393  filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status );
394  if( !filePtr )
395  {
396  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
397  }
398 
399  dbgOut.tprint( 1, "call write_qa\n" );
400  rval = write_qa( qa_records );
401  if( MB_SUCCESS != rval ) return error( rval );
402  }
403 
404  /**************** Create node coordinate table ***************/
405  CpuTimer timer;
406  debug_barrier();
407  dbgOut.tprint( 1, "creating node table\n" );
408  topState.start( "creating node table" );
409  rval = create_node_table( dimension );
410  topState.end( rval );
411  if( MB_SUCCESS != rval ) return error( rval );
412  if( times ) times[CREATE_NODE_TIME] = timer.time_elapsed();
413 
414  /**************** Create element tables ***************/
415 
416  debug_barrier();
417  dbgOut.tprint( 1, "negotiating element types\n" );
418  topState.start( "negotiating element types" );
419  rval = negotiate_type_list();
420  topState.end( rval );
421  if( MB_SUCCESS != rval ) return error( rval );
422  if( times ) times[NEGOTIATE_TYPES_TIME] = timer.time_elapsed();
423  dbgOut.tprint( 1, "creating element table\n" );
424  topState.start( "creating element tables" );
425  rval = create_element_tables();
426  topState.end( rval );
427  if( MB_SUCCESS != rval ) return error( rval );
428  if( times ) times[CREATE_ELEM_TIME] = timer.time_elapsed();
429 
430  /*************** Exchange file IDs *****************/
431 
432  debug_barrier();
433  dbgOut.tprint( 1, "communicating file ids\n" );
434  topState.start( "communicating file ids" );
435  rval = exchange_file_ids( nonlocal );
436  topState.end( rval );
437  if( MB_SUCCESS != rval ) return error( rval );
438  if( times ) times[FILEID_EXCHANGE_TIME] = timer.time_elapsed();
439 
440  /**************** Create meshset tables *********************/
441 
442  debug_barrier();
443  dbgOut.tprint( 1, "creating meshset table\n" );
444  topState.start( "creating meshset tables" );
445  rval = create_meshset_tables( times );
446  topState.end( rval );
447  if( MB_SUCCESS != rval ) return error( rval );
448  if( times ) times[CREATE_SET_TIME] = timer.time_elapsed();
449 
450  /**************** Create adjacency tables *********************/
451 
452  debug_barrier();
453  dbgOut.tprint( 1, "creating adjacency table\n" );
454  topState.start( "creating adjacency tables" );
455  rval = create_adjacency_tables();
456  topState.end( rval );
457  if( MB_SUCCESS != rval ) return error( rval );
458  if( times ) times[CREATE_ADJ_TIME] = timer.time_elapsed();
459 
460  /**************** Create tag data *********************/
461 
462  debug_barrier();
463  dbgOut.tprint( 1, "creating tag tables\n" );
464  topState.start( "creating tag tables" );
465  rval = gather_tags( user_tag_list, user_tag_count );
466  if( MB_SUCCESS != rval ) return error( rval );
467  rval = create_tag_tables();
468  topState.end( rval );
469  if( MB_SUCCESS != rval ) return error( rval );
470  if( times ) times[CREATE_TAG_TIME] = timer.time_elapsed();
471 
472  /************** Close serial file and reopen parallel *****************/
473 
474  if( 0 == myPcomm->proc_config().proc_rank() ) mhdf_closeFile( filePtr, &status );
475 
476  MPI_Barrier( myPcomm->proc_config().proc_comm() );
477  dbgOut.tprint( 1, "(re)opening file in parallel mode\n" );
478  unsigned long junk;
479  hid_t hdf_opt = H5Pcreate( H5P_FILE_ACCESS );
480  H5Pset_fapl_mpio( hdf_opt, myPcomm->proc_config().proc_comm(), info );
481  filePtr = mhdf_openFileWithOpt( filename, 1, &junk, id_type, hdf_opt, &status );
482  H5Pclose( hdf_opt );
483  if( !filePtr )
484  {
485  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
486  }
487 
488  if( collectiveIO )
489  {
490  dbgOut.print( 1, "USING COLLECTIVE IO\n" );
491  writeProp = H5Pcreate( H5P_DATASET_XFER );
492  H5Pset_dxpl_mpio( writeProp, H5FD_MPIO_COLLECTIVE );
493  }
494 
495  /* Test if we can use H5S_APPEND when selecting hyperslabs */
496  if( MB_SUCCESS != opts.get_null_option( "HYPERSLAB_OR" ) &&
497  ( MB_SUCCESS == opts.get_null_option( "HYPERSLAB_APPEND" ) || HDF5_can_append_hyperslabs() ) )
498  {
499  dbgOut.print( 1, "HDF5 library supports H5Sselect_hyperslab with H5S_SELECT_APPEND\n" );
500  hslabOp = H5S_SELECT_APPEND;
501  }
502 
503  dbgOut.tprint( 1, "Exiting parallel_create_file\n" );
504  return MB_SUCCESS;
505 }
506 
507 class TagNameCompare
508 {
509  Interface* iFace;
510  std::string name1, name2;
511 
512  public:
513  TagNameCompare( Interface* iface ) : iFace( iface ) {}
514  bool operator()( const WriteHDF5::TagDesc& t1, const WriteHDF5::TagDesc& t2 );
515 };
516 
517 bool TagNameCompare::operator()( const WriteHDF5::TagDesc& t1, const WriteHDF5::TagDesc& t2 )
518 {
519  iFace->tag_get_name( t1.tag_id, name1 );
520  iFace->tag_get_name( t2.tag_id, name2 );
521  return name1 < name2;
522 }
523 
524 struct serial_tag_data
525 {
526  TagType storage;
527  DataType type;
528  int size;
529  int name_len;
530  int def_val_len;
531  char name[sizeof( unsigned long )];
532 
533  static size_t pad( size_t len )
534  {
535  if( len % sizeof( unsigned long ) )
536  return len + sizeof( unsigned long ) - len % sizeof( unsigned long );
537  else
538  return len;
539  }
540 
541  static size_t def_val_bytes( int def_val_len, DataType type )
542  {
543  switch( type )
544  {
545  case MB_TYPE_BIT:
546  return def_val_len ? 1 : 0;
547  case MB_TYPE_OPAQUE:
548  return def_val_len;
549  case MB_TYPE_INTEGER:
550  return def_val_len * sizeof( int );
551  case MB_TYPE_DOUBLE:
552  return def_val_len * sizeof( double );
553  case MB_TYPE_HANDLE:
554  return def_val_len * sizeof( EntityHandle );
555  }
556  return 0;
557  }
558 
559  static size_t len( int name_len, int def_val_len, DataType type )
560  {
561  return sizeof( serial_tag_data ) + pad( name_len + def_val_bytes( def_val_len, type ) ) -
562  sizeof( unsigned long );
563  }
564  size_t len() const
565  {
566  return len( name_len, def_val_len, type );
567  }
568  void* default_value()
569  {
570  return def_val_len ? name + name_len : 0;
571  }
572  const void* default_value() const
573  {
574  return const_cast< serial_tag_data* >( this )->default_value();
575  }
576  void set_default_value( const void* val )
577  {
578  memcpy( default_value(), val, def_val_bytes( def_val_len, type ) );
579  }
580 };
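To make the variable-length layout concrete: each record is this fixed header followed by the NUL-terminated tag name and, optionally, the default value, packed into the trailing name array and padded to a multiple of sizeof(unsigned long). A small sketch (the tag name "FOO" is hypothetical):

    // Sketch: size of one serialized record for a tag named "FOO"
    // (name_len = 4 including the terminating NUL) with a single-integer default value.
    size_t bytes = serial_tag_data::len( 4, 1, MB_TYPE_INTEGER );
    // bytes == sizeof(serial_tag_data) - sizeof(unsigned long)
    //          + serial_tag_data::pad( 4 + sizeof(int) )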
581 
582 ErrorCode WriteHDF5Parallel::append_serial_tag_data( std::vector< unsigned char >& buffer,
583  const WriteHDF5::TagDesc& tag )
584 {
585  ErrorCode rval;
586 
587  std::string name;
588  rval = iFace->tag_get_name( tag.tag_id, name );
589  if( MB_SUCCESS != rval ) return error( rval );
590 
591  // Get name length, including space for null char
592  size_t name_len = name.size() + 1;
593  if( name_len == 1 ) return MB_SUCCESS; // Skip tags with no name
594 
595  DataType data_type;
596  rval = iFace->tag_get_data_type( tag.tag_id, data_type );
597  if( MB_SUCCESS != rval ) return error( rval );
598 
599  // Get default value
600  int def_val_len;
601  const void* def_val;
602  if( MB_SUCCESS != iFace->tag_get_default_value( tag.tag_id, def_val, def_val_len ) )
603  {
604  def_val_len = 0;
605  def_val = 0;
606  }
607 
608  // Allocate struct within buffer
609  size_t init_size = buffer.size();
610  buffer.resize( init_size + serial_tag_data::len( name_len, def_val_len, data_type ) );
611  serial_tag_data* ptr = reinterpret_cast< serial_tag_data* >( &buffer[init_size] );
612 
613  // Populate struct
614  rval = iFace->tag_get_type( tag.tag_id, ptr->storage );
615  if( MB_SUCCESS != rval ) return error( rval );
616  ptr->type = data_type;
617  rval = iFace->tag_get_length( tag.tag_id, ptr->size );
618  if( MB_VARIABLE_DATA_LENGTH == rval )
619  ptr->size = MB_VARIABLE_LENGTH;
620  else if( MB_SUCCESS != rval )
621  return error( rval );
622  ptr->name_len = name_len;
623  Range range;
624  memset( ptr->name, 0, ptr->name_len );
625  memcpy( ptr->name, name.data(), name.size() );
626  ptr->def_val_len = def_val_len;
627  ptr->set_default_value( def_val );
628 
629  return MB_SUCCESS;
630 }
631 
632 ErrorCode WriteHDF5Parallel::check_serial_tag_data( const std::vector< unsigned char >& buffer,
633  std::vector< TagDesc* >* missing,
634  std::vector< TagDesc* >* newlist )
635 {
636  ErrorCode rval;
637 
638  // Use 'write_sparse' field as a 'visited' mark
639  std::list< TagDesc >::iterator tag_iter;
640  if( missing )
641  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
642  tag_iter->write_sparse = true;
643 
644  // Use a set as a temporary for what will ultimately go in
645  // newlist because we need to pass back newlist in the order
646  // of the tagList member.
647  std::set< TagDesc* > newset;
648 
649  // Iterate over the received tag descriptions, updating the local list of tags.
650  // Be careful to keep tagList sorted such that in the end all
651  // procs have the same list in the same order.
652  std::vector< unsigned char >::const_iterator diter = buffer.begin();
653  tag_iter = tagList.begin();
654  while( diter < buffer.end() )
655  {
656  // Get struct from buffer
657  const serial_tag_data* ptr = reinterpret_cast< const serial_tag_data* >( &*diter );
658 
659  // Find local struct for tag
660  std::string name( ptr->name );
661  std::string n;
662  iFace->tag_get_name( tag_iter->tag_id, n ); // Second time we've called, so shouldn't fail
663  if( n > name )
664  {
665  tag_iter = tagList.begin(); // New proc, start search from beginning
666  }
667  iFace->tag_get_name( tag_iter->tag_id, n );
668  while( n < name )
669  {
670  ++tag_iter;
671  if( tag_iter == tagList.end() ) break;
672  iFace->tag_get_name( tag_iter->tag_id, n );
673  }
674  if( tag_iter == tagList.end() || n != name )
675  { // New tag
676  TagDesc newtag;
677 
678  if( ptr->size == MB_VARIABLE_LENGTH )
679  rval = iFace->tag_get_handle( name.c_str(), ptr->def_val_len, ptr->type, newtag.tag_id,
680  MB_TAG_VARLEN | MB_TAG_CREAT | ptr->storage, ptr->default_value() );
681  else
682  rval = iFace->tag_get_handle( name.c_str(), ptr->size, ptr->type, newtag.tag_id,
683  MB_TAG_CREAT | ptr->storage, ptr->default_value() );
684  if( MB_SUCCESS != rval ) return error( rval );
685 
686  newtag.sparse_offset = 0;
687  newtag.var_data_offset = 0;
688  newtag.write_sparse = false;
689  newtag.max_num_ents = 0;
690  newtag.max_num_vals = 0;
691 
692  tag_iter = tagList.insert( tag_iter, newtag );
693  if( newlist ) newset.insert( &*tag_iter );
694  }
695  else
696  { // Check that tag is as expected
697  DataType type;
698  iFace->tag_get_data_type( tag_iter->tag_id, type );
699  if( type != ptr->type )
700  {
701  MB_SET_ERR( MB_FAILURE, "Processes have inconsistent data type for tag \"" << name << "\"" );
702  }
703  int size;
704  iFace->tag_get_length( tag_iter->tag_id, size );
705  if( size != ptr->size )
706  {
707  MB_SET_ERR( MB_FAILURE, "Processes have inconsistent size for tag \"" << name << "\"" );
708  }
709  tag_iter->write_sparse = false;
710  }
711 
712  // Step to next variable-length struct.
713  diter += ptr->len();
714  }
715 
716  // Now pass back any local tags that weren't in the buffer
717  if( missing )
718  {
719  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
720  {
721  if( tag_iter->write_sparse )
722  {
723  tag_iter->write_sparse = false;
724  missing->push_back( &*tag_iter );
725  }
726  }
727  }
728 
729  // Be careful to populate newlist in the same, sorted, order as tagList
730  if( newlist )
731  {
732  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
733  if( newset.find( &*tag_iter ) != newset.end() ) newlist->push_back( &*tag_iter );
734  }
735 
736  return MB_SUCCESS;
737 }
738 
739 static void set_bit( int position, unsigned char* bytes )
740 {
741  int byte = position / 8;
742  int bit = position % 8;
743  bytes[byte] |= ( ( (unsigned char)1 ) << bit );
744 }
745 
746 static bool get_bit( int position, const unsigned char* bytes )
747 {
748  int byte = position / 8;
749  int bit = position % 8;
750  return 0 != ( bytes[byte] & ( ( (unsigned char)1 ) << bit ) );
751 }
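These two helpers manage the per-tag bit rows used in the dense-format negotiation below: in row r, column c records whether tag r can be written densely for entity group c (nodes first, then each element group, then sets). A quick sketch with a single one-byte row (illustrative only):

    // Sketch: mark groups 0 (nodes) and 2 (sets) as dense-writable.
    unsigned char row[1] = { 0 };
    set_bit( 0, row );
    set_bit( 2, row );
    assert( get_bit( 0, row ) && !get_bit( 1, row ) && get_bit( 2, row ) );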
752 
753 ErrorCode WriteHDF5Parallel::create_tag_tables()
754 {
755  std::list< TagDesc >::iterator tag_iter;
756  ErrorCode rval;
757  int err;
758  const int rank = myPcomm->proc_config().proc_rank();
759  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
760 
761  subState.start( "negotiating tag list" );
762 
763  dbgOut.tprint( 1, "communicating tag metadata\n" );
764 
765  dbgOut.printf( 2, "Exchanging tag data for %d tags.\n", (int)tagList.size() );
766 
767  // Sort tagList contents in alphabetical order by tag name
768  tagList.sort( TagNameCompare( iFace ) );
769 
770  // Negotiate total list of tags to write
771 
772  // Build concatenated list of all tag data
773  std::vector< unsigned char > tag_buffer;
774  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
775  {
776  rval = append_serial_tag_data( tag_buffer, *tag_iter );
777  CHECK_MB( rval );
778  }
779 
780  // Broadcast list from root to all other procs
781  unsigned long size = tag_buffer.size();
782  err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, comm );
783  CHECK_MPI( err );
784  tag_buffer.resize( size );
785  err = MPI_Bcast( &tag_buffer[0], size, MPI_UNSIGNED_CHAR, 0, comm );
786  CHECK_MPI( err );
787 
788  // Update local tag list
789  std::vector< TagDesc* > missing;
790  rval = check_serial_tag_data( tag_buffer, &missing, 0 );
791  CHECK_MB( rval );
792 
793  // Check if we're done (0->done, 1->more, 2+->error)
794  int code, lcode = ( MB_SUCCESS != rval ) ? rval + 2 : missing.empty() ? 0 : 1;
795  err = MPI_Allreduce( &lcode, &code, 1, MPI_INT, MPI_MAX, comm );
796  CHECK_MPI( err );
797  if( code > 1 )
798  {
799  MB_SET_ERR_CONT( "Inconsistent tag definitions between procs" );
800  return error( (ErrorCode)( code - 2 ) );
801  }
802 
803  // If not done...
804  if( code )
805  {
806  dbgOut.print( 1, "Not all procs had same tag definitions, negotiating...\n" );
807 
808  // Get tags defined on this proc but not on root proc
809  tag_buffer.clear();
810  for( size_t i = 0; i < missing.size(); ++i )
811  {
812  rval = append_serial_tag_data( tag_buffer, *missing[i] );
813  CHECK_MB( rval );
814  }
815 
816  // Gather extra tag definitions on root processor
817  std::vector< int > junk; // don't care how many from each proc
818  assert( rank || tag_buffer.empty() ); // must be empty on root
819  err = my_Gatherv( &tag_buffer[0], tag_buffer.size(), MPI_UNSIGNED_CHAR, tag_buffer, junk, 0, comm );
820  CHECK_MPI( err );
821 
822  // Process serialized tag descriptions on root, and
823  rval = MB_SUCCESS;
824  if( 0 == rank )
825  {
826  // Process serialized tag descriptions on root, and
827  std::vector< TagDesc* > newlist;
828  rval = check_serial_tag_data( tag_buffer, 0, &newlist );
829  tag_buffer.clear();
830  // re-serialize a unique list of new tag definitions
831  for( size_t i = 0; MB_SUCCESS == rval && i != newlist.size(); ++i )
832  {
833  rval = append_serial_tag_data( tag_buffer, *newlist[i] );
834  CHECK_MB( rval );
835  }
836  }
837 
838  // Broadcast any new tag definitions from root to other procs
839  long this_size = tag_buffer.size();
840  if( MB_SUCCESS != rval ) this_size = -rval;
841  err = MPI_Bcast( &this_size, 1, MPI_LONG, 0, comm );
842  CHECK_MPI( err );
843  if( this_size < 0 )
844  {
845  MB_SET_ERR_CONT( "Inconsistent tag definitions between procs" );
846  return error( (ErrorCode)-this_size );
847  }
848  tag_buffer.resize( this_size );
849  err = MPI_Bcast( &tag_buffer[0], this_size, MPI_UNSIGNED_CHAR, 0, comm );
850  CHECK_MPI( err );
851 
852  // Process new tag definitions
853  rval = check_serial_tag_data( tag_buffer, 0, 0 );
854  CHECK_MB( rval );
855  }
856 
857  subState.end();
858  subState.start( "negotiate which element/tag combinations are dense" );
859 
860  // Figure out for which tag/element combinations we can
861  // write dense tag data.
862 
863  // Construct a table of bits,
864  // where each row of the table corresponds to a tag
865  // and each column to an element group.
866 
867  // Two extra, because first is nodes and last is sets.
868  // (n+7)/8 is ceil(n/8)
869  const int bytes_per_tag = ( exportList.size() + 9 ) / 8;
870  std::vector< unsigned char > data( bytes_per_tag * tagList.size(), 0 );
871  std::vector< unsigned char > recv( data.size(), 0 );
872  unsigned char* iter = &data[0];
873  if( writeTagDense && !data.empty() )
874  {
875  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, iter += bytes_per_tag )
876  {
877 
878  Range tagged;
879  rval = get_sparse_tagged_entities( *tag_iter, tagged );
880  CHECK_MB( rval );
881 
882  int s;
883  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) ) continue;
884 
885  std::string n;
886  iFace->tag_get_name( tag_iter->tag_id,
887  n ); // Second time we've called, so shouldn't fail
888 
889  // Check if we want to write this tag in dense format even if not
890  // all of the entities have a tag value. The criteria for this
891  // are that the tag be dense, have a default value, and have at
892  // least 2/3 of the entities tagged.
893  bool prefer_dense = false;
894  TagType type;
895  rval = iFace->tag_get_type( tag_iter->tag_id, type );
896  CHECK_MB( rval );
897  if( MB_TAG_DENSE == type )
898  {
899  const void* defval = 0;
900  rval = iFace->tag_get_default_value( tag_iter->tag_id, defval, s );
901  if( MB_SUCCESS == rval ) prefer_dense = true;
902  }
903 
904  int i = 0;
905  if( check_dense_format_tag( nodeSet, tagged, prefer_dense ) )
906  {
907  set_bit( i, iter );
908  dbgOut.printf( 2, "Can write dense data for \"%s\"/Nodes\n", n.c_str() );
909  }
910  std::list< ExportSet >::const_iterator ex_iter = exportList.begin();
911  for( ++i; ex_iter != exportList.end(); ++i, ++ex_iter )
912  {
913  // When writing in parallel, some of these element ranges might be empty on
914  // some partitions. Do not turn the tag sparse just because of that; leave it
915  // dense if we prefer dense.
916  if( ( prefer_dense && ex_iter->range.empty() ) ||
917  check_dense_format_tag( *ex_iter, tagged, prefer_dense ) )
918  {
919  set_bit( i, iter );
920  dbgOut.printf( 2, "Can write dense data for \"%s\"/%s\n", n.c_str(), ex_iter->name() );
921  }
922  }
923  if( check_dense_format_tag( setSet, tagged, prefer_dense ) )
924  {
925  set_bit( i, iter );
926  dbgOut.printf( 2, "Can write dense data for \"%s\"/Sets\n", n.c_str() );
927  }
928  }
929 
930  // Do bit-wise AND of list over all processors (only write dense format
931  // if all processes want dense format for this group of entities).
932  err = MPI_Allreduce( &data[0], &recv[0], data.size(), MPI_UNSIGNED_CHAR, MPI_BAND,
933  myPcomm->proc_config().proc_comm() );
934  CHECK_MPI( err );
935  } // if (writeTagDense)
936 
937  // Store initial counts for sparse-formatted tag data.
938  // The total number of values to send and receive will be the number of
939  // tags plus the number of var-len tags because we need to negotiate
940  // offsets into two different tables for the var-len tags.
941  std::vector< long > counts;
942 
943  // Record dense tag/element combinations
944  iter = &recv[0];
945  const unsigned char* iter2 = &data[0];
946  for( tag_iter = tagList.begin(); tag_iter != tagList.end();
947  ++tag_iter, iter += bytes_per_tag, iter2 += bytes_per_tag )
948  {
949 
950  Range tagged;
951  rval = get_sparse_tagged_entities( *tag_iter, tagged );
952  CHECK_MB( rval );
953 
954  std::string n;
955  iFace->tag_get_name( tag_iter->tag_id, n ); // Second time we've called, so shouldn't fail
956 
957  int i = 0;
958  if( get_bit( i, iter ) )
959  {
960  assert( get_bit( i, iter2 ) );
961  tag_iter->dense_list.push_back( nodeSet );
962  tagged -= nodeSet.range;
963  dbgOut.printf( 2, "Will write dense data for \"%s\"/Nodes\n", n.c_str() );
964  }
965  std::list< ExportSet >::const_iterator ex_iter = exportList.begin();
966  for( ++i; ex_iter != exportList.end(); ++i, ++ex_iter )
967  {
968  if( get_bit( i, iter ) )
969  {
970  assert( get_bit( i, iter2 ) );
971  tag_iter->dense_list.push_back( *ex_iter );
972  dbgOut.printf( 2, "Will write dense data for \"%s\"/%s\n", n.c_str(), ex_iter->name() );
973  tagged -= ex_iter->range;
974  }
975  }
976  if( get_bit( i, iter ) )
977  {
978  assert( get_bit( i, iter2 ) );
979  tag_iter->dense_list.push_back( setSet );
980  dbgOut.printf( 2, "Will write dense data for \"%s\"/Sets\n", n.c_str() );
981  tagged -= setSet.range;
982  }
983 
984  counts.push_back( tagged.size() );
985 
986  int s;
987  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) )
988  {
989  unsigned long data_len;
990  rval = get_tag_data_length( *tag_iter, tagged, data_len );
991  CHECK_MB( rval );
992  counts.push_back( data_len );
993  }
994  }
995 
996  subState.end();
997  subState.start( "Negotiate offsets for sparse tag info" );
998 
999  std::vector< long > offsets( counts.size() ), maxima( counts.size() ), totals( counts.size() );
1000  rval = create_dataset( counts.size(), &counts[0], &offsets[0], &maxima[0], &totals[0] );
1001  CHECK_MB( rval );
1002 
1003  // Copy values into local structs and if root then create tables
1004  size_t idx = 0;
1005  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++idx )
1006  {
1007  assert( idx < counts.size() );
1008  tag_iter->sparse_offset = offsets[idx];
1009  tag_iter->max_num_ents = maxima[idx];
1010  tag_iter->write_sparse = ( 0 != totals[idx] );
1011  int s;
1012  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) )
1013  {
1014  ++idx;
1015  assert( idx < counts.size() );
1016  tag_iter->var_data_offset = offsets[idx];
1017  tag_iter->max_num_vals = maxima[idx];
1018  }
1019  else
1020  {
1021  tag_iter->var_data_offset = 0;
1022  tag_iter->max_num_vals = 0;
1023  }
1024  }
1025 
1026  subState.end();
1027 
1028  // Create tag tables on root process
1029  if( 0 == myPcomm->proc_config().proc_rank() )
1030  {
1031  size_t iidx = 0;
1032  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++iidx )
1033  {
1034  assert( iidx < totals.size() );
1035  unsigned long num_ents = totals[iidx];
1036  unsigned long num_val = 0;
1037  int s;
1038  if( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) )
1039  {
1040  ++iidx;
1041  assert( iidx < totals.size() );
1042  num_val = totals[iidx];
1043  }
1044  dbgOut.printf( 2, "Writing tag description for tag 0x%lx with %lu values\n",
1045  (unsigned long)tag_iter->tag_id, num_val ? num_val : num_ents );
1046 
1047  rval = create_tag( *tag_iter, num_ents, num_val );
1048  if( MB_SUCCESS != rval ) return error( rval );
1049  }
1050  }
1051 
1052  if( dbgOut.get_verbosity() > 1 )
1053  {
1054  dbgOut.printf( 2, "Tags: %12s %8s %8s %8s %8s %8s\n", "Name", "Count", "Offset", "Var Off", "Max Ent",
1055  "Handle" );
1056 
1057  for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
1058  {
1059  std::string name;
1060  iFace->tag_get_name( tag_iter->tag_id, name );
1061  size_t this_size;
1062  get_num_sparse_tagged_entities( *tag_iter, this_size );
1063  dbgOut.printf( 2, "%18s %8lu %8lu %8lu %8lu 0x%7lx\n", name.c_str(), (unsigned long)this_size,
1064  (unsigned long)tag_iter->sparse_offset, (unsigned long)tag_iter->var_data_offset,
1065  (unsigned long)tag_iter->max_num_ents, (unsigned long)tag_iter->tag_id );
1066  }
1067  }
1068 
1069  return MB_SUCCESS;
1070 }
1071 
1072 struct DatasetVals
1073 {
1074  long start_id;
1075  long max_count;
1076  long total;
1077 };
1078 STATIC_ASSERT( ( sizeof( DatasetVals ) == 3 * sizeof( long ) ) );
1079 
1080 ErrorCode WriteHDF5Parallel::create_dataset( int num_datasets,
1081  const long* num_owned,
1082  long* offsets_out,
1083  long* max_proc_entities,
1084  long* total_entities,
1085  const DataSetCreator& creator,
1086  ExportSet* groups[],
1087  wid_t* first_ids_out )
1088 {
1089  int result;
1090  ErrorCode rval;
1091  const unsigned rank = myPcomm->proc_config().proc_rank();
1092  const unsigned nproc = myPcomm->proc_config().proc_size();
1093  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1094 
1095  // Gather entity counts for each processor on root
1096  std::vector< long > counts( rank ? 0 : nproc * num_datasets );
1097  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &num_owned, sizeof( long ) );
1098  result = MPI_Gather( const_cast< long* >( num_owned ), num_datasets, MPI_LONG, &counts[0], num_datasets, MPI_LONG,
1099  0, comm );
1100  CHECK_MPI( result );
1101 
1102  // Create node data in file
1103  DatasetVals zero_val = { 0, 0, 0 };
1104  std::vector< DatasetVals > cumulative( num_datasets, zero_val );
1105  if( rank == 0 )
1106  {
1107  for( unsigned i = 0; i < nproc; i++ )
1108  {
1109  const long* proc_data = &counts[i * num_datasets];
1110  for( int index = 0; index < num_datasets; ++index )
1111  {
1112  cumulative[index].total += proc_data[index];
1113  if( proc_data[index] > cumulative[index].max_count ) cumulative[index].max_count = proc_data[index];
1114  }
1115  }
1116 
1117  for( int index = 0; index < num_datasets; ++index )
1118  {
1119  if( cumulative[index].total )
1120  {
1121  rval = creator( this, cumulative[index].total, groups ? groups[index] : 0, cumulative[index].start_id );
1122  CHECK_MB( rval );
1123  }
1124  else
1125  {
1126  cumulative[index].start_id = -1;
1127  }
1128  }
1129  }
1130 
1131  // Send id offset to every proc
1132  result = MPI_Bcast( (void*)&cumulative[0], 3 * num_datasets, MPI_LONG, 0, comm );
1133  CHECK_MPI( result );
1134  for( int index = 0; index < num_datasets; ++index )
1135  {
1136  if( first_ids_out ) first_ids_out[index] = (wid_t)cumulative[index].start_id;
1137  max_proc_entities[index] = cumulative[index].max_count;
1138  total_entities[index] = cumulative[index].total;
1139  }
1140 
1141  // Convert array of per-process counts to per-process offsets
1142  if( rank == 0 )
1143  {
1144  // Initialize prev_size with data sizes for root process
1145  std::vector< long > prev_size( counts.begin(), counts.begin() + num_datasets );
1146  // Root process gets offset zero
1147  std::fill( counts.begin(), counts.begin() + num_datasets, 0L );
1148  // For each proc other than this one (root)
1149  for( unsigned i = 1; i < nproc; ++i )
1150  {
1151  // Get pointer to offsets for previous process in list
1152  long* prev_data = &counts[( i - 1 ) * num_datasets];
1153  // Get pointer to offsets for this process in list
1154  long* proc_data = &counts[i * num_datasets];
1155  // For each data set
1156  for( int j = 0; j < num_datasets; ++j )
1157  {
1158  // Get size of data in dataset from process i
1159  long mysize = proc_data[j];
1160  // Offset for process i is offset of previous process plus
1161  // number of values previous process will write
1162  proc_data[j] = prev_data[j] + prev_size[j];
1163  // Store my size, as it is no longer available in 'counts'
1164  prev_size[j] = mysize;
1165  }
1166  }
1167  }
1168 
1169  // Send each proc its offset in the table
1170  if( rank == 0 )
1171  {
1172  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &counts[0], num_datasets * nproc * sizeof( long ) );
1173  }
1174  result = MPI_Scatter( &counts[0], num_datasets, MPI_LONG, offsets_out, num_datasets, MPI_LONG, 0, comm );
1175  CHECK_MPI( result );
1176 
1177  return MB_SUCCESS;
1178 }
1179 
1180 ErrorCode WriteHDF5Parallel::create_node_table( int dimension )
1181 {
1182  nodeSet.num_nodes = dimension; // Put it here so NodeSetCreator can access it
1183  struct NodeSetCreator : public DataSetCreator
1184  {
1185  ErrorCode operator()( WriteHDF5* file, long count, const ExportSet* group, long& start_id ) const
1186  {
1187  mhdf_Status status;
1188  hid_t handle = mhdf_createNodeCoords( file->file_ptr(), group->num_nodes, count, &start_id, &status );
1189  CHECK_HDFN( status );
1190  mhdf_closeData( file->file_ptr(), handle, &status );
1191  CHECK_HDFN( status );
1192  return MB_SUCCESS;
1193  }
1194  };
1195 
1196  const long count = nodeSet.range.size();
1197  ExportSet* array[] = { &nodeSet };
1198  ErrorCode rval = create_dataset( 1, &count, &nodeSet.offset, &nodeSet.max_num_ents, &nodeSet.total_num_ents,
1199  NodeSetCreator(), array, &nodeSet.first_id );
1200  CHECK_MB( rval );
1201  return assign_ids( nodeSet.range, nodeSet.first_id + nodeSet.offset );
1202 }
1203 
1204 struct elemtype
1205 {
1206  int mbtype;
1207  int numnode;
1208 
1209  elemtype( int vals[2] ) : mbtype( vals[0] ), numnode( vals[1] ) {}
1210  elemtype( int t, int n ) : mbtype( t ), numnode( n ) {}
1211 
1212  bool operator==( const elemtype& other ) const
1213  {
1214  return mbtype == other.mbtype && ( mbtype == MBENTITYSET || numnode == other.numnode );
1215  }
1216  bool operator<( const elemtype& other ) const
1217  {
1218  if( mbtype > other.mbtype ) return false;
1219 
1220  return mbtype < other.mbtype || ( mbtype != MBENTITYSET && numnode < other.numnode );
1221  }
1222  bool operator!=( const elemtype& other ) const
1223  {
1224  return !this->operator==( other );
1225  }
1226 };
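For clarity, a few examples of the ordering this comparator defines (it keeps the merged per-process type lists below sorted; entity sets compare on type only). Illustrative assertions, not part of this file:

    // Sketch: elemtype ordering used when merging type lists.
    assert( elemtype( MBTRI, 3 ) < elemtype( MBTRI, 6 ) );    // same type, fewer nodes first
    assert( elemtype( MBTRI, 6 ) < elemtype( MBQUAD, 4 ) );   // lower entity type first
    assert( elemtype( MBQUAD, 4 ) != elemtype( MBQUAD, 8 ) ); // node count distinguishes them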
1227 
1228 ErrorCode WriteHDF5Parallel::negotiate_type_list()
1229 {
1230  int result;
1231  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1232 
1233  exportList.sort();
1234  int num_types = exportList.size();
1235 
1236  // Get list of types on this processor
1237  typedef std::vector< std::pair< int, int > > typelist;
1238  typelist my_types( num_types );
1239  (void)VALGRIND_MAKE_VEC_UNDEFINED( my_types );
1240  typelist::iterator viter = my_types.begin();
1241  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
1242  {
1243  viter->first = eiter->type;
1244  viter->second = eiter->num_nodes;
1245  ++viter;
1246  }
1247 
1248  dbgOut.print( 2, "Local Element Types:\n" );
1249  for( viter = my_types.begin(); viter != my_types.end(); ++viter )
1250  {
1251  int type = viter->first;
1252  int count = viter->second;
1253  dbgOut.printf( 2, " %s : %d\n", CN::EntityTypeName( (EntityType)type ), count );
1254  }
1255 
1256  // Broadcast number of types from root to all nodes
1257  int num_types0 = num_types;
1258  result = MPI_Bcast( &num_types0, 1, MPI_INT, 0, comm );
1259  CHECK_MPI( result );
1260  // Broadcast type list from root to all nodes
1261  typelist root_types( num_types0 );
1262  if( 0 == myPcomm->proc_config().proc_rank() ) root_types = my_types;
1263  result = MPI_Bcast( (void*)&root_types[0], 2 * num_types0, MPI_INT, 0, comm );
1264  CHECK_MPI( result );
1265 
1266  // Build local list of any types that root did not know about
1267  typelist non_root_types;
1268  viter = root_types.begin();
1269  for( typelist::iterator iter = my_types.begin(); iter != my_types.end(); ++iter )
1270  {
1271  if( viter == root_types.end() || *viter != *iter )
1272  non_root_types.push_back( *iter );
1273  else
1274  ++viter;
1275  }
1276 
1277  // Determine if any process had types not defined on the root
1278  int non_root_count = non_root_types.size();
1279  int not_done;
1280  result = MPI_Allreduce( &non_root_count, &not_done, 1, MPI_INT, MPI_LOR, comm );
1281  CHECK_MPI( result );
1282  if( not_done )
1283  {
1284  // Get number of types each processor has that root does not
1285  std::vector< int > counts( myPcomm->proc_config().proc_size() );
1286  int two_count = 2 * non_root_count;
1287  result = MPI_Gather( &two_count, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, comm );
1288  CHECK_MPI( result );
1289 
1290  // Get list of types from each processor
1291  std::vector< int > displs( myPcomm->proc_config().proc_size() + 1 );
1292  (void)VALGRIND_MAKE_VEC_UNDEFINED( displs );
1293  displs[0] = 0;
1294  for( unsigned long i = 1; i <= myPcomm->proc_config().proc_size(); ++i )
1295  displs[i] = displs[i - 1] + counts[i - 1];
1296  int total = displs[myPcomm->proc_config().proc_size()];
1297  typelist alltypes( total / 2 );
1298  (void)VALGRIND_MAKE_VEC_UNDEFINED( alltypes );
1299  (void)VALGRIND_CHECK_MEM_IS_DEFINED( &non_root_types[0], non_root_types.size() * sizeof( int ) );
1300  result = MPI_Gatherv( (void*)&non_root_types[0], 2 * non_root_count, MPI_INT, (int*)&alltypes[0], &counts[0],
1301  &displs[0], MPI_INT, 0, comm );
1302  CHECK_MPI( result );
1303 
1304  // Merge type lists.
1305  // Prefer O(n) insertions with O(ln n) search time because
1306  // we expect data from a potentially large number of processes,
1307  // but with a small total number of element types.
1308  if( 0 == myPcomm->proc_config().proc_rank() )
1309  {
1310  for( viter = alltypes.begin(); viter != alltypes.end(); ++viter )
1311  {
1312  typelist::iterator titer = std::lower_bound( my_types.begin(), my_types.end(), *viter );
1313  if( titer == my_types.end() || *titer != *viter ) my_types.insert( titer, *viter );
1314  }
1315 
1316  dbgOut.print( 2, "Global Element Types:\n" );
1317  for( viter = my_types.begin(); viter != my_types.end(); ++viter )
1318  dbgOut.printf( 2, " %s : %d\n", CN::EntityTypeName( (EntityType)viter->first ), viter->second );
1319  }
1320 
1321  // Send total number of types to each processor
1322  total = my_types.size();
1323  result = MPI_Bcast( &total, 1, MPI_INT, 0, comm );
1324  CHECK_MPI( result );
1325 
1326  // Send list of types to each processor
1327  my_types.resize( total );
1328  result = MPI_Bcast( (void*)&my_types[0], 2 * total, MPI_INT, 0, comm );
1329  CHECK_MPI( result );
1330  }
1331  else
1332  {
1333  // Special case: if the root had types that some subset of procs did not
1334  // have, but there are no types that the root doesn't know about, then we
1335  // still need to update the processes that are missing those
1336  // types.
1337  my_types.swap( root_types );
1338  }
1339 
1340  // Insert missing types into exportList, with an empty
1341  // range of entities to export.
1342  std::list< ExportSet >::iterator ex_iter = exportList.begin();
1343  for( viter = my_types.begin(); viter != my_types.end(); ++viter )
1344  {
1345  while( ex_iter != exportList.end() && *ex_iter < *viter )
1346  ++ex_iter;
1347 
1348  if( ex_iter == exportList.end() || !( *ex_iter == *viter ) )
1349  {
1350  ExportSet insert;
1351  insert.type = (EntityType)viter->first;
1352  insert.num_nodes = viter->second;
1353  insert.first_id = 0;
1354  insert.offset = 0;
1355  insert.adj_offset = 0;
1356  ex_iter = exportList.insert( ex_iter, insert );
1357  }
1358  }
1359 
1360  return MB_SUCCESS;
1361 }
1362 
1363 ErrorCode WriteHDF5Parallel::create_element_tables()
1364 {
1365  struct ElemSetCreator : public DataSetCreator
1366  {
1367  ErrorCode operator()( WriteHDF5* file, long size, const ExportSet* ex, long& start_id ) const
1368  {
1369  return file->create_elem_table( *ex, size, start_id );
1370  }
1371  };
1372 
1373  const int numtypes = exportList.size();
1374  std::vector< ExportSet* > groups( numtypes );
1375  std::vector< long > counts( numtypes ), offsets( numtypes ), max_ents( numtypes ), total_ents( numtypes );
1376  std::vector< wid_t > start_ids( numtypes );
1377 
1378  size_t idx = 0;
1379  std::list< ExportSet >::iterator ex_iter;
1380  for( ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter, ++idx )
1381  {
1382  groups[idx] = &*ex_iter;
1383  counts[idx] = ex_iter->range.size();
1384  }
1385  ErrorCode rval = create_dataset( numtypes, &counts[0], &offsets[0], &max_ents[0], &total_ents[0], ElemSetCreator(),
1386  &groups[0], &start_ids[0] );
1387  CHECK_MB( rval );
1388 
1389  for( idx = 0, ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter, ++idx )
1390  {
1391  ex_iter->first_id = start_ids[idx];
1392  ex_iter->offset = offsets[idx];
1393  ex_iter->max_num_ents = max_ents[idx];
1394  ex_iter->total_num_ents = total_ents[idx];
1395  rval = assign_ids( ex_iter->range, ex_iter->first_id + ex_iter->offset );
1396  CHECK_MB( rval );
1397  }
1398 
1399  return MB_SUCCESS;
1400 }
1401 
1402 ErrorCode WriteHDF5Parallel::create_adjacency_tables()
1403 {
1404  struct AdjSetCreator : public DataSetCreator
1405  {
1406  ErrorCode operator()( WriteHDF5* file, long size, const ExportSet* ex, long& start_id ) const
1407  {
1408  mhdf_Status status;
1409  hid_t handle = mhdf_createAdjacency( file->file_ptr(), ex->name(), size, &status );
1410  CHECK_HDFN( status );
1411  mhdf_closeData( file->file_ptr(), handle, &status );
1412  CHECK_HDFN( status );
1413  start_id = -1;
1414  return MB_SUCCESS;
1415  }
1416  };
1417 
1418  std::vector< ExportSet* > groups;
1419 #ifdef WRITE_NODE_ADJACENCIES
1420  groups.push_back( &nodeSet );
1421 #endif
1422  for( std::list< ExportSet >::iterator ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter )
1423  groups.push_back( &*ex_iter );
1424 
1425  ErrorCode rval;
1426  const int numtypes = groups.size();
1427  std::vector< long > counts( numtypes );
1428  std::vector< long > offsets( numtypes );
1429  std::vector< long > max_ents( numtypes );
1430  std::vector< long > totals( numtypes );
1431  for( int i = 0; i < numtypes; ++i )
1432  {
1433  wid_t count;
1434  rval = count_adjacencies( groups[i]->range, count );
1435  CHECK_MB( rval );
1436  counts[i] = count;
1437  }
1438 
1439  rval = create_dataset( numtypes, &counts[0], &offsets[0], &max_ents[0], &totals[0], AdjSetCreator(), &groups[0] );
1440  CHECK_MB( rval );
1441 
1442  // Cppcheck warning (false positive): variable groups is assigned a value that is never used
1443  for( int i = 0; i < numtypes; ++i )
1444  {
1445  groups[i]->max_num_adjs = max_ents[i];
1446  groups[i]->adj_offset = offsets[i];
1447  }
1448  return MB_SUCCESS;
1449 }
1450 
1451 const unsigned SSVB = 3;
1452 
1453 void WriteHDF5Parallel::print_set_sharing_data( const Range& range, const char* label, Tag idt )
1454 {
1455  dbgOut.printf( SSVB, "set\tid\towner\t%-*s\tfid\tshared\n", (int)( sizeof( EntityHandle ) * 2 ), "handle" );
1456  for( Range::iterator it = range.begin(); it != range.end(); ++it )
1457  {
1458  int id;
1459  iFace->tag_get_data( idt, &*it, 1, &id );
1460  EntityHandle handle = 0;
1461  unsigned owner = 0;
1462  wid_t file_id = 0;
1463  myPcomm->get_entityset_owner( *it, owner, &handle );
1464  if( !idMap.find( *it, file_id ) ) file_id = 0;
1465  dbgOut.printf( SSVB, "%s\t%d\t%u\t%lx\t%lu\t", label, id, owner, (unsigned long)handle,
1466  (unsigned long)file_id );
1467  std::vector< unsigned > procs;
1468  myPcomm->get_entityset_procs( *it, procs );
1469  if( procs.empty() )
1470  dbgOut.print( SSVB, "<none>\n" );
1471  else
1472  {
1473  for( unsigned i = 0; i < procs.size() - 1; ++i )
1474  dbgOut.printf( SSVB, "%u,", procs[i] );
1475  dbgOut.printf( SSVB, "%u\n", procs.back() );
1476  }
1477  }
1478 }
1479 
1480 void WriteHDF5Parallel::print_shared_sets()
1481 {
1482  const char* tag_names[][2] = { { MATERIAL_SET_TAG_NAME, "block" },
1483  { DIRICHLET_SET_TAG_NAME, "nodeset" },
1484  { NEUMANN_SET_TAG_NAME, "sideset" },
1485  { 0, 0 } };
1486 
1487  for( int i = 0; tag_names[i][0]; ++i )
1488  {
1489  Tag tag;
1490  if( MB_SUCCESS != iFace->tag_get_handle( tag_names[i][0], 1, MB_TYPE_INTEGER, tag ) ) continue;
1491 
1492  Range tagged;
1493  iFace->get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, tagged );
1494  print_set_sharing_data( tagged, tag_names[i][1], tag );
1495  }
1496 
1497  Tag geom, id;
1498  if( MB_SUCCESS != iFace->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom ) ) return;
1499  id = iFace->globalId_tag();
1500 
1501  const char* geom_names[] = { "vertex", "curve", "surface", "volume" };
1502  for( int d = 0; d <= 3; ++d )
1503  {
1504  Range tagged;
1505  const void* vals[] = { &d };
1506  iFace->get_entities_by_type_and_tag( 0, MBENTITYSET, &geom, vals, 1, tagged );
1507  print_set_sharing_data( tagged, geom_names[d], id );
1508  }
1509 }
1510 
1511 ErrorCode WriteHDF5Parallel::communicate_shared_set_ids( const Range& owned, const Range& remote )
1512 {
1513  ErrorCode rval;
1514  int mperr;
1515  const int TAG = 0xD0E;
1516  // const unsigned rank = myPcomm->proc_config().proc_rank();
1517  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1518 
1519  dbgOut.tprint( 1, "COMMUNICATING SHARED SET IDS\n" );
1520  dbgOut.print( 6, "Owned, shared sets: ", owned );
1521 
1522  // Post receive buffers for all procs for which we share sets
1523 
1524  std::vector< unsigned > procs;
1525  rval = myPcomm->get_entityset_owners( procs );
1526  CHECK_MB( rval );
1527  std::vector< unsigned >::iterator it = std::find( procs.begin(), procs.end(), myPcomm->proc_config().proc_rank() );
1528  if( it != procs.end() ) procs.erase( it );
1529 
1530  std::vector< MPI_Request > recv_req( procs.size(), MPI_REQUEST_NULL );
1531  std::vector< std::vector< unsigned long > > recv_buf( procs.size() );
1532 
1533  size_t recv_count = 0;
1534  for( size_t i = 0; i < procs.size(); ++i )
1535  {
1536  Range tmp;
1537  rval = myPcomm->get_owned_sets( procs[i], tmp );
1538  CHECK_MB( rval );
1539  size_t count =
1540  intersect( tmp, remote ).size(); // Necessary because we might not be writing all of the database
1541  if( count )
1542  {
1543  dbgOut.printf( 6, "Sets owned by proc %u (remote handles): ", procs[i] );
1544  if( dbgOut.get_verbosity() >= 6 )
1545  {
1546  Range remote_handles;
1547  tmp = intersect( tmp, remote );
1548  for( Range::iterator j = tmp.begin(); j != tmp.end(); ++j )
1549  {
1550  unsigned r;
1551  EntityHandle h;
1552  myPcomm->get_entityset_owner( *j, r, &h );
1553  assert( r == procs[i] );
1554  remote_handles.insert( h );
1555  }
1556  dbgOut.print( 6, remote_handles );
1557  }
1558  recv_count++;
1559  recv_buf[i].resize( 2 * count + 1 );
1560  dbgOut.printf( 5, "Posting receive buffer of size %lu for proc %u (%lu of %lu owned sets)\n",
1561  (unsigned long)recv_buf[i].size(), procs[i], count, tmp.size() );
1562  mperr =
1563  MPI_Irecv( &recv_buf[i][0], recv_buf[i].size(), MPI_UNSIGNED_LONG, procs[i], TAG, comm, &recv_req[i] );
1564  CHECK_MPI( mperr );
1565  }
1566  }
1567 
1568  // Send set ids to all procs with which we share them
1569 
1570  // First build per-process lists of sets for which we need to send data
1571  std::map< unsigned, Range > send_sets;
1572  std::vector< unsigned > set_procs;
1573  for( Range::reverse_iterator i = owned.rbegin(); i != owned.rend(); ++i )
1574  {
1575  set_procs.clear();
1576  rval = myPcomm->get_entityset_procs( *i, set_procs );
1577  CHECK_MB( rval );
1578  for( size_t j = 0; j < set_procs.size(); ++j )
1579  if( set_procs[j] != myPcomm->proc_config().proc_rank() ) send_sets[set_procs[j]].insert( *i );
1580  }
1581  assert( send_sets.find( myPcomm->proc_config().proc_rank() ) == send_sets.end() );
1582 
1583  // Now send the data
1584  std::vector< std::vector< unsigned long > > send_buf( send_sets.size() );
1585  std::vector< MPI_Request > send_req( send_sets.size() );
1586  std::map< unsigned, Range >::iterator si = send_sets.begin();
1587  for( size_t i = 0; si != send_sets.end(); ++si, ++i )
1588  {
1589  dbgOut.printf( 6, "Sending data for shared sets to proc %u: ", si->first );
1590  dbgOut.print( 6, si->second );
1591 
1592  send_buf[i].reserve( 2 * si->second.size() + 1 );
1593  send_buf[i].push_back( si->second.size() );
1594  for( Range::iterator j = si->second.begin(); j != si->second.end(); ++j )
1595  {
1596  send_buf[i].push_back( *j );
1597  send_buf[i].push_back( idMap.find( *j ) );
1598  }
1599  dbgOut.printf( 5, "Sending buffer of size %lu to proc %u (%lu of %lu owned sets)\n",
1600  (unsigned long)send_buf[i].size(), si->first, si->second.size(), owned.size() );
1601  mperr = MPI_Isend( &send_buf[i][0], send_buf[i].size(), MPI_UNSIGNED_LONG, si->first, TAG, comm, &send_req[i] );
1602  }
1603 
1604  // Process received data
1605  MPI_Status status;
1606  int idx;
1607  while( recv_count-- )
1608  {
1609  mperr = MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status );
1610  CHECK_MPI( mperr );
1611 
1612  assert( (unsigned)status.MPI_SOURCE == procs[idx] );
1613  assert( 2 * recv_buf[idx].front() + 1 == recv_buf[idx].size() );
1614  const size_t n = std::min< size_t >( recv_buf[idx].front(), ( recv_buf[idx].size() - 1 ) / 2 );
1615  dbgOut.printf( 5, "Received buffer of size %lu from proc %d\n", (unsigned long)( 2 * n + 1 ),
1616  (int)status.MPI_SOURCE );
1617 
1618  for( size_t i = 0; i < n; ++i )
1619  {
1620  EntityHandle handle = 0;
1621  rval = myPcomm->get_entityset_local_handle( procs[idx], recv_buf[idx][2 * i + 1], handle );
1622  CHECK_MB( rval );
1623  assert( handle != 0 );
1624  if( !idMap.insert( handle, recv_buf[idx][2 * i + 2], 1 ).second )
1625  error( MB_FAILURE ); // Conflicting IDs??????
1626  }
1627 
1628  recv_req[idx] = MPI_REQUEST_NULL;
1629  }
1630  assert( MPI_SUCCESS == MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status ) &&
1631  MPI_UNDEFINED == idx ); // Check that we got them all
1632 
1633  // Wait for all sends to complete before we release send
1634  // buffers (implicitly releases when we return from this function)
1635 
1636  std::vector< MPI_Status > stats( send_req.size() );
1637  mperr = MPI_Waitall( send_req.size(), &send_req[0], &stats[0] );
1638  CHECK_MPI( mperr );
1639 
1640  if( dbgOut.get_verbosity() >= SSVB ) print_shared_sets();
1641 
1642  return MB_SUCCESS;
1643 }
1644 
1645 // void get_global_ids(Interface* iFace, const unsigned long* ptr,
1646 // size_t len, unsigned flags,
1647 // std::vector<int>& ids)
1648 //{
1649 // Tag idtag;
1650 // iFace->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, idtag);
1651 // for (size_t i = 0; i < len; ++i) {
1652 // if (flags & MESHSET_ORDERED) {
1653 // int tmp;
1654 // iFace->tag_get_data(idtag, ptr + i, 1, &tmp);
1655 // ids.push_back(tmp);
1656 // continue;
1657 // }
1658 //
1659 // EntityHandle s = ptr[i];
1660 // EntityHandle e = ptr[++i];
1661 // for (; s <= e; ++s) {
1662 // int tmp;
1663 // iFace->tag_get_data(idtag, &s, 1, &tmp);
1664 // ids.push_back(tmp);
1665 // }
1666 // }
1667 //}
1668 
1669 ErrorCode WriteHDF5Parallel::pack_set( Range::const_iterator it, unsigned long* buffer, size_t buffer_size )
1670 {
1671  ErrorCode rval;
1672  const EntityHandle* ptr;
1673  int len;
1674  unsigned char flags;
1675  std::vector< wid_t > tmp;
1676  size_t newlen;
1677 
1678  // Buffer must always contain at least flags and desired sizes
1679  assert( buffer_size >= 4 );
1680  buffer_size -= 4;
1681 
1682  Range::const_iterator nd = it;
1683  ++nd;
1684  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::CONTENTS, &len, &flags );
1685  CHECK_MB( rval );
1686 
1687  // Tag mattag;
1688  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
1689  // int block;
1690  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &*it, 1, &block))
1691  // block = 0;
1692  //
1693  // if (block) {
1694  // std::vector<int> ids;
1695  // get_global_ids(iFace, ptr, len, flags, ids);
1696  //}
1697 
1698  if( len && !( flags & MESHSET_ORDERED ) )
1699  {
1700  tmp.clear();
1701  bool blocked = false;
1702  assert( ( 0 == len % 2 ) );
1703  rval = range_to_blocked_list( ptr, len / 2, tmp, blocked );
1704  CHECK_MB( rval );
1705  if( blocked ) flags |= mhdf_SET_RANGE_BIT;
1706  }
1707  else
1708  {
1709  tmp.resize( len );
1710  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true );
1711  CHECK_MB( rval );
1712  tmp.resize( newlen );
1713  }
1714 
1715  buffer[0] = flags;
1716  buffer[1] = tmp.size();
1717  if( tmp.size() <= buffer_size ) std::copy( tmp.begin(), tmp.end(), buffer + 4 );
1718 
1719  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::CHILDREN, &len );
1720  CHECK_MB( rval );
1721  tmp.resize( len );
1722  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true );
1723  tmp.resize( newlen );
1724  buffer[2] = tmp.size();
1725  if( tmp.size() <= buffer_size - buffer[1] ) std::copy( tmp.begin(), tmp.end(), buffer + 4 + buffer[1] );
1726 
1727  rval = writeUtil->get_entity_list_pointers( it, nd, &ptr, WriteUtilIface::PARENTS, &len );
1728  CHECK_MB( rval );
1729  tmp.resize( len );
1730  rval = vector_to_id_list( ptr, len, &tmp[0], newlen, true );
1731  tmp.resize( newlen );
1732  buffer[3] = tmp.size();
1733  if( tmp.size() <= buffer_size - buffer[1] - buffer[2] )
1734  std::copy( tmp.begin(), tmp.end(), buffer + 4 + buffer[1] + buffer[2] );
1735 
1736  return MB_SUCCESS;
1737 }
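// --- Illustrative sketch (not part of WriteHDF5Parallel.cpp) ----------------
// pack_set()/unpack_set() use a four-word header followed by the three id
// lists stored back to back:
//   [ flags, n_contents, n_children, n_parents,
//     contents[0..n_contents), children[0..n_children), parents[0..n_parents) ]
// A standalone reader for that layout (hypothetical helper, standard library
// only), mirroring the bounds checks that unpack_set() asserts:
#include <cstddef>

struct PackedSetView
{
    unsigned long flags;
    const unsigned long* contents;
    std::size_t num_contents;
    const unsigned long* children;
    std::size_t num_children;
    const unsigned long* parents;
    std::size_t num_parents;
};

static bool view_packed_set( const unsigned long* buffer, std::size_t buffer_size, PackedSetView& view )
{
    if( buffer_size < 4 ) return false;
    view.flags        = buffer[0];
    view.num_contents = buffer[1];
    view.num_children = buffer[2];
    view.num_parents  = buffer[3];
    if( view.num_contents + view.num_children + view.num_parents > buffer_size - 4 ) return false;
    view.contents = buffer + 4;
    view.children = view.contents + view.num_contents;
    view.parents  = view.children + view.num_children;
    return true;
}
// -----------------------------------------------------------------------------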
1738 
1739 template < typename TYPE >
1740 static void convert_to_ranged_ids( const TYPE* buffer, size_t len, std::vector< WriteHDF5::wid_t >& result )
1741 {
1742  if( !len )
1743  {
1744  result.clear();
1745  return;
1746  }
1747 
1748  result.resize( len * 2 );
1749  Range tmp;
1750  for( size_t i = 0; i < len; i++ )
1751  tmp.insert( (EntityHandle)buffer[i] );
1752  result.resize( tmp.psize() * 2 );
1753  int j = 0;
1754  for( Range::const_pair_iterator pit = tmp.const_pair_begin(); pit != tmp.const_pair_end(); ++pit, j++ )
1755  {
1756  result[2 * j] = pit->first;
1757  result[2 * j + 1] = pit->second - pit->first + 1;
1758  }
1759 }
1760 
1761 static void merge_ranged_ids( const unsigned long* range_list, size_t len, std::vector< WriteHDF5::wid_t >& result )
1762 {
1763  typedef WriteHDF5::wid_t wid_t;
1764  assert( 0 == len % 2 );
1765  assert( 0 == result.size() % 2 );
1766  STATIC_ASSERT( sizeof( std::pair< wid_t, wid_t > ) == 2 * sizeof( wid_t ) );
1767 
1768  result.insert( result.end(), range_list, range_list + len );
1769  size_t plen = result.size() / 2;
1770  Range tmp;
1771  for( size_t i = 0; i < plen; i++ )
1772  {
1773  EntityHandle starth = (EntityHandle)result[2 * i];
1774  EntityHandle endh = (EntityHandle)result[2 * i] + (wid_t)result[2 * i + 1] - 1; // id + count - 1
1775  tmp.insert( starth, endh );
1776  }
1777  // Now convert back to std::vector<WriteHDF5::wid_t>, compressed range format
1778  result.resize( tmp.psize() * 2 );
1779  int j = 0;
1780  for( Range::const_pair_iterator pit = tmp.const_pair_begin(); pit != tmp.const_pair_end(); ++pit, j++ )
1781  {
1782  result[2 * j] = pit->first;
1783  result[2 * j + 1] = pit->second - pit->first + 1;
1784  }
1785 }
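// --- Illustrative sketch (not part of WriteHDF5Parallel.cpp) ----------------
// convert_to_ranged_ids() and merge_ranged_ids() above both rely on the
// "compressed range" encoding: sorted ids collapse into (start, count) pairs,
// e.g. { 3, 4, 5, 9, 10, 20 } becomes [ 3,3, 9,2, 20,1 ].  Below is a
// standalone merge of two such lists using std::set in place of moab::Range
// (which does the coalescing in the real code); the function name is
// hypothetical:
#include <cstddef>
#include <set>
#include <vector>

static void merge_range_lists( const unsigned long* a, std::size_t a_len,
                               const unsigned long* b, std::size_t b_len,
                               std::vector< unsigned long >& merged )
{
    // Expand both [start,count,...] lists into one sorted, de-duplicated set.
    std::set< unsigned long > ids;
    for( std::size_t i = 0; i + 1 < a_len; i += 2 )
        for( unsigned long k = 0; k < a[i + 1]; ++k )
            ids.insert( a[i] + k );
    for( std::size_t i = 0; i + 1 < b_len; i += 2 )
        for( unsigned long k = 0; k < b[i + 1]; ++k )
            ids.insert( b[i] + k );

    // Re-compress into (start, count) pairs.
    merged.clear();
    for( std::set< unsigned long >::const_iterator i = ids.begin(); i != ids.end(); ++i )
    {
        if( !merged.empty() && *i == merged[merged.size() - 2] + merged.back() )
            ++merged.back(); // id extends the previous run
        else
        {
            merged.push_back( *i ); // start of a new run
            merged.push_back( 1 );  // run length
        }
    }
}
// -----------------------------------------------------------------------------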
1786 
1787 static void merge_vector_ids( const unsigned long* list, size_t len, std::vector< WriteHDF5::wid_t >& result )
1788 {
1789  result.insert( result.end(), list, list + len );
1790 }
1791 
1792 ErrorCode WriteHDF5Parallel::unpack_set( EntityHandle set, const unsigned long* buffer, size_t buffer_size )
1793 {
1794  // Use local variables for readability
1795  assert( buffer_size >= 4 );
1796  assert( buffer[1] + buffer[2] + buffer[3] <= buffer_size );
1797  const unsigned long flags = buffer[0];
1798  unsigned long num_content = buffer[1];
1799  const unsigned long num_child = buffer[2];
1800  const unsigned long num_parent = buffer[3];
1801  const unsigned long* contents = buffer + 4;
1802  const unsigned long* children = contents + num_content;
1803  const unsigned long* parents = children + num_child;
1804 
1805  SpecialSetData* data = find_set_data( set );
1806  assert( NULL != data );
1807  if( NULL == data ) return MB_FAILURE;
1808 
1809  // Tag mattag;
1810  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
1811  // int block;
1812  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &set, 1, &block))
1813  // block = 0;
1814 
1815  // If either the current data or the new data is in ranged format,
1816  // then convert the other to ranged format as well; whenever the two
1817  // formats differ, the merged data ends up in "compressed range" form.
1818  std::vector< wid_t > tmp;
1819  if( ( flags & mhdf_SET_RANGE_BIT ) != ( data->setFlags & mhdf_SET_RANGE_BIT ) )
1820  {
1821  if( flags & mhdf_SET_RANGE_BIT )
1822  {
1823  tmp = data->contentIds;
1824  convert_to_ranged_ids( &tmp[0], tmp.size(), data->contentIds );
1825  data->setFlags |= mhdf_SET_RANGE_BIT;
1826  }
1827  else
1828  {
1829  tmp.clear();
1830  convert_to_ranged_ids( contents, num_content, tmp );
1831  num_content = tmp.size();
1832  if( sizeof( wid_t ) < sizeof( long ) )
1833  {
1834  size_t old_size = tmp.size();
1835  tmp.resize( sizeof( long ) * old_size / sizeof( wid_t ) );
1836  unsigned long* array = reinterpret_cast< unsigned long* >( &tmp[0] );
1837  for( long i = ( (long)old_size ) - 1; i >= 0; --i )
1838  array[i] = tmp[i];
1839  contents = array;
1840  }
1841  else if( sizeof( wid_t ) > sizeof( long ) )
1842  {
1843  unsigned long* array = reinterpret_cast< unsigned long* >( &tmp[0] );
1844  std::copy( tmp.begin(), tmp.end(), array );
1845  }
1846  contents = reinterpret_cast< unsigned long* >( &tmp[0] );
1847  }
1848  }
1849 
1850  if( data->setFlags & mhdf_SET_RANGE_BIT )
1851  merge_ranged_ids( contents, num_content, data->contentIds );
1852  else
1853  merge_vector_ids( contents, num_content, data->contentIds );
1854 
1855  merge_vector_ids( children, num_child, data->childIds );
1856  merge_vector_ids( parents, num_parent, data->parentIds );
1857  return MB_SUCCESS;
1858 }
1859 
1860 ErrorCode WriteHDF5Parallel::communicate_shared_set_data( const Range& owned, const Range& remote )
1861 {
1862  ErrorCode rval;
1863  int mperr;
1864  const unsigned rank = myPcomm->proc_config().proc_rank();
1865  const MPI_Comm comm = myPcomm->proc_config().proc_comm();
1866 
1867  dbgOut.tprintf( 1, "COMMUNICATING SHARED SET DATA (%lu owned & %lu remote)\n", (unsigned long)owned.size(),
1868  (unsigned long)remote.size() );
1869 
1870  // Calculate the total number of messages to be in transit (send and receive)
1871  size_t nummess = 0;
1872  std::vector< unsigned > procs;
1873 
1874  Range shared( owned );
1875  shared.merge( remote );
1876  for( Range::iterator i = shared.begin(); i != shared.end(); ++i )
1877  {
1878  procs.clear();
1879  rval = myPcomm->get_entityset_procs( *i, procs );
1880  CHECK_MB( rval );
1881  nummess += procs.size();
1882  }
1883 
1884  // Choose a receive buffer size. We need 4*sizeof(long) minimum,
1885  // but that is almost useless, so use 16*sizeof(long) as the minimum
1886  // instead. Choose an upper limit such that we don't exceed 32 MB
1887  // of allocated memory (unless we absolutely must in order to meet the minimum).
1888  // Also, don't initially choose buffers larger than INIT_BUFFER_SIZE longs.
1889  const size_t MAX_BUFFER_MEM = 32 * 1024 * 1024 / sizeof( long );
1890  // const size_t INIT_BUFFER_SIZE = 128;
1891  const size_t INIT_BUFFER_SIZE = 1024;
1892  const size_t MIN_BUFFER_SIZE = 16;
1893  size_t init_buff_size = INIT_BUFFER_SIZE;
1894  if( init_buff_size * nummess > MAX_BUFFER_MEM ) init_buff_size = MAX_BUFFER_MEM / nummess;
1895  if( init_buff_size < MIN_BUFFER_SIZE ) init_buff_size = MIN_BUFFER_SIZE;
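  // Worked example (illustrative numbers, assuming an 8-byte long):
  // MAX_BUFFER_MEM = 32*1024*1024/8 = 4194304 longs.  If nummess were
  // 100000, keeping INIT_BUFFER_SIZE (1024) would require ~100 million
  // longs in total, so init_buff_size is clamped to 4194304/100000 = 41,
  // which is still above MIN_BUFFER_SIZE (16) and therefore kept.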
1896 
1897  dbgOut.printf( 2, "Using buffer size of %lu for an expected message count of %lu\n", (unsigned long)init_buff_size,
1898  (unsigned long)nummess );
1899 
1900  // Count number of recvs
1901  size_t numrecv = 0;
1902  for( Range::iterator i = owned.begin(); i != owned.end(); ++i )
1903  {
1904  procs.clear();
1905  rval = myPcomm->get_entityset_procs( *i, procs );
1906  CHECK_MB( rval );
1907  numrecv += procs.size();
1908  if( std::find( procs.begin(), procs.end(), rank ) != procs.end() ) --numrecv;
1909  }
1910 
1911  // Post receive buffers for all owned sets for all sharing procs
1912  std::vector< MPI_Request > recv_req( numrecv, MPI_REQUEST_NULL );
1913  std::vector< MPI_Request > lrecv_req( numrecv, MPI_REQUEST_NULL );
1914 
1915  std::vector< std::vector< unsigned long > > recv_buf( numrecv, std::vector< unsigned long >( init_buff_size ) );
1916  int idx = 0;
1917  for( Range::iterator i = owned.begin(); i != owned.end(); ++i )
1918  {
1919  procs.clear();
1920  rval = myPcomm->get_entityset_procs( *i, procs );
1921  CHECK_MB( rval );
1922  for( size_t j = 0; j < procs.size(); ++j )
1923  {
1924  if( procs[j] == rank ) continue;
1925  int tag = ID_FROM_HANDLE( *i );
1926  if( *i != CREATE_HANDLE( MBENTITYSET, tag ) )
1927  {
1928 #ifndef NDEBUG
1929  abort();
1930 #endif
1931  CHECK_MB( MB_FAILURE );
1932  }
1933  dbgOut.printf( 5, "Posting buffer to receive set %d from proc %u\n", tag, procs[j] );
1934  mperr =
1935  MPI_Irecv( &recv_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, procs[j], tag, comm, &recv_req[idx] );
1936  CHECK_MPI( mperr );
1937  ++idx;
1938  }
1939  }
1940  assert( (size_t)idx == numrecv );
1941 
1942  // Now send set data for all remote sets that I know about
1943  std::vector< MPI_Request > send_req( remote.size() );
1944  std::vector< std::vector< unsigned long > > send_buf( remote.size() );
1945  idx = 0;
1946  for( Range::iterator i = remote.begin(); i != remote.end(); ++i, ++idx )
1947  {
1948  send_buf[idx].resize( init_buff_size );
1949  rval = pack_set( i, &send_buf[idx][0], init_buff_size );
1950  CHECK_MB( rval );
1951  EntityHandle remote_handle;
1952  unsigned owner;
1953  rval = myPcomm->get_entityset_owner( *i, owner, &remote_handle );
1954  CHECK_MB( rval );
1955 
1956  int tag = ID_FROM_HANDLE( remote_handle );
1957  assert( remote_handle == CREATE_HANDLE( MBENTITYSET, tag ) );
1958  dbgOut.printf( 5, "Sending %lu values for set %d to proc %u\n",
1959  send_buf[idx][1] + send_buf[idx][2] + send_buf[idx][3] + 4, tag, owner );
1960  mperr = MPI_Isend( &send_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, owner, tag, comm, &send_req[idx] );
1961  CHECK_MPI( mperr );
1962  }
1963 
1964  // Tag mattag;
1965  // iFace->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag);
1966 
1967  // Now initialize local data for managing contents of owned, shared sets
1968  assert( specialSets.empty() );
1969  specialSets.clear();
1970  specialSets.reserve( owned.size() );
1971  for( Range::iterator i = owned.begin(); i != owned.end(); ++i )
1972  {
1973  // int block;
1974  // if (MB_SUCCESS != iFace->tag_get_data(mattag, &*i, 1, &block))
1975  // block = 0;
1976  // std::vector<int> ids;
1977 
1978  SpecialSetData data;
1979  data.setHandle = *i;
1980  rval = iFace->get_meshset_options( *i, data.setFlags );
1981  CHECK_MB( rval );
1982  specialSets.push_back( data );
1983  std::vector< EntityHandle > list;
1984  if( data.setFlags & MESHSET_ORDERED )
1985  {
1986  list.clear();
1987  rval = iFace->get_entities_by_handle( *i, list );
1988  CHECK_MB( rval );
1989  rval = vector_to_id_list( list, specialSets.back().contentIds, true );
1990  CHECK_MB( rval );
1991  // if (block)
1992  // get_global_ids(iFace, &list[0], list.size(), MESHSET_ORDERED, ids);
1993  }
1994  else
1995  {
1996  Range range;
1997  rval = iFace->get_entities_by_handle( *i, range );
1998  CHECK_MB( rval );
1999  bool ranged;
2000  rval = range_to_blocked_list( range, specialSets.back().contentIds, ranged );
2001  if( ranged ) specialSets.back().setFlags |= mhdf_SET_RANGE_BIT;
2002  // if (block) {
2003  // std::vector<EntityHandle> tmp;
2004  // for (Range::const_pair_iterator pi = range.const_pair_begin(); pi !=
2005  // range.const_pair_end(); ++pi) {
2006  // tmp.push_back(pi->first);
2007  // tmp.push_back(pi->second);
2008  // }
2009  // get_global_ids(iFace, &tmp[0], tmp.size(), ranged ? 0 : MESHSET_ORDERED, ids);
2010  //}
2011  }
2012 
2013  list.clear();
2014  rval = iFace->get_parent_meshsets( *i, list );
2015  CHECK_MB( rval );
2016  rval = vector_to_id_list( list, specialSets.back().parentIds, true );
2017  CHECK_MB( rval );
2018  rval = iFace->get_child_meshsets( *i, list );
2019  CHECK_MB( rval );
2020  rval = vector_to_id_list( list, specialSets.back().childIds, true );
2021  CHECK_MB( rval );
2022  }
2023 
2024  // Process received buffers, repost larger buffers where necessary
2025  size_t remaining = numrecv;
2026  numrecv = 0;
2027  while( remaining-- )
2028  {
2029  std::vector< unsigned long > dead;
2030  MPI_Status status;
2031  mperr = MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status );
2032  CHECK_MPI( mperr );
2033  EntityHandle handle = CREATE_HANDLE( MBENTITYSET, status.MPI_TAG );
2034  std::vector< unsigned long >& buff = recv_buf[idx];
2035  size_t size = buff[1] + buff[2] + buff[3] + 4;
2036  dbgOut.printf( 5, "Received %lu values for set %d from proc %d\n", (unsigned long)size, status.MPI_TAG,
2037  status.MPI_SOURCE );
2038  if( size <= init_buff_size )
2039  {
2040  rval = unpack_set( handle, &buff[0], init_buff_size );
2041  CHECK_MB( rval );
2042  dead.swap( buff ); // Release memory
2043  }
2044  else
2045  {
2046  // Data was too big for init_buff_size
2047  // repost with larger buffer
2048  buff.resize( size );
2049  dbgOut.printf( 5, "Re-Posting buffer to receive set %d from proc %d with size %lu\n", status.MPI_TAG,
2050  status.MPI_SOURCE, (unsigned long)size );
2051  mperr = MPI_Irecv( &buff[0], size, MPI_UNSIGNED_LONG, status.MPI_SOURCE, status.MPI_TAG, comm,
2052  &lrecv_req[idx] );
2053  CHECK_MPI( mperr );
2054  ++numrecv;
2055  }
2056  recv_req[idx] = MPI_REQUEST_NULL;
2057  }
2058 
2059  // Wait for sends to complete
2060  MPI_Waitall( send_req.size(), &send_req[0], MPI_STATUSES_IGNORE );
2061 
2062  // Re-send sets that didn't fit initial buffer size
2063  idx = 0;
2064  for( Range::iterator i = remote.begin(); i != remote.end(); ++i, ++idx )
2065  {
2066  std::vector< unsigned long >& buff = send_buf[idx];
2067  size_t size = buff[1] + buff[2] + buff[3] + 4;
2068  if( size <= init_buff_size ) continue;
2069 
2070  buff.resize( size );
2071  rval = pack_set( i, &buff[0], size );
2072  CHECK_MB( rval );
2073  EntityHandle remote_handle;
2074  unsigned owner;
2075  rval = myPcomm->get_entityset_owner( *i, owner, &remote_handle );
2076  CHECK_MB( rval );
2077 
2078  int tag = ID_FROM_HANDLE( remote_handle );
2079  assert( remote_handle == CREATE_HANDLE( MBENTITYSET, tag ) );
2080  dbgOut.printf( 5, "Sending %lu values for set %d to proc %u\n", (unsigned long)size, tag, owner );
2081  mperr = MPI_Isend( &buff[0], size, MPI_UNSIGNED_LONG, owner, tag, comm, &send_req[idx] );
2082  CHECK_MPI( mperr );
2083  }
2084 
2085  // Process received buffers
2086  remaining = numrecv;
2087  while( remaining-- )
2088  {
2089  std::vector< unsigned long > dead;
2090  MPI_Status status;
2091  mperr = MPI_Waitany( lrecv_req.size(), &lrecv_req[0], &idx, &status );
2092  CHECK_MPI( mperr );
2093  EntityHandle handle = CREATE_HANDLE( MBENTITYSET, status.MPI_TAG );
2094  std::vector< unsigned long >& buff = recv_buf[idx];
2095  dbgOut.printf( 5, "Received %lu values for set %d from proc %d\n", 4 + buff[1] + buff[2] + buff[3],
2096  status.MPI_TAG, status.MPI_SOURCE );
2097  rval = unpack_set( handle, &buff[0], buff.size() );
2098  CHECK_MB( rval );
2099  dead.swap( buff ); // Release memory
2100 
2101  lrecv_req[idx] = MPI_REQUEST_NULL;
2102  }
2103 
2104  // Wait for sends to complete
2105  MPI_Waitall( send_req.size(), &send_req[0], MPI_STATUSES_IGNORE );
2106 
2107  return MB_SUCCESS;
2108 }
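// --- Illustrative sketch (not part of WriteHDF5Parallel.cpp) ----------------
// communicate_shared_set_data() uses a two-round protocol: every set is first
// sent in a fixed-size buffer whose header gives the full packed size; when
// the payload did not fit, the receiver reposts a larger receive and the
// sender re-packs and re-sends the complete message.  A minimal two-rank MPI
// program following the same pattern (illustrative only; run with
// "mpiexec -n 2"):
#include <mpi.h>
#include <cstdio>
#include <vector>

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    const int INIT = 8; // fixed first-round buffer size, in unsigned longs

    if( rank == 1 )
    {
        // Payload layout: [ total_size, data... ], deliberately larger than INIT.
        std::vector< unsigned long > payload( 20, 7UL );
        payload[0] = payload.size();
        // Round 1: send only what fits in the fixed-size buffer.
        MPI_Send( &payload[0], INIT, MPI_UNSIGNED_LONG, 0, 1, MPI_COMM_WORLD );
        // Round 2: the payload did not fit, so send it in full.
        MPI_Send( &payload[0], (int)payload.size(), MPI_UNSIGNED_LONG, 0, 1, MPI_COMM_WORLD );
    }
    else if( rank == 0 )
    {
        std::vector< unsigned long > buf( INIT );
        MPI_Recv( &buf[0], INIT, MPI_UNSIGNED_LONG, 1, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
        if( buf[0] > (unsigned long)INIT ) // header says the data did not fit
        {
            buf.resize( buf[0] );
            MPI_Recv( &buf[0], (int)buf.size(), MPI_UNSIGNED_LONG, 1, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
        }
        std::printf( "received %lu values\n", (unsigned long)buf.size() );
    }

    MPI_Finalize();
    return 0;
}
// -----------------------------------------------------------------------------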
2109 
2110 ErrorCode WriteHDF5Parallel::create_meshset_tables( double* times )
2111 {
2112  Range::const_iterator riter;
2113  const unsigned rank = myPcomm->proc_config().proc_rank();
2114 
2115  START_SERIAL;
2117  END_SERIAL;
2118  CpuTimer timer;
2119 
2120  // Remove remote sets from setSets
2121  Range shared, owned, remote;
2122  ErrorCode rval = myPcomm->get_shared_sets( shared );
2123  CHECK_MB( rval );
2124  shared = intersect( shared, setSet.range );
2125  rval = myPcomm->get_owned_sets( rank, owned );
2126  CHECK_MB( rval );
2127  owned = intersect( owned, setSet.range );
2128  remote = subtract( shared, owned );
2129  setSet.range = subtract( setSet.range, remote );
2130 
2131  // Create set meta table
2132  struct SetDescCreator : public DataSetCreator
2133  {
2134  ErrorCode operator()( WriteHDF5* writer, long size, const ExportSet*, long& start_id ) const
2135  {
2136  return writer->create_set_meta( size, start_id );
2137  }
2138  };
2139  long count = setSet.range.size();
2140  rval = create_dataset( 1, &count, &setSet.offset, &setSet.max_num_ents, &setSet.total_num_ents, SetDescCreator(),
2141  NULL, &setSet.first_id );
2142  CHECK_MB( rval );
2144 
2146  CHECK_MB( rval );
2147  if( times ) times[SET_OFFSET_TIME] = timer.time_elapsed();
2148 
2149  // Exchange file IDS for sets between all procs
2150  rval = communicate_shared_set_ids( owned, remote );
2151  CHECK_MB( rval );
2152  if( times ) times[SHARED_SET_IDS] = timer.time_elapsed();
2153 
2154  // Communicate remote set contents, children, etc.
2155  rval = communicate_shared_set_data( owned, remote );
2156  CHECK_MB( rval );
2157  if( times ) times[SHARED_SET_CONTENTS] = timer.time_elapsed();
2158 
2159  // Communicate counts for owned sets
2160  long data_counts[3]; // { #contents, #children, #parents }
2161  rval = count_set_size( setSet.range, data_counts[0], data_counts[1], data_counts[2] );
2162  CHECK_MB( rval );
2163  if( times ) times[SET_OFFSET_TIME] += timer.time_elapsed();
2164 
2165  long offsets[3], max_counts[3], totals[3];
2166  rval = create_dataset( 3, data_counts, offsets, max_counts, totals );
2167  CHECK_MB( rval );
2168 
2169  // Create the datasets
2170  if( 0 == myPcomm->proc_config().proc_rank() )
2171  {
2172  rval = create_set_tables( totals[0], totals[1], totals[2] );
2173  CHECK_MB( rval );
2174  }
2175 
2176  // Store communicated global data
2177  setContentsOffset = offsets[0];
2178  setChildrenOffset = offsets[1];
2179  setParentsOffset = offsets[2];
2180  maxNumSetContents = max_counts[0];
2181  maxNumSetChildren = max_counts[1];
2182  maxNumSetParents = max_counts[2];
2183  writeSetContents = totals[0] > 0;
2184  writeSetChildren = totals[1] > 0;
2185  writeSetParents = totals[2] > 0;
2186 
2187  dbgOut.printf( 2, "set contents: %ld local, %ld global, offset = %ld\n", data_counts[0], totals[0], offsets[0] );
2188  dbgOut.printf( 2, "set children: %ld local, %ld global, offset = %ld\n", data_counts[1], totals[1], offsets[1] );
2189  dbgOut.printf( 2, "set parents: %ld local, %ld global, offset = %ld\n", data_counts[2], totals[2], offsets[2] );
2190 
2191  return MB_SUCCESS;
2192 }
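// --- Illustrative sketch (not part of WriteHDF5Parallel.cpp) ----------------
// The offsets stored above (setSet.offset, setContentsOffset, ...) follow the
// usual parallel-write pattern: each process writes into the global table at
// an offset equal to the sum of the counts on lower ranks, and the table size
// is the sum over all ranks.  The real work happens inside create_dataset();
// the sketch below only shows that offset/total computation with an exclusive
// prefix sum (illustrative counts):
#include <mpi.h>
#include <cstdio>

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    long my_count  = 10 + rank; // e.g. number of local set-content entries
    long my_offset = 0;         // where this rank starts writing
    long total     = 0;         // global table size

    MPI_Exscan( &my_count, &my_offset, 1, MPI_LONG, MPI_SUM, MPI_COMM_WORLD );
    if( rank == 0 ) my_offset = 0; // MPI_Exscan leaves rank 0's result undefined
    MPI_Allreduce( &my_count, &total, 1, MPI_LONG, MPI_SUM, MPI_COMM_WORLD );

    std::printf( "rank %d: count %ld, offset %ld, total %ld\n", rank, my_count, my_offset, total );
    MPI_Finalize();
    return 0;
}
// -----------------------------------------------------------------------------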
2193 
2194 void WriteHDF5Parallel::remove_remote_entities( EntityHandle relative, Range& range )
2195 {
2196  Range result;
2197  result.merge( intersect( range, nodeSet.range ) );
2198  result.merge( intersect( range, setSet.range ) );
2199  for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
2200  result.merge( intersect( range, eiter->range ) );
2201 
2202  // result.merge(intersect(range, myParallelSets));
2203  Range sets;
2204  int junk;
2205  sets.merge( Range::lower_bound( range.begin(), range.end(), CREATE_HANDLE( MBENTITYSET, 0, junk ) ), range.end() );
2206  remove_remote_sets( relative, sets );
2207  result.merge( sets );
2208  range.swap( result );
2209 }
2210 
2211 void WriteHDF5Parallel::remove_remote_sets( EntityHandle relative, Range& range )
2212 {
2213  Range result( intersect( range, setSet.range ) );
2214  // Store the non-intersecting entities separately if needed
2215  // Range remaining(subtract(range, result));
2216  range.swap( result );
2217 }
2218 
2219 void WriteHDF5Parallel::remove_remote_entities( EntityHandle relative, std::vector< EntityHandle >& vect )
2220 {
2221  Range intrsct;
2222  for( std::vector< EntityHandle >::const_iterator iter = vect.begin(); iter != vect.end(); ++iter )
2223  intrsct.insert( *iter );
2224  remove_remote_entities( relative, intrsct );
2225 
2226  unsigned int read, write;
2227  for( read = write = 0; read < vect.size(); ++read )
2228  {
2229  if( intrsct.find( vect[read] ) != intrsct.end() )
2230  {
2231  if( read != write ) vect[write] = vect[read];
2232  ++write;
2233  }
2234  }
2235  if( write != vect.size() ) vect.resize( write );
2236 }
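// --- Illustrative sketch (not part of WriteHDF5Parallel.cpp) ----------------
// The read/write loop above compacts the vector in place, keeping only the
// handles that survived the Range-based filtering.  The same compaction
// written with the standard erase/remove_if idiom, standalone (handle_t and
// the keep-set are stand-ins, not MOAB types):
#include <algorithm>
#include <set>
#include <vector>

typedef unsigned long handle_t; // stand-in for moab::EntityHandle

struct NotKept
{
    const std::set< handle_t >* keep;
    bool operator()( handle_t h ) const { return keep->find( h ) == keep->end(); }
};

static void filter_vector( std::vector< handle_t >& vect, const std::set< handle_t >& keep )
{
    NotKept pred = { &keep };
    vect.erase( std::remove_if( vect.begin(), vect.end(), pred ), vect.end() );
}
// -----------------------------------------------------------------------------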
2237 
2238 void WriteHDF5Parallel::remove_remote_sets( EntityHandle relative, std::vector< EntityHandle >& vect )
2239 {
2240  Range intrsct;
2241  for( std::vector< EntityHandle >::const_iterator iter = vect.begin(); iter != vect.end(); ++iter )
2242  intrsct.insert( *iter );
2243  remove_remote_sets( relative, intrsct );
2244 
2245  unsigned int read, write;
2246  for( read = write = 0; read < vect.size(); ++read )
2247  {
2248  if( intrsct.find( vect[read] ) != intrsct.end() )
2249  {
2250  if( read != write ) vect[write] = vect[read];
2251  ++write;
2252  }
2253  }
2254  if( write != vect.size() ) vect.resize( write );
2255 }
2256 
2257 ErrorCode WriteHDF5Parallel::exchange_file_ids( const Range& nonlocal )
2258 {
2259  ErrorCode rval;
2260 
2261  // For each entity owned on the interface, write its file id to
2262  // a tag. The sets of entities to be written should already contain
2263  // only owned entities, so by intersecting with them we not only
2264  // filter to the entities being written, but also restrict ourselves
2265  // to entities owned by this proc.
2266 
2267  // Get list of interface entities
2268  Range imesh, tmp;
2269  for( std::list< ExportSet >::reverse_iterator i = exportList.rbegin(); i != exportList.rend(); ++i )
2270  {
2271  tmp.clear();
2272  rval = myPcomm->filter_pstatus( i->range, PSTATUS_SHARED, PSTATUS_AND, -1, &tmp );
2273  if( MB_SUCCESS != rval ) return error( rval );
2274  imesh.merge( tmp );
2275  }
2276  tmp.clear();
2277  rval = myPcomm->filter_pstatus( nodeSet.range, PSTATUS_SHARED, PSTATUS_AND, -1, &tmp );
2278  if( MB_SUCCESS != rval ) return error( rval );
2279  imesh.merge( tmp );
2280 
2281  // Create tag to store file IDs
2282  EntityHandle default_val = 0;
2283  Tag file_id_tag = 0;
2284  rval = iFace->tag_get_handle( "__hdf5_ll_fileid", 1, MB_TYPE_HANDLE, file_id_tag, MB_TAG_DENSE | MB_TAG_CREAT,
2285  &default_val );
2286  if( MB_SUCCESS != rval ) return error( rval );
2287 
2288  // Copy file IDs into tag
2289  std::vector< EntityHandle > file_id_vect( imesh.size() );
2290  Range::const_iterator i;
2291  std::vector< EntityHandle >::iterator j = file_id_vect.begin();
2292  for( i = imesh.begin(); i != imesh.end(); ++i, ++j )
2293  {
2294  *j = idMap.find( *i );
2295  if( !*j )
2296  {
2297  iFace->tag_delete( file_id_tag );
2298  return error( MB_FAILURE );
2299  }
2300  }
2301  rval = iFace->tag_set_data( file_id_tag, imesh, &file_id_vect[0] );
2302  if( MB_SUCCESS != rval )
2303  {
2304  iFace->tag_delete( file_id_tag );
2305  return error( rval );
2306  }
2307 
2308  // Do communication
2309  rval = myPcomm->exchange_tags( file_id_tag, imesh );
2310  if( MB_SUCCESS != rval )
2311  {
2312  iFace->tag_delete( file_id_tag );
2313  return error( rval );
2314  }
2315 
2316  // Copy file IDs from tag into idMap for remote entities
2317  file_id_vect.resize( nonlocal.size() );
2318  rval = iFace->tag_get_data( file_id_tag, nonlocal, &file_id_vect[0] );
2319  if( MB_SUCCESS != rval )
2320  {
2321  iFace->tag_delete( file_id_tag );
2322  return error( rval );
2323  }
2324 
2325  j = file_id_vect.begin();
2326  for( i = nonlocal.begin(); i != nonlocal.end(); ++i, ++j )
2327  {
2328  if( *j == 0 )
2329  {
2330  int owner = -1;
2331  myPcomm->get_owner( *i, owner );
2332  const char* name = CN::EntityTypeName( TYPE_FROM_HANDLE( *i ) );
2333  int id = ID_FROM_HANDLE( *i );
2334  MB_SET_ERR_CONT( "Process " << myPcomm->proc_config().proc_rank()
2335  << " did not receive valid id handle for shared " << name << " " << id
2336  << " owned by process " << owner );
2337  dbgOut.printf( 1,
2338  "Did not receive valid remote id for "
2339  "shared %s %d owned by process %d",
2340  name, id, owner );
2341  iFace->tag_delete( file_id_tag );
2342  return error( MB_FAILURE );
2343  }
2344  else
2345  {
2346  if( !idMap.insert( *i, *j, 1 ).second )
2347  {
2348  iFace->tag_delete( file_id_tag );
2349  return error( MB_FAILURE );
2350  }
2351  }
2352  }
2353 
2354 #ifndef NDEBUG
2355  // Check that the writer is correct with regard to which entities
2356  // it owns, by verifying that the file ids we thought we were
2357  // sending were not overwritten by received values instead
2358  file_id_vect.resize( imesh.size() );
2359  rval = iFace->tag_get_data( file_id_tag, imesh, &file_id_vect[0] );
2360  if( MB_SUCCESS != rval )
2361  {
2362  iFace->tag_delete( file_id_tag );
2363  return error( rval );
2364  }
2365  int invalid_count = 0;
2366  j = file_id_vect.begin();
2367  for( i = imesh.begin(); i != imesh.end(); ++i, ++j )
2368  {
2369  EntityHandle h = idMap.find( *i );
2370  if( *j != h )
2371  {
2372  ++invalid_count;
2373  dbgOut.printf( 1, "Conflicting ownership for %s %ld\n", CN::EntityTypeName( TYPE_FROM_HANDLE( *i ) ),
2374  (long)ID_FROM_HANDLE( *i ) );
2375  }
2376  }
2377  if( invalid_count )
2378  {
2379  iFace->tag_delete( file_id_tag );
2380  MB_SET_ERR( MB_FAILURE, invalid_count << " entities with conflicting ownership found by process "
2381  << myPcomm->proc_config().proc_rank()
2382  << ". This will result in duplicate entities written to file" );
2383  }
2384 #endif
2385 
2386  return iFace->tag_delete( file_id_tag );
2387 }
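// --- Illustrative sketch (not part of WriteHDF5Parallel.cpp) ----------------
// exchange_file_ids() records each received file id in idMap and treats a
// failed insertion (or a zero id) as an ownership problem.  The same
// bookkeeping with a std::map stand-in for the RangeMap used above; names and
// values are illustrative only:
#include <cstdio>
#include <map>
#include <utility>

typedef unsigned long handle_t;  // stand-in for moab::EntityHandle
typedef unsigned long file_id_t; // stand-in for WriteHDF5::wid_t

static bool record_file_id( std::map< handle_t, file_id_t >& id_map, handle_t entity, file_id_t file_id )
{
    if( file_id == 0 ) return false; // the owner never sent a valid id
    // insert() fails if the entity already has an id -> conflicting ownership
    return id_map.insert( std::make_pair( entity, file_id ) ).second;
}

int main()
{
    std::map< handle_t, file_id_t > id_map;
    std::printf( "%d\n", (int)record_file_id( id_map, 42, 7 ) ); // prints 1: recorded
    std::printf( "%d\n", (int)record_file_id( id_map, 42, 9 ) ); // prints 0: conflict
    return 0;
}
// -----------------------------------------------------------------------------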
2388 
2389 void WriteHDF5Parallel::print_times( const double* times ) const
2390 {
2391  if( !myPcomm )
2392  {
2393  WriteHDF5::print_times( times );
2394  }
2395  else
2396  {
2397  double recv[NUM_TIMES];
2398  MPI_Reduce( (void*)times, recv, NUM_TIMES, MPI_DOUBLE, MPI_MAX, 0, myPcomm->proc_config().proc_comm() );
2399  if( 0 == myPcomm->proc_config().proc_rank() ) WriteHDF5::print_times( recv );
2400  }
2401 }
2402 
2403 } // namespace moab