#ifndef MOAB_HAVE_HDF5
#error Attempt to compile WriteHDF5Parallel with HDF5 support disabled
#endif

#include <H5Tpublic.h>
#include <H5Ppublic.h>
template < bool Condition >
struct STATIC_ASSERTION;
template <>
struct STATIC_ASSERTION< true >
{
};

#define PP_CAT_( a, b ) a##b
#define PP_CAT( a, b ) PP_CAT_( a, b )
#define STATIC_ASSERT( Condition )                                                       \
    enum                                                                                 \
    {                                                                                    \
        PP_CAT( dummy, __LINE__ ) = sizeof( ::STATIC_ASSERTION< (bool)( Condition ) > )  \
    }
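// Compile-time assertion idiom for pre-C++11 builds: only the <true>
// specialization of STATIC_ASSERTION is defined, so STATIC_ASSERT( expr )
// fails to compile whenever expr is false.  It is used later in this file,
// e.g. STATIC_ASSERT( sizeof( std::pair< wid_t, wid_t > ) == 2 * sizeof( wid_t ) ).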
    int len = sizeof( buffer );
    MPI_Error_string( errorcode, buffer, &len );
    buffer[std::min( (size_t)len, sizeof( buffer ) - 1 )] = '\0';
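// mpi_err_str(): translate an MPI error code into a printable message,
// NUL-terminating defensively in case MPI_Error_string fills the buffer.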
#define MPI_FAILURE_MSG( A ) \
    "MPI Failure at " __FILE__ ":%d : (Code %d) %s\n", __LINE__, (int)( A ), mpi_err_str( ( A ) )

#define CHECK_MPI( A )                                                                                   \
    do                                                                                                   \
    {                                                                                                    \
        if( MPI_SUCCESS != ( A ) )                                                                       \
        {                                                                                                \
            MB_SET_ERR_CONT( "MPI Failure : (Code " << (int)( A ) << ") " << mpi_err_str( ( A ) ) );     \
            dbgOut.printf( 1, MPI_FAILURE_MSG( ( A ) ) );                                                \
            return error( MB_FAILURE );                                                                  \
        }                                                                                                \
    } while( false )

#define MB_FAILURE_MSG( A ) "MOAB_Failure at " __FILE__ ":%d : %s (%d)\n", __LINE__, ErrorCodeStr[( A )], (int)( A )

#define CHECK_MB( A )                                                    \
    do                                                                   \
    {                                                                    \
        if( MB_SUCCESS != ( A ) )                                        \
        {                                                                \
            MB_SET_ERR_CONT( "MOAB Failure : " << ErrorCodeStr[( A )] ); \
            dbgOut.printf( 1, MB_FAILURE_MSG( ( A ) ) );                 \
            return error( MB_FAILURE );                                  \
        }                                                                \
    } while( false )

#define HDF_FAILURE_MSG( A ) "MHDF Failure at " __FILE__ ":%d : %s\n", __LINE__, mhdf_message( &( A ) )

#define CHECK_HDF( A )                                                      \
    do                                                                      \
    {                                                                       \
        if( mhdf_isError( &( A ) ) )                                        \
        {                                                                   \
            MB_SET_ERR_CONT( "MHDF Failure : " << mhdf_message( &( A ) ) ); \
            dbgOut.printf( 1, HDF_FAILURE_MSG( ( A ) ) );                   \
            return error( MB_FAILURE );                                     \
        }                                                                   \
    } while( false )

#define CHECK_HDFN( A )                                                     \
    do                                                                      \
    {                                                                       \
        if( mhdf_isError( &( A ) ) )                                        \
        {                                                                   \
            MB_SET_ERR_CONT( "MHDF Failure : " << mhdf_message( &( A ) ) ); \
            return error( MB_FAILURE );                                     \
        }                                                                   \
    } while( false )
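// Error-checking conventions used throughout this file: CHECK_MPI for MPI
// return codes, CHECK_MB for MOAB ErrorCode values, and CHECK_HDF/CHECK_HDFN
// for mhdf error structs (the latter skips the debug-stream message).  A
// typical call site looks like the following sketch (illustrative only):
//
//   int mperr = MPI_Barrier( myPcomm->proc_config().proc_comm() );
//   CHECK_MPI( mperr );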
#include <valgrind/memcheck.h>

template < typename T >

#ifndef VALGRIND_CHECK_MEM_IS_DEFINED
#define VALGRIND_CHECK_MEM_IS_DEFINED( a, b ) ( (void)0 )
#endif
#ifndef VALGRIND_CHECK_MEM_IS_ADDRESSABLE
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE( a, b ) ( (void)0 )
#endif
#ifndef VALGRIND_MAKE_MEM_UNDEFINED
#define VALGRIND_MAKE_MEM_UNDEFINED( a, b ) ( (void)0 )
#endif
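// When the valgrind client-request macros are not available, the fallbacks
// above expand to no-ops so the memory-checking annotations in this file
// compile away cleanly.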
template < typename T >

#define START_SERIAL                                                       \
    for( unsigned _x = 0; _x < myPcomm->proc_config().proc_size(); ++_x )  \
    {                                                                      \
        MPI_Barrier( myPcomm->proc_config().proc_comm() );                 \
        if( _x != myPcomm->proc_config().proc_rank() ) continue
#define END_SERIAL \
    }              \
    MPI_Barrier( myPcomm->proc_config().proc_comm() )
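// START_SERIAL/END_SERIAL bracket a code block so that it executes on one
// rank at a time: each loop iteration barriers, then only the matching rank
// falls through, which keeps debug output ordered by rank.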
                       MPI_Datatype sendtype,
                       std::vector< unsigned char >& recvbuf,
                       std::vector< int >& recvcounts,

    int nproc, rank, bytes, err;
    MPI_Comm_size( comm, &nproc );
    MPI_Comm_rank( comm, &rank );
    MPI_Type_size( sendtype, &bytes );

    recvcounts.resize( rank == root ? nproc : 0 );
    err = MPI_Gather( &sendcount, 1, MPI_INT, &recvcounts[0], 1, MPI_INT, root, comm );
    if( MPI_SUCCESS != err ) return err;

    std::vector< int > disp( recvcounts.size() );
    for( int i = 1; i < nproc; ++i )
        disp[i] = disp[i - 1] + recvcounts[i - 1];
    recvbuf.resize( bytes * ( disp.back() + recvcounts.back() ) );

    return MPI_Gatherv( sendbuf, sendcount, sendtype, &recvbuf[0], &recvcounts[0], &disp[0], sendtype, root, comm );
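// my_Gatherv: convenience wrapper around MPI_Gatherv.  Ranks first gather
// their element counts to the root, which computes displacements and sizes
// recvbuf, so callers only have to supply their local send buffer.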
    const unsigned VB = 2;
    Tag gid, did, bid, sid, nid;

    const char* typenames[] = { "Block ",  "Sideset ", "NodeSet", "Vertex", "Curve",
                                "Surface", "Volume",   "Body",    "Other" };

    for( int ii = 0; ii < 9; ++ii )

        snprintf( tmp, 64, "%s (%lu) ", typenames[ii], (unsigned long)typesets[ii].size() );
        str->print( VB, tmp, typesets[ii] );

    str->printf( VB, "Total: %lu\n", (unsigned long)sets.size() );

#define debug_barrier() debug_barrier_line( __LINE__ )

    const unsigned threshold  = 2;
    static unsigned long count = 0;

    dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno );
    : WriteHDF5( iface ), myPcomm( NULL ), pcommAllocated( false ), hslabOp( H5S_SELECT_OR )

    for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )

    for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )

        eiter->range = subtract( eiter->range, tmpset );
        nonowned.merge( tmpset );

    for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )
                                             const std::vector< std::string >& qa_records,
                                             const Tag* user_tag_list,

    MPI_Info info = MPI_INFO_NULL;
    MPI_Info_create( &info );
    MPI_Info_set( info, const_cast< char* >( "cb_buffer_size" ), const_cast< char* >( cb_size.c_str() ) );

    memset( type_names, 0, MBMAXTYPE * sizeof( char* ) );

    topState.start( "creating node table" );
    topState.start( "negotiating element types" );
    topState.start( "creating element tables" );
    topState.start( "communicating file ids" );
    topState.start( "creating meshset tables" );
    topState.start( "creating adjacency tables" );
    topState.start( "creating tag tables" );
    rval = gather_tags( user_tag_list, user_tag_count );
    dbgOut.tprint( 1, "(re)opening file in parallel mode\n" );
    hid_t hdf_opt = H5Pcreate( H5P_FILE_ACCESS );

    writeProp = H5Pcreate( H5P_DATASET_XFER );
    H5Pset_dxpl_mpio( writeProp, H5FD_MPIO_COLLECTIVE );

    dbgOut.print( 1, "HDF5 library supports H5Sselect_hyperslab with H5S_SELECT_APPEND\n" );
    char name[sizeof( unsigned long )];

    if( len % sizeof( unsigned long ) )
        return len + sizeof( unsigned long ) - len % sizeof( unsigned long );

           sizeof( unsigned long );

    size_t name_len  = name.size() + 1;
    size_t init_size = buffer.size();

    ptr->type = data_type;

    return error( rval );

    memcpy( ptr->name, name.data(), name.size() );
                                               std::vector< TagDesc* >* missing,
                                               std::vector< TagDesc* >* newlist )

    std::list< TagDesc >::iterator tag_iter;

    for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
        tag_iter->write_sparse = true;

    std::set< TagDesc* > newset;

    std::vector< unsigned char >::const_iterator diter = buffer.begin();

    while( diter < buffer.end() )

        std::string name( ptr->name );

        if( tag_iter == tagList.end() ) break;

        if( tag_iter == tagList.end() || n != name )

            tag_iter = tagList.insert( tag_iter, newtag );
            if( newlist ) newset.insert( &*tag_iter );

        if( type != ptr->type )
            MB_SET_ERR( MB_FAILURE, "Processes have inconsistent data type for tag \"" << name << "\"" );

            MB_SET_ERR( MB_FAILURE, "Processes have inconsistent size for tag \"" << name << "\"" );

        tag_iter->write_sparse = false;

    for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )

        if( tag_iter->write_sparse )

            tag_iter->write_sparse = false;
            missing->push_back( &*tag_iter );

    for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )
        if( newset.find( &*tag_iter ) != newset.end() ) newlist->push_back( &*tag_iter );
static void set_bit( int position, unsigned char* bytes )
{
    int byte = position / 8;
    int bit  = position % 8;
    bytes[byte] |= ( ( (unsigned char)1 ) << bit );
}

static bool get_bit( int position, const unsigned char* bytes )
{
    int byte = position / 8;
    int bit  = position % 8;
    return 0 != ( bytes[byte] & ( ( (unsigned char)1 ) << bit ) );
}
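// set_bit/get_bit pack one flag per (tag, entity-group) pair into the byte
// vectors exchanged below, so the "can this combination be written as dense
// data" negotiation costs a single MPI_Allreduce over a compact bitmap.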
    std::list< TagDesc >::iterator tag_iter;

    subState.start( "negotiating tag list" );

    std::vector< unsigned char > tag_buffer;
    for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )

    unsigned long size = tag_buffer.size();
    err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, comm );
    tag_buffer.resize( size );
    err = MPI_Bcast( &tag_buffer[0], size, MPI_UNSIGNED_CHAR, 0, comm );

    std::vector< TagDesc* > missing;

    int code, lcode = ( MB_SUCCESS != rval ) ? rval + 2 : missing.empty() ? 0 : 1;
    err = MPI_Allreduce( &lcode, &code, 1, MPI_INT, MPI_MAX, comm );

    dbgOut.print( 1, "Not all procs had same tag definitions, negotiating...\n" );

    for( size_t i = 0; i < missing.size(); ++i )

    std::vector< int > junk;
    assert( rank || tag_buffer.empty() );
    err = my_Gatherv( &tag_buffer[0], tag_buffer.size(), MPI_UNSIGNED_CHAR, tag_buffer, junk, 0, comm );

    std::vector< TagDesc* > newlist;

    for( size_t i = 0; MB_SUCCESS == rval && i != newlist.size(); ++i )

    long this_size = tag_buffer.size();
    err = MPI_Bcast( &this_size, 1, MPI_LONG, 0, comm );
    tag_buffer.resize( this_size );
    err = MPI_Bcast( &tag_buffer[0], this_size, MPI_UNSIGNED_CHAR, 0, comm );

    subState.start( "negotiate which element/tag combinations are dense" );

    const int bytes_per_tag = ( exportList.size() + 9 ) / 8;
    std::vector< unsigned char > data( bytes_per_tag * tagList.size(), 0 );
    std::vector< unsigned char > recv( data.size(), 0 );
    unsigned char* iter = &data[0];
    for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, iter += bytes_per_tag )

        bool prefer_dense = false;
        const void* defval = 0;

            dbgOut.printf( 2, "Can write dense data for \"%s\"/Nodes\n", n.c_str() );

        std::list< ExportSet >::const_iterator ex_iter = exportList.begin();
        for( ++i; ex_iter != exportList.end(); ++i, ++ex_iter )

            if( ( prefer_dense && ex_iter->range.empty() ) ||

                dbgOut.printf( 2, "Can write dense data for \"%s\"/%s\n", n.c_str(), ex_iter->name() );

            dbgOut.printf( 2, "Can write dense data for \"%s\"/Sets\n", n.c_str() );

    err = MPI_Allreduce( &data[0], &recv[0], data.size(), MPI_UNSIGNED_CHAR, MPI_BAND,

    std::vector< long > counts;

    const unsigned char* iter2 = &data[0];
         ++tag_iter, iter += bytes_per_tag, iter2 += bytes_per_tag )

            tag_iter->dense_list.push_back( nodeSet );
            dbgOut.printf( 2, "Will write dense data for \"%s\"/Nodes\n", n.c_str() );

        std::list< ExportSet >::const_iterator ex_iter = exportList.begin();
        for( ++i; ex_iter != exportList.end(); ++i, ++ex_iter )

                tag_iter->dense_list.push_back( *ex_iter );
                dbgOut.printf( 2, "Will write dense data for \"%s\"/%s\n", n.c_str(), ex_iter->name() );
                tagged -= ex_iter->range;

            tag_iter->dense_list.push_back( setSet );
            dbgOut.printf( 2, "Will write dense data for \"%s\"/Sets\n", n.c_str() );

        counts.push_back( tagged.size() );

        unsigned long data_len;
        counts.push_back( data_len );
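// The per-rank "can write dense" bitmaps are combined with MPI_Allreduce and
// MPI_BAND, so a tag/group combination is written in dense form only if every
// rank agrees; everything else falls back to the sparse format, and the
// remaining sparse entity counts are accumulated in 'counts'.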
    subState.start( "Negotiate offsets for sparse tag info" );

    std::vector< long > offsets( counts.size() ), maxima( counts.size() ), totals( counts.size() );
    rval = create_dataset( counts.size(), &counts[0], &offsets[0], &maxima[0], &totals[0] );

    for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++idx )

        assert( idx < counts.size() );
        tag_iter->sparse_offset = offsets[idx];
        tag_iter->max_num_ents  = maxima[idx];
        tag_iter->write_sparse  = ( 0 != totals[idx] );

            assert( idx < counts.size() );
            tag_iter->var_data_offset = offsets[idx];
            tag_iter->max_num_vals    = maxima[idx];

            tag_iter->var_data_offset = 0;
            tag_iter->max_num_vals    = 0;

    for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter, ++iidx )

        assert( iidx < totals.size() );
        unsigned long num_ents = totals[iidx];
        unsigned long num_val  = 0;

            assert( iidx < totals.size() );
            num_val = totals[iidx];

        dbgOut.printf( 2, "Writing tag description for tag 0x%lx with %lu values\n",
                       (unsigned long)tag_iter->tag_id, num_val ? num_val : num_ents );

        rval = create_tag( *tag_iter, num_ents, num_val );

    dbgOut.printf( 2, "Tags: %12s %8s %8s %8s %8s %8s\n", "Name", "Count", "Offset", "Var Off", "Max Ent",

    for( tag_iter = tagList.begin(); tag_iter != tagList.end(); ++tag_iter )

        dbgOut.printf( 2, "%18s %8lu %8lu %8lu %8lu 0x%7lx\n", name.c_str(), (unsigned long)this_size,
                       (unsigned long)tag_iter->sparse_offset, (unsigned long)tag_iter->var_data_offset,
                       (unsigned long)tag_iter->max_num_ents, (unsigned long)tag_iter->tag_id );
                                          const long* num_owned,
                                          long* max_proc_entities,
                                          long* total_entities,
                                          wid_t* first_ids_out )

    std::vector< long > counts( rank ? 0 : nproc * num_datasets );
    result = MPI_Gather( const_cast< long* >( num_owned ), num_datasets, MPI_LONG, &counts[0], num_datasets, MPI_LONG,

    std::vector< DatasetVals > cumulative( num_datasets, zero_val );

    for( unsigned i = 0; i < nproc; i++ )

        const long* proc_data = &counts[i * num_datasets];
        for( int index = 0; index < num_datasets; ++index )

            cumulative[index].total += proc_data[index];
            if( proc_data[index] > cumulative[index].max_count ) cumulative[index].max_count = proc_data[index];

    for( int index = 0; index < num_datasets; ++index )

        if( cumulative[index].total )

            rval = creator( this, cumulative[index].total, groups ? groups[index] : 0, cumulative[index].start_id );

            cumulative[index].start_id = -1;

    result = MPI_Bcast( (void*)&cumulative[0], 3 * num_datasets, MPI_LONG, 0, comm );

    for( int index = 0; index < num_datasets; ++index )

        if( first_ids_out ) first_ids_out[index] = (wid_t)cumulative[index].start_id;
        max_proc_entities[index] = cumulative[index].max_count;
        total_entities[index]    = cumulative[index].total;

    std::vector< long > prev_size( counts.begin(), counts.begin() + num_datasets );
    std::fill( counts.begin(), counts.begin() + num_datasets, 0L );
    for( unsigned i = 1; i < nproc; ++i )

        long* prev_data = &counts[( i - 1 ) * num_datasets];
        long* proc_data = &counts[i * num_datasets];
        for( int j = 0; j < num_datasets; ++j )

            long mysize  = proc_data[j];
            proc_data[j] = prev_data[j] + prev_size[j];
            prev_size[j] = mysize;

    result = MPI_Scatter( &counts[0], num_datasets, MPI_LONG, offsets_out, num_datasets, MPI_LONG, 0, comm );
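// create_dataset: the root gathers per-rank entity counts, creates each table
// once with the global total, then converts the gathered counts into an
// exclusive prefix sum that is scattered back as each rank's write offset.
// For example, per-rank counts {3, 5, 2} become offsets {0, 3, 8} into a
// dataset of total size 10.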
    typedef std::vector< std::pair< int, int > > typelist;
    typelist my_types( num_types );

    typelist::iterator viter = my_types.begin();
    for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )

        viter->first  = eiter->type;
        viter->second = eiter->num_nodes;

    for( viter = my_types.begin(); viter != my_types.end(); ++viter )

        int type  = viter->first;
        int count = viter->second;

    int num_types0 = num_types;
    result = MPI_Bcast( &num_types0, 1, MPI_INT, 0, comm );

    typelist root_types( num_types0 );
    result = MPI_Bcast( (void*)&root_types[0], 2 * num_types0, MPI_INT, 0, comm );

    typelist non_root_types;
    viter = root_types.begin();
    for( typelist::iterator iter = my_types.begin(); iter != my_types.end(); ++iter )

        if( viter == root_types.end() || *viter != *iter )
            non_root_types.push_back( *iter );

    int non_root_count = non_root_types.size();
    result = MPI_Allreduce( &non_root_count, &not_done, 1, MPI_INT, MPI_LOR, comm );

    int two_count = 2 * non_root_count;
    result = MPI_Gather( &two_count, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, comm );

        displs[i] = displs[i - 1] + counts[i - 1];

    typelist alltypes( total / 2 );
    result = MPI_Gatherv( (void*)&non_root_types[0], 2 * non_root_count, MPI_INT, (int*)&alltypes[0], &counts[0],
                          &displs[0], MPI_INT, 0, comm );

    for( viter = alltypes.begin(); viter != alltypes.end(); ++viter )

        typelist::iterator titer = std::lower_bound( my_types.begin(), my_types.end(), *viter );
        if( titer == my_types.end() || *titer != *viter ) my_types.insert( titer, *viter );

    for( viter = my_types.begin(); viter != my_types.end(); ++viter )

    total  = my_types.size();
    result = MPI_Bcast( &total, 1, MPI_INT, 0, comm );

    my_types.resize( total );
    result = MPI_Bcast( (void*)&my_types[0], 2 * total, MPI_INT, 0, comm );

    my_types.swap( root_types );

    std::list< ExportSet >::iterator ex_iter = exportList.begin();
    for( viter = my_types.begin(); viter != my_types.end(); ++viter )

        while( ex_iter != exportList.end() && *ex_iter < *viter )

        if( ex_iter == exportList.end() || !( *ex_iter == *viter ) )

            insert.type = (EntityType)viter->first;
            ex_iter     = exportList.insert( ex_iter, insert );
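// Element-type negotiation: every rank ends up with the same sorted list of
// (element type, nodes per element) pairs, inserting empty ExportSets for
// types it does not have locally, so that the table creation and offset
// negotiation that follow see an identical set of element groups on every rank.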
    std::vector< ExportSet* > groups( numtypes );
    std::vector< long > counts( numtypes ), offsets( numtypes ), max_ents( numtypes ), total_ents( numtypes );
    std::vector< wid_t > start_ids( numtypes );

    std::list< ExportSet >::iterator ex_iter;

        groups[idx] = &*ex_iter;
        counts[idx] = ex_iter->range.size();

    ErrorCode rval = create_dataset( numtypes, &counts[0], &offsets[0], &max_ents[0], &total_ents[0], ElemSetCreator(),
                                     &groups[0], &start_ids[0] );

        ex_iter->first_id       = start_ids[idx];
        ex_iter->offset         = offsets[idx];
        ex_iter->max_num_ents   = max_ents[idx];
        ex_iter->total_num_ents = total_ents[idx];
        rval = assign_ids( ex_iter->range, ex_iter->first_id + ex_iter->offset );
    std::vector< ExportSet* > groups;
#ifdef WRITE_NODE_ADJACENCIES

    for( std::list< ExportSet >::iterator ex_iter = exportList.begin(); ex_iter != exportList.end(); ++ex_iter )
        groups.push_back( &*ex_iter );

    const int numtypes = groups.size();

    std::vector< long > counts( numtypes );
    std::vector< long > offsets( numtypes );
    std::vector< long > max_ents( numtypes );
    std::vector< long > totals( numtypes );
    for( int i = 0; i < numtypes; ++i )

    rval = create_dataset( numtypes, &counts[0], &offsets[0], &max_ents[0], &totals[0], AdjSetCreator(), &groups[0] );

    for( int i = 0; i < numtypes; ++i )

        groups[i]->max_num_adjs = max_ents[i];
        groups[i]->adj_offset   = offsets[i];
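// Adjacency tables go through the same create_dataset() negotiation as the
// element tables: each group's adjacency count goes in, and each group records
// its global write offset and the largest count seen on any rank.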
    if( !idMap.find( *it, file_id ) ) file_id = 0;
    dbgOut.printf( SSVB, "%s\t%d\t%u\t%lx\t%lu\t", label, id, owner, (unsigned long)handle,
                   (unsigned long)file_id );
    std::vector< unsigned > procs;

    for( unsigned i = 0; i < procs.size() - 1; ++i )

    for( int i = 0; tag_names[i][0]; ++i )

    const char* geom_names[] = { "vertex", "curve", "surface", "volume" };
    for( int d = 0; d <= 3; ++d )

        const void* vals[] = { &d };

    const int TAG = 0xD0E;

    std::vector< unsigned > procs;
    if( it != procs.end() ) procs.erase( it );
    std::vector< MPI_Request > recv_req( procs.size(), MPI_REQUEST_NULL );
    std::vector< std::vector< unsigned long > > recv_buf( procs.size() );

    size_t recv_count = 0;
    for( size_t i = 0; i < procs.size(); ++i )

        dbgOut.printf( 6, "Sets owned by proc %u (remote handles): ", procs[i] );

        Range remote_handles;

            assert( r == procs[i] );
            remote_handles.insert( h );

        recv_buf[i].resize( 2 * count + 1 );
        dbgOut.printf( 5, "Posting receive buffer of size %lu for proc %u (%lu of %lu owned sets)\n",
                       (unsigned long)recv_buf[i].size(), procs[i], count, tmp.size() );

        MPI_Irecv( &recv_buf[i][0], recv_buf[i].size(), MPI_UNSIGNED_LONG, procs[i], TAG, comm, &recv_req[i] );

    std::map< unsigned, Range > send_sets;
    std::vector< unsigned > set_procs;

        for( size_t j = 0; j < set_procs.size(); ++j )

    std::vector< std::vector< unsigned long > > send_buf( send_sets.size() );
    std::vector< MPI_Request > send_req( send_sets.size() );
    std::map< unsigned, Range >::iterator si = send_sets.begin();
    for( size_t i = 0; si != send_sets.end(); ++si, ++i )

        dbgOut.printf( 6, "Sending data for shared sets to proc %u: ", si->first );

        send_buf[i].reserve( 2 * si->second.size() + 1 );
        send_buf[i].push_back( si->second.size() );
        for( Range::iterator j = si->second.begin(); j != si->second.end(); ++j )

            send_buf[i].push_back( *j );
            send_buf[i].push_back( idMap.find( *j ) );

        dbgOut.printf( 5, "Sending buffer of size %lu to proc %u (%lu of %lu owned sets)\n",
                       (unsigned long)send_buf[i].size(), si->first, si->second.size(), owned.size() );
        mperr = MPI_Isend( &send_buf[i][0], send_buf[i].size(), MPI_UNSIGNED_LONG, si->first, TAG, comm, &send_req[i] );
    while( recv_count-- )

        mperr = MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status );

        assert( (unsigned)status.MPI_SOURCE == procs[idx] );
        assert( 2 * recv_buf[idx].front() + 1 == recv_buf[idx].size() );
        const size_t n = std::min< size_t >( recv_buf[idx].front(), ( recv_buf[idx].size() - 1 ) / 2 );
        dbgOut.printf( 5, "Received buffer of size %lu from proc %d\n", (unsigned long)( 2 * n + 1 ),
                       (int)status.MPI_SOURCE );

        for( size_t i = 0; i < n; ++i )

            assert( handle != 0 );
            if( !idMap.insert( handle, recv_buf[idx][2 * i + 2], 1 ).second )
                error( MB_FAILURE );

        recv_req[idx] = MPI_REQUEST_NULL;

    assert( MPI_SUCCESS == MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status ) &&
            MPI_UNDEFINED == idx );

    std::vector< MPI_Status > stats( send_req.size() );
    mperr = MPI_Waitall( send_req.size(), &send_req[0], &stats[0] );
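// Each rank sends the file ids it assigned to the shared sets it owns and
// receives, from each owning rank, the file ids for shared sets it does not
// own, recording them in idMap.  The final MPI_Waitall keeps the send buffers
// alive until MPI is finished with them.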
    unsigned char flags;
    std::vector< wid_t > tmp;

    assert( buffer_size >= 4 );

    if( len && !( flags & MESHSET_ORDERED ) )

        bool blocked = false;
        assert( ( 0 == len % 2 ) );

    tmp.resize( newlen );
    if( tmp.size() <= buffer_size ) std::copy( tmp.begin(), tmp.end(), buffer + 4 );

    tmp.resize( newlen );
    if( tmp.size() <= buffer_size - buffer[1] ) std::copy( tmp.begin(), tmp.end(), buffer + 4 + buffer[1] );

    tmp.resize( newlen );
    if( tmp.size() <= buffer_size - buffer[1] - buffer[2] )
template < typename TYPE >

    result.resize( len * 2 );

    for( size_t i = 0; i < len; i++ )

    result.resize( tmp.psize() * 2 );

        result[2 * j]     = pit->first;
        result[2 * j + 1] = pit->second - pit->first + 1;
static void merge_ranged_ids( const unsigned long* range_list, size_t len, std::vector< WriteHDF5::wid_t >& result )

    assert( 0 == len % 2 );
    assert( 0 == result.size() % 2 );
    STATIC_ASSERT( sizeof( std::pair< wid_t, wid_t > ) == 2 * sizeof( wid_t ) );

    result.insert( result.end(), range_list, range_list + len );
    size_t plen = result.size() / 2;
    for( size_t i = 0; i < plen; i++ )

        tmp.insert( starth, endh );

    result.resize( tmp.psize() * 2 );

        result[2 * j]     = pit->first;
        result[2 * j + 1] = pit->second - pit->first + 1;
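// Ranged set contents are stored as (start id, count) pairs.  merge_ranged_ids
// appends the incoming pairs, rebuilds a Range from all of them, and then
// re-emits the merged Range as a minimal list of (start, count) pairs.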
static void merge_vector_ids( const unsigned long* list, size_t len, std::vector< WriteHDF5::wid_t >& result )

    result.insert( result.end(), list, list + len );
    assert( buffer_size >= 4 );

    const unsigned long flags      = buffer[0];
    unsigned long num_content      = buffer[1];
    const unsigned long num_child  = buffer[2];
    const unsigned long num_parent = buffer[3];
    const unsigned long* contents  = buffer + 4;
    const unsigned long* children  = contents + num_content;
    const unsigned long* parents   = children + num_child;

    assert( NULL != data );
    if( NULL == data ) return MB_FAILURE;

    std::vector< wid_t > tmp;

    num_content = tmp.size();
    if( sizeof( wid_t ) < sizeof( long ) )

        size_t old_size = tmp.size();
        tmp.resize( sizeof( long ) * old_size / sizeof( wid_t ) );
        unsigned long* array = reinterpret_cast< unsigned long* >( &tmp[0] );
        for( long i = ( (long)old_size ) - 1; i >= 0; --i )

    else if( sizeof( wid_t ) > sizeof( long ) )

        unsigned long* array = reinterpret_cast< unsigned long* >( &tmp[0] );
        std::copy( tmp.begin(), tmp.end(), array );

    contents = reinterpret_cast< unsigned long* >( &tmp[0] );
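// The merged id list is converted in place to an array of unsigned long so it
// can replace 'contents': when wid_t is narrower than long the buffer is grown
// and rewritten back-to-front; when it is wider a simple copy suffices.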
    dbgOut.tprintf( 1, "COMMUNICATING SHARED SET DATA (%lu owned & %lu remote)\n", (unsigned long)owned.size(),
                    (unsigned long)remote.size() );

    std::vector< unsigned > procs;

    Range shared( owned );
    shared.merge( remote );

        nummess += procs.size();

    const size_t MAX_BUFFER_MEM   = 32 * 1024 * 1024 / sizeof( long );
    const size_t INIT_BUFFER_SIZE = 1024;
    const size_t MIN_BUFFER_SIZE  = 16;
    size_t init_buff_size         = INIT_BUFFER_SIZE;
    if( init_buff_size * nummess > MAX_BUFFER_MEM ) init_buff_size = MAX_BUFFER_MEM / nummess;
    if( init_buff_size < MIN_BUFFER_SIZE ) init_buff_size = MIN_BUFFER_SIZE;

    dbgOut.printf( 2, "Using buffer size of %lu for an expected message count of %lu\n", (unsigned long)init_buff_size,
                   (unsigned long)nummess );

        numrecv += procs.size();
        if( std::find( procs.begin(), procs.end(), rank ) != procs.end() ) --numrecv;

    std::vector< MPI_Request > recv_req( numrecv, MPI_REQUEST_NULL );
    std::vector< MPI_Request > lrecv_req( numrecv, MPI_REQUEST_NULL );

    std::vector< std::vector< unsigned long > > recv_buf( numrecv, std::vector< unsigned long >( init_buff_size ) );

        for( size_t j = 0; j < procs.size(); ++j )

            if( procs[j] == rank ) continue;

            dbgOut.printf( 5, "Posting buffer to receive set %d from proc %u\n", tag, procs[j] );
            MPI_Irecv( &recv_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, procs[j], tag, comm, &recv_req[idx] );

    assert( (size_t)idx == numrecv );

    std::vector< MPI_Request > send_req( remote.size() );
    std::vector< std::vector< unsigned long > > send_buf( remote.size() );

        send_buf[idx].resize( init_buff_size );
        rval = pack_set( i, &send_buf[idx][0], init_buff_size );

        dbgOut.printf( 5, "Sending %lu values for set %d to proc %u\n",
                       send_buf[idx][1] + send_buf[idx][2] + send_buf[idx][3] + 4, tag, owner );
        mperr = MPI_Isend( &send_buf[idx][0], init_buff_size, MPI_UNSIGNED_LONG, owner, tag, comm, &send_req[idx] );
    std::vector< EntityHandle > list;
    if( data.setFlags & MESHSET_ORDERED )

    size_t remaining = numrecv;
    while( remaining-- )

        std::vector< unsigned long > dead;
        mperr = MPI_Waitany( recv_req.size(), &recv_req[0], &idx, &status );

        std::vector< unsigned long >& buff = recv_buf[idx];
        size_t size = buff[1] + buff[2] + buff[3] + 4;
        dbgOut.printf( 5, "Received %lu values for set %d from proc %d\n", (unsigned long)size, status.MPI_TAG,
                       status.MPI_SOURCE );
        if( size <= init_buff_size )

            rval = unpack_set( handle, &buff[0], init_buff_size );

            buff.resize( size );
            dbgOut.printf( 5, "Re-Posting buffer to receive set %d from proc %d with size %lu\n", status.MPI_TAG,
                           status.MPI_SOURCE, (unsigned long)size );
            mperr = MPI_Irecv( &buff[0], size, MPI_UNSIGNED_LONG, status.MPI_SOURCE, status.MPI_TAG, comm,
                               &lrecv_req[idx] );

        recv_req[idx] = MPI_REQUEST_NULL;

    MPI_Waitall( send_req.size(), &send_req[0], MPI_STATUSES_IGNORE );

        std::vector< unsigned long >& buff = send_buf[idx];
        size_t size = buff[1] + buff[2] + buff[3] + 4;
        if( size <= init_buff_size ) continue;

        buff.resize( size );

        dbgOut.printf( 5, "Sending %lu values for set %d to proc %u\n", (unsigned long)size, tag, owner );
        mperr = MPI_Isend( &buff[0], size, MPI_UNSIGNED_LONG, owner, tag, comm, &send_req[idx] );

    remaining = numrecv;
    while( remaining-- )

        std::vector< unsigned long > dead;
        mperr = MPI_Waitany( lrecv_req.size(), &lrecv_req[0], &idx, &status );

        std::vector< unsigned long >& buff = recv_buf[idx];
        dbgOut.printf( 5, "Received %lu values for set %d from proc %d\n", 4 + buff[1] + buff[2] + buff[3],
                       status.MPI_TAG, status.MPI_SOURCE );
        rval = unpack_set( handle, &buff[0], buff.size() );

        lrecv_req[idx] = MPI_REQUEST_NULL;

    MPI_Waitall( send_req.size(), &send_req[0], MPI_STATUSES_IGNORE );
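// Two-pass exchange: every set is first sent in a fixed-size buffer
// (init_buff_size).  If a message did not fit, the receiver learns the true
// size from the header words, re-posts a larger receive (tracked in
// lrecv_req), and the sender re-sends the full buffer; the second wait loop
// drains those larger messages.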
    Range shared, owned, remote;

    remote = subtract( shared, owned );

    long data_counts[3];

    long offsets[3], max_counts[3], totals[3];
    rval = create_dataset( 3, data_counts, offsets, max_counts, totals );

    dbgOut.printf( 2, "set contents: %ld local, %ld global, offset = %ld\n", data_counts[0], totals[0], offsets[0] );
    dbgOut.printf( 2, "set children: %ld local, %ld global, offset = %ld\n", data_counts[1], totals[1], offsets[1] );
    dbgOut.printf( 2, "set parents: %ld local, %ld global, offset = %ld\n", data_counts[2], totals[2], offsets[2] );
    for( std::list< ExportSet >::iterator eiter = exportList.begin(); eiter != exportList.end(); ++eiter )

    result.merge( sets );
    range.swap( result );

    range.swap( result );

    for( std::vector< EntityHandle >::const_iterator iter = vect.begin(); iter != vect.end(); ++iter )

    unsigned int read, write;
    for( read = write = 0; read < vect.size(); ++read )

        if( intrsct.find( vect[read] ) != intrsct.end() )

            if( read != write ) vect[write] = vect[read];

    if( write != vect.size() ) vect.resize( write );

    for( std::vector< EntityHandle >::const_iterator iter = vect.begin(); iter != vect.end(); ++iter )

    unsigned int read, write;
    for( read = write = 0; read < vect.size(); ++read )

        if( intrsct.find( vect[read] ) != intrsct.end() )

            if( read != write ) vect[write] = vect[read];

    if( write != vect.size() ) vect.resize( write );
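// These two compaction loops rewrite a handle vector in place: entries that
// pass the intersection test are shifted forward over the removed ones, and
// the vector is then truncated to the number of entries kept.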
    for( std::list< ExportSet >::reverse_iterator i = exportList.rbegin(); i != exportList.rend(); ++i )

    Tag file_id_tag = 0;

    std::vector< EntityHandle > file_id_vect( imesh.size() );

    std::vector< EntityHandle >::iterator j = file_id_vect.begin();
    for( i = imesh.begin(); i != imesh.end(); ++i, ++j )

            return error( MB_FAILURE );

        return error( rval );

        return error( rval );

    file_id_vect.resize( nonlocal.size() );

        return error( rval );

    j = file_id_vect.begin();
    for( i = nonlocal.begin(); i != nonlocal.end(); ++i, ++j )

                                        << " did not receive valid id handle for shared " << name << " " << id
                                        << " owned by process " << owner );

                           "Did not receive valid remote id for "
                           "shared %s %d owned by process %d",

            return error( MB_FAILURE );

        return error( MB_FAILURE );

    file_id_vect.resize( imesh.size() );

        return error( rval );

    int invalid_count = 0;
    j = file_id_vect.begin();
    for( i = imesh.begin(); i != imesh.end(); ++i, ++j )

        MB_SET_ERR( MB_FAILURE, invalid_count << " entities with conflicting ownership found by process "
                                              << myPcomm->proc_config().proc_rank()
                                              << ". This will result in duplicate entities written to file" );