#if !defined( _MSC_VER ) && !defined( __MINGW32__ )
ErrorCode val = ( a );                                                                        \
if( MB_SUCCESS != val )                                                                       \
    std::cerr << "Error code " << val << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
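// CHKERR reports a MOAB error with file/line context; PCHECK (below) is the collective variant:
// is_any_proc_error() reduces the failure flag across all ranks, so every process fails together.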
if( is_any_proc_error( !( A ) ) ) return report_error( __FILE__, __LINE__ )
std::cerr << "Failure at " << file << ':' << line << std::endl;
int output_vertx_ids[9],
Range& output_elements,
#define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )
int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
    ErrorCode result = ( *func )( file_name );
    std::cout << func_name << " : FAILED!!" << std::endl;
    std::cout << func_name << " : success" << std::endl;
int main( int argc, char* argv[] )
    MPI_Init( &argc, &argv );
    for( int i = 1; i < argc; ++i )
        if( !strcmp( argv[i], "-p" ) )
            pause_proc = atoi( argv[i] );
std::cerr << "Invalid arg: \"" << argv[i] << '"' << std::endl
          << "Usage: " << argv[0] << " [-p <rank>] [<filename>]" << std::endl;
#ifdef MOAB_HAVE_HDF5
    filename = TestDir + "unittest/64bricks_512hex.h5m";
#else
    filename = TestDir + "unittest/64bricks_512hex.vtk";
#endif
    std::cout << "Loading " << filename << "..\n";
#ifdef MOAB_HAVE_HDF5
    std::string filename2 = TestDir + "unittest/64bricks_1khex.h5m";
    std::string filename3 = TestDir + "unittest/twoPolyh.h5m";
    std::string filename4 = TestDir + "unittest/onepart.h5m";
if( pause_proc != -1 )
#if !defined( _MSC_VER ) && !defined( __MINGW32__ )
    std::cout << "Processor " << rank << " of " << size << " with PID " << getpid() << std::endl;
    if( pause_proc == rank )
        volatile int pause = 1;
std::cout << "Processor " << rank << " resuming" << std::endl;
#ifdef MOAB_HAVE_HDF5
std::cout << "All tests passed" << std::endl;
std::cout << num_errors << " TESTS FAILED!" << std::endl;
Tag sharedp_tag, sharedps_tag, sharedh_tag, sharedhs_tag, pstatus_tag;
other_procs_out.clear();
rval = moab.tag_get_data( pstatus_tag, &entity, 1, &status );CHKERR( rval );
rval = moab.tag_get_data( sharedp_tag, &entity, 1, &proc_id );CHKERR( rval );
other_procs_out.push_back( proc_id );
rval = moab.tag_get_data( sharedps_tag, &entity, 1, procs );CHKERR( rval );
for( int i = 0; i < MAX_SHARING_PROCS && procs[i] >= 0; ++i )
    other_procs_out.push_back( procs[i] );
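// Combine the per-rank error flag with MPI_MAX: the result is nonzero on every rank if the
// check failed on any rank, so all processes agree on pass/fail.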
int err = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
return err || result;
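// Each rank creates a 3x3 patch of vertices (and a 2x2 block of quads below), offset by its
// position in a two-column process grid; the ID arithmetic is presumably chosen so that copies
// of shared vertices and elements carry matching global IDs across ranks.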
const int first_vtx_id = 10 * ( rank / 2 ) + 2 * ( rank % 2 ) + 1;
const double x = 2.0 * ( rank / 2 );
const double y = 2.0 * ( rank % 2 );
const int idoff = ( size % 2 && rank / 2 == size / 2 ) ? 0 : 2;
const int idoff1 = rank ? 2 : idoff;
const int idoff2 = idoff1 + idoff;
const int ids[9] = { first_vtx_id,     first_vtx_id + 3 + idoff1, first_vtx_id + 6 + idoff2,
                     first_vtx_id + 1, first_vtx_id + 4 + idoff1, first_vtx_id + 7 + idoff2,
                     first_vtx_id + 2, first_vtx_id + 5 + idoff1, first_vtx_id + 8 + idoff2 };
memcpy( vtx_ids, ids, sizeof( ids ) );
const double coords[27] = { x, y, 0, x + 1, y, 0, x + 2, y, 0, x, y + 1, 0, x + 1, y + 1,
                            0, x + 2, y + 1, 0, x, y + 2, 0, x + 1, y + 2, 0, x + 2, y + 2, 0 };
assert( range.size() == 9 );
std::copy( range.begin(), range.end(), vtx_handles );
const EntityHandle conn[4][4] = { { vtx_handles[0], vtx_handles[3], vtx_handles[4], vtx_handles[1] },
                                  { vtx_handles[1], vtx_handles[4], vtx_handles[5], vtx_handles[2] },
                                  { vtx_handles[3], vtx_handles[6], vtx_handles[7], vtx_handles[4] },
                                  { vtx_handles[4], vtx_handles[7], vtx_handles[8], vtx_handles[5] } };
for( int i = 0; i < 4; ++i )
    const int id = 4 * rank + i + 1;
entity_sets[nsets] = 0;
set_ids[1] = set_ids[2];
else if( rank / 2 < ( size - 1 ) / 2 )
for( int i = 0; i < nsets; ++i )
const char* geom_names[] = { "vertex", "curve", "surface", "volume", "unknown" };
429 "PARALLEL=READ_DELETE;"
430 "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
431 "PARTITION_DISTRIBUTE;"
432 "PARALLEL_RESOLVE_SHARED_ENTS;"
433 "PARALLEL_SEQUENCE_FACTOR=1.4" );
CHKERR( rval );
442 std::cerr <<
"check_all_shared_handles test failed on proc " << pcomm->
proc_config().
proc_rank() << std::endl;
Range shared, invalid;
const void* tagvals[] = { &dim };
rval = moab.get_entities_by_dimension( *j, dim, ents );CHKERR( rval );
if( ents.empty() ) continue;
std::vector< int > procs;
std::sort( procs.begin(), procs.end() );
if( procs.size() > 1 ) shared.merge( ents );
for( ++k; k != ents.end(); ++k )
    std::vector< int > tmp_procs;
    std::sort( tmp_procs.begin(), tmp_procs.end() );
    if( tmp_procs != procs ) invalid.insert( *j );
rval = moab.get_entities_by_dimension( *j, 0, ents );CHKERR( rval );
for( k = ents.begin(); k != ents.end(); ++k )
    std::vector< int > tmp_procs;
    if( tmp_procs != procs ) invalid.insert( *j );
if( !invalid.empty() )
    std::cerr << "Elements or vertices owned by a single geometric entity are "
              << "not shared by the same set of processors for the "
rval = moab.tag_get_data( id_tag, &*i, 1, &id );
std::cerr << geom_names[dim] << " " << id << ", ";
std::cerr << std::endl;
std::vector< int > procs;
std::sort( procs.begin(), procs.end() );
std::vector< EntityHandle > tmp;
rval = moab.get_connectivity( *i, conn, len, false, &tmp );CHKERR( rval );
for( int j = 0; j < len; ++j )
    std::vector< int > vprocs;
    std::sort( vprocs.begin(), vprocs.end() );
    std::vector< int > diff( std::max( procs.size(), vprocs.size() ) );
    std::vector< int >::iterator k =
        std::set_difference( procs.begin(), procs.end(), vprocs.begin(), vprocs.end(), diff.begin() );
    if( k != diff.begin() )
        invalid.insert( conn[j] );
if( !invalid.empty() )
    std::cerr << "Vertices must be shared with at least the union of the processes "
              << "sharing the elements containing the vertex. This is NOT true for "
std::vector< unsigned char > flags( all_ents.size() );
std::vector< unsigned char >::const_iterator f = flags.begin();
const std::vector< int >& ids,
for( size_t i = 0; i < ids.size(); ++i )
    const void* tag_vals[2] = { &dimension, &ids[i] };
rval = moab.get_entities_by_dimension( *j, dimension, ents );CHKERR( rval );
results.merge( ents );
const std::vector< int > partition_geom_ids[4],
std::vector< int >& ghost_entity_ids,
int bridge_dimension,
tags[1] = moab.globalId_tag();
Range iface_ghosts, iface_ents;
if( bridge_dimension == 2 )
for( int n = 0; n < num_layers; ++n )
    iface_ghosts.clear();
if( ghost_dimension == 3 )
ghost_entity_ids.resize( ghosts.size() );
rval = moab.tag_get_data( tags[1], ghosts, &ghost_entity_ids[0] );CHKERR( rval );
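// Ghosting is requested as PARALLEL_GHOSTS=<ghost dim>.<bridge dim>.<num layers>; for example
// "3.2.1" asks for one layer of 3D elements ghosted across shared 2D (face) interfaces.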
std::ostringstream file_opts;
file_opts << "PARALLEL=READ_DELETE;"
          << "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
          << "PARTITION_DISTRIBUTE;"
          << "PARALLEL_RESOLVE_SHARED_ENTS;"
          << "PARALLEL_GHOSTS=" << ghost_dimension << '.' << bridge_dimension << '.' << num_layers;
Range partition_geom[4];
PCHECK( !partition_geom[3].empty() );
for( Range::iterator i = partition_geom[3].begin(); i != partition_geom[3].end(); ++i )
rval = moab.get_child_meshsets( *i, tmp );CHKERR( rval );
std::ostringstream error_msg;
Range ents, iface_surfs, iface_curves, iface_vertices;
rval = moab.get_entities_by_dimension( *i, ghost_dimension - 1, ents );CHKERR( rval );
if( ents.empty() ) continue;
std::vector< int > procs, tmp_procs;
for( ++j; !error && j != ents.end(); ++j )
if( tmp_procs != procs )
    error_msg << "Failure at " << __FILE__ << ':' << __LINE__ << std::endl
              << "\tNot all entities in geometric surface are shared with"
              << " same processor." << std::endl;
if( procs.empty() ) continue;
if( procs.size() != 1 )
    error_msg << "Failure at " << __FILE__ << ':' << __LINE__ << std::endl
              << "\tSurface elements shared with " << procs.size() << " processors." << std::endl;
int other_rank = procs[0];
partition_geom[2].insert( *i );
rval = moab.get_child_meshsets( *i, ents );CHKERR( rval );
partition_geom[1].merge( ents );
std::cerr << error_msg.str();
for( Range::iterator i = partition_geom[1].begin(); i != partition_geom[1].end(); ++i )
rval = moab.get_child_meshsets( *i, ents );CHKERR( rval );
partition_geom[0].merge( ents );
std::vector< int > partn_geom_ids[4];
partn_geom_ids[dim].resize( partition_geom[dim].size() );
std::vector< int > actual_ghost_ent_ids( ghost_ents.size() );
rval = moab.tag_get_data( id_tag, ghost_ents, &actual_ghost_ent_ids[0] );CHKERR( rval );
std::vector< int > expected_ghost_ent_ids;
rval = get_expected_ghosts( moab2, partn_geom_ids, expected_ghost_ent_ids, ghost_dimension, bridge_dimension,
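// Compare the ghosts MOAB actually created with the locally computed expectation; both ID lists
// are sorted first so the comparison is order-independent.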
std::sort( actual_ghost_ent_ids.begin(), actual_ghost_ent_ids.end() );
std::sort( expected_ghost_ent_ids.begin(), expected_ghost_ent_ids.end() );
PCHECK( expected_ghost_ent_ids == actual_ghost_ent_ids );
for( Range::iterator i = partition_geom[3].begin(); i != partition_geom[3].end(); ++i )
rval = moab.get_entities_by_dimension( *i, 3, ents );CHKERR( rval );
myents.merge( ents );
if( ghost_dimension != 3 )
myents.merge( ghost_ents );
rval = moab.get_entities_by_dimension( 0, ghost_dimension, ents );
868 "PARALLEL=READ_DELETE;"
869 "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
870 "PARTITION_DISTRIBUTE;"
871 "PARALLEL_RESOLVE_SHARED_ENTS;"
872 "PARALLEL_GHOSTS=3.2.1;"
873 "PARALLEL_SEQUENCE_FACTOR=1.5" );
CHKERR( rval );
878 rval =
moab.get_entities_by_dimension( 0, 3, local );
CHKERR( rval );
880 while( i != local.
end() )
891 i = local.
erase( i );
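// Dense-tag round trip: store each local element's own handle in dense_test_tag, read the values
// back for the local elements, and then check the corresponding values on the ghost copies.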
std::vector< EntityHandle > handles( local.size() ), handles2;
std::copy( local.begin(), local.end(), handles.begin() );
rval = moab.tag_set_data( dense_test_tag, local, &handles[0] );CHKERR( rval );
handles2.resize( local.size() );
rval = moab.tag_get_data( dense_test_tag, local, &handles2[0] );CHKERR( rval );
PCHECK( handles == handles2 );
handles.resize( ghosts.size() );
handles2.resize( ghosts.size() );
rval = moab.tag_get_data( dense_test_tag, ghosts, &handles2[0] );CHKERR( rval );
PCHECK( handles == handles2 );
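// Same round trip with a sparse tag: tag each local element with a processor rank and verify the
// values read back, both locally and on the ghost entities.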
rval = moab.tag_set_data( sparse_test_tag, local, &procs1[0] );CHKERR( rval );
std::vector< int > procs2( local.size() );
rval = moab.tag_get_data( sparse_test_tag, local, &procs2[0] );CHKERR( rval );
PCHECK( procs1 == procs2 );
procs1.resize( ghosts.size() );
procs2.resize( ghosts.size() );
rval = moab.tag_get_data( sparse_test_tag, ghosts, &procs2[0] );CHKERR( rval );
std::vector< int >::iterator j = procs1.begin();
for( i = ghosts.begin(); i != ghosts.end(); ++i, ++j )
PCHECK( procs1 == procs2 );
#ifdef MOAB_HAVE_HDF5
"PARALLEL=READ_DELETE;"
"PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
"PARTITION_DISTRIBUTE;"
"PARALLEL_RESOLVE_SHARED_ENTS;"
"PARALLEL_GHOSTS=3.2.1" );
"PARALLEL=READ_BCAST;"
"PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
"PARTITION_DISTRIBUTE;"
"PARALLEL_RESOLVE_SHARED_ENTS;"
"PARALLEL_GHOSTS=3.2.1" );
int err, rank, bytes;
MPI_Type_size( val_type, &bytes );
std::vector< unsigned char > buffer2( bytes );
for( int i = 0; i < 2; ++i )
if( i == ( rank < other_proc ) )
if( err ) return err;
err = MPI_Recv( &buffer2[0], num_val, val_type, other_proc, 0, MPI_COMM_WORLD, &status );
if( err ) return err;
memcpy( buffer, &buffer2[0], bytes );
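// Cross-check vertex ownership: every rank contributed 9 (vertex id, owner) pairs; group the
// reported owners by vertex ID so disagreements between ranks can be detected and printed.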
std::map< int, std::vector< int > > verts;
for( int p = 0; p < comm_size; ++p )
    for( int i = 0; i < 9; ++i )
    {
        int idx = 9 * p + i;
        verts[ids[idx]].push_back( p );
        verts[ids[idx]].push_back( owners[idx] );
    }
bool print_desc = true;
std::map< int, std::vector< int > >::iterator it;
for( it = verts.begin(); it != verts.end(); ++it )
    std::vector< int >& list = it->second;
    bool all_same = true;
    for( size_t i = 2; i < list.size(); i += 2 )
        if( list[i + 1] != list[1] ) all_same = false;
    if( all_same ) continue;
std::cerr << "ERROR at " __FILE__ ":" << __LINE__ << std::endl
          << " Processors have inconsistent ideas of vertex ownership:" << std::endl;
std::cerr << " Vertex " << id << ": " << std::endl;
for( size_t i = 0; i < list.size(); i += 2 )
    std::cerr << " Proc " << list[i] << " thinks owner is " << list[i + 1] << std::endl;
if( num_ghost_layers )
for( int i = 0; i < 9; ++i )
    rval = pcomm.get_owner( verts[i], owner[i] );
std::vector< int > all_ids( 9 * size ), all_owner( 9 * size );
if( ierr ) return MB_FAILURE;
ierr = MPI_Gather( owner, 9, MPI_INT, &all_owner[0], 9, MPI_INT, 0, MPI_COMM_WORLD );
if( ierr ) return MB_FAILURE;
const int* orig_ids,
const char* singular_name,
const char* plural_name )
std::vector< int > new_ids( num_ents );
std::vector< int > all_orig_ids( num_ents * size ), all_new_ids( num_ents * size );
ierr = MPI_Gather( (void*)orig_ids, num_ents, MPI_INT, &all_orig_ids[0], num_ents, MPI_INT, 0,
                   MPI_COMM_WORLD );
if( ierr ) return MB_FAILURE;
ierr = MPI_Gather( &new_ids[0], num_ents, MPI_INT, &all_new_ids[0], num_ents, MPI_INT, 0,
                   MPI_COMM_WORLD );
if( ierr ) return MB_FAILURE;
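// Rank 0 builds a forward (original id -> new id) map and a reverse (new id -> original id) map
// from the gathered lists; any processor pair that disagrees on a mapping is reported below.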
std::map< int, int > idmap;
std::map< int, int > owner;
for( int i = 0; i < num_ents * size; ++i )
    std::map< int, int >::iterator it = idmap.find( all_orig_ids[i] );
    if( it == idmap.end() )
    {
        idmap[all_orig_ids[i]] = all_new_ids[i];
        owner[all_orig_ids[i]] = i / num_ents;
    }
    else if( it->second != all_new_ids[i] )
        std::cerr << "Inconsistent " << singular_name << " IDs between processors " << owner[all_orig_ids[i]]
                  << " and " << i / num_ents << " : " << it->second << " and " << all_new_ids[i]
                  << " respectively." << std::endl;
for( int i = 0; i < num_ents * size; ++i )
    std::map< int, int >::iterator it = idmap.find( all_new_ids[i] );
    if( it == idmap.end() )
    {
        idmap[all_new_ids[i]] = all_orig_ids[i];
        owner[all_new_ids[i]] = i / num_ents;
    }
    else if( it->second != all_orig_ids[i] )
        std::cerr << "ID " << all_new_ids[i] << " assigned to different " << plural_name << " on processors "
                  << owner[all_new_ids[i]] << " and " << i / num_ents << std::endl;
assert( 4u == quad_range.size() );
std::copy( quad_range.begin(), quad_range.end(), quads );
int zero[9] = { 0 };
const unsigned rank = rank_i;
const unsigned nproc = size_i;
sets.insert( set_arr[0] );
sets.insert( set_arr[1] );
sets.insert( set_arr[2] );
set_arr[2] = set_arr[1];
for( size_t i = 0; i < 3; ++i )
else if( owner == rank )
if( owner_handle != set_arr[i] )
    std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " invalid remote handle for owned set"
else if( local != set_arr[i] )
    std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " invalid local handle for remote data"
const unsigned col = rank / 2;
const unsigned colrank =
unsigned mins[3] = { 0, 0, 0 };
unsigned maxs[3] = { nproc - 1, 0, 0 };
mins[1] = mins[2] = 0;
maxs[1] = maxs[2] = std::min( 3u, nproc - 1 );
else if( col == ( nproc - 1 ) / 2 )
    mins[1] = mins[2] = colrank - 2;
    maxs[1] = maxs[2] = std::min( colrank + 1, nproc - 1 );
mins[1] = colrank - 2;
maxs[1] = std::min( colrank + 1, nproc - 1 );
maxs[2] = std::min( colrank + 3, nproc - 1 );
std::vector< unsigned > expected, list;
for( size_t i = 0; i < 3; ++i )
for( unsigned r = mins[i]; r <= std::min( maxs[i], nproc - 1 ); ++r )
    if( r != rank ) expected.push_back( r );
std::sort( list.begin(), list.end() );
if( expected != list )
    std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " incorrect sharing list for entity set"
unsigned send_list[6], set_owners[3];
std::vector< unsigned > recv_list( 6 * nproc );
for( size_t i = 0; i < 3; ++i )
    send_list[2 * i + 1] = set_owners[i];
MPI_Allgather( send_list, 6, MPI_UNSIGNED, &recv_list[0], 6, MPI_UNSIGNED, MPI_COMM_WORLD );
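// Every rank contributes (set id, owner) pairs for its three sets; after the all-gather, all
// ranks must agree on which process owns each set ID.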
std::map< unsigned, unsigned > owners;
for( unsigned i = 0; i < 6 * nproc; i += 2 )
    unsigned id = recv_list[i];
    unsigned owner = recv_list[i + 1];
    if( owners.find( id ) == owners.end() )
    else if( owners[id] != owner )
        std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " mismatched owners (" << owners[id]
                  << " and " << owner << ") for set with ID " << id << std::endl;
std::vector< unsigned > act_owners, exp_owners;
for( size_t i = 0; i < 3; ++i )
    exp_owners.push_back( set_owners[i] );
std::sort( act_owners.begin(), act_owners.end() );
std::sort( exp_owners.begin(), exp_owners.end() );
exp_owners.erase( std::unique( exp_owners.begin(), exp_owners.end() ), exp_owners.end() );
PCHECK( exp_owners == act_owners );
for( unsigned i = 0; i < nproc; ++i )
    std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank
              << " failed to get shared set list for sets owned by rank " << set_owners[i] << std::endl;
Range expected_range;
for( size_t j = 0; j < 3; ++j )
    if( set_owners[j] == i ) expected_range.insert( set_arr[j] );
if( expected_range != sets )
    std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank
              << " has incorrect shared set list for sets owned by rank " << set_owners[i] << std::endl
              << "Expected: " << expected_range << std::endl
              << "Actual: " << sets << std::endl;
template < typename T >
std::vector< T > dum_vals( shared_ents.size() );
unsigned char pstatus;
for( rit = shared_ents.begin(); rit != shared_ents.end(); ++rit, i++ )
bool with_root = std::find( &shprocs[0], &shprocs[np], 0 ) - &shprocs[0] != np || !pcomm.rank();
if( mpi_op == MPI_SUM )
    if( dum_vals[i] != fact * np ) return MB_FAILURE;
else if( mpi_op == MPI_PROD )
    if( dum_vals[i] != pow( fact, np ) ) return MB_FAILURE;
else if( mpi_op == MPI_MAX )
    if( with_root && dum_vals[i] != fact ) return MB_FAILURE;
else if( mpi_op == MPI_MIN )
    if( with_root && dum_vals[i] != fact ) return MB_FAILURE;
template < typename T >
std::vector< T > dum_vals;
if( pcomm.rank() == 0 )
    dum_vals.resize( dum_range.size(), (T)3 );
if( pcomm.rank() == 0 )
    std::fill( dum_vals.begin(), dum_vals.end(), (T)-1 );
std::vector< Tag > src_tags, dest_tags;
src_tags.push_back( test_tag );
dest_tags.push_back( test_dest );
rval = pcomm.reduce_tags( src_tags, dest_tags, MPI_MIN, dum_range );
PCHECK( rval == MB_FAILURE );
rval = pcomm.reduce_tags( test_tag, MPI_MIN, dum_range );
tmp_rval = test_reduce_tags< int >( "test_reduce_tags (int)", MB_TYPE_INTEGER );
std::cout << "test_reduce_tags failed for int data type." << std::endl;
tmp_rval = test_reduce_tags< double >( "test_reduce_tags (dbl)", MB_TYPE_DOUBLE );
std::cout << "test_reduce_tags failed for double data type." << std::endl;
Tag src_tag, dest_tag;
double def_dbl = 5.0;
std::vector< Tag > src_tags, dest_tags;
src_tags.push_back( src_tag );
dest_tags.push_back( dest_tag );
std::vector< double > tag_vals( dum_range.size(), 1.0 );
std::fill( tag_vals.begin(), tag_vals.end(), 2.0 );
rval = pcomm.reduce_tags( src_tags, dest_tags, MPI_SUM, dum_range );CHKERR( rval );
rval = check_shared_ents< double >( pcomm, dest_tag, (double)1.0, MPI_SUM );CHKERR( rval );
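// After the MPI_SUM reduction, each shared entity's dest_tag should equal the per-process
// contribution (1.0) times the number of sharing processes; check_shared_ents verifies exactly
// that in its MPI_SUM branch above.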
1709 "PARALLEL=READ_PART;"
1710 "PARTITION=PARALLEL_PARTITION;"
1711 "PARALLEL_RESOLVE_SHARED_ENTS" );
CHKERR( rval );
1717 rval =
moab.get_entities_by_dimension( 0, 1, local );
CHKERR( rval );
1718 rval =
moab.get_entities_by_dimension( 0, 2, local );
CHKERR( rval );
1728 if(
mb_instance.is_valid( *it ) )
return MB_FAILURE;
1730 const char* opt =
"PARALLEL=WRITE_PART";
1731 rval =
moab.write_file(
"tmpx.h5m", 0, opt );
CHKERR( rval );
1744 "PARALLEL=READ_PART;"
1745 "PARTITION=PARALLEL_PARTITION;"
1746 "PARALLEL_RESOLVE_SHARED_ENTS;"
1747 "PARALLEL_GHOSTS=3.0.1" );
CHKERR( rval );
1758 "PARALLEL=READ_PART;"
1759 "PARTITION=PARALLEL_PARTITION;"
1760 "PARALLEL_RESOLVE_SHARED_ENTS;" );
1774 "PARALLEL=READ_PART;"
1775 "PARTITION=PARALLEL_PARTITION;"
1776 "PARALLEL_RESOLVE_SHARED_ENTS;"
1777 "PARALLEL_GHOSTS=3.2.1;"
1778 "PARALLEL_SEQUENCE_FACTOR=1.5" );
CHKERR( rval );
1782 rval =
moab.get_entities_by_dimension( 0, 3, elems );
CHKERR( rval );
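// The checks below require all local elements and vertices to occupy a single contiguous
// sequence (psize() == 1), so each *_iterate call should cover everything in one sweep
// (count == size()); presumably the PARALLEL_SEQUENCE_FACTOR read option makes room for that.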
if( elems.psize() != 1 )
    std::cout << " elems.psize() = " << elems.psize() << "\n";
rval = moab.connect_iterate( elems.begin(), elems.end(), conn_ptr, vpere, count );CHKERR( rval );
if( count != (int)elems.size() )
    std::cout << " more than one sequence: elems.size() = " << elems.size() << " count:" << count << "\n";
void* globalid_data = NULL;
if( count != (int)elems.size() )
    std::cout << " more than one tag sequence: elems.size() = " << elems.size() << " count:" << count << "\n";
rval = moab.get_entities_by_dimension( 0, 0, verts );CHKERR( rval );
if( verts.psize() != 1 )
    std::cout << " verts.psize() = " << verts.psize() << "\n";
double *x_ptr, *y_ptr, *z_ptr;
rval = moab.coords_iterate( verts.begin(), verts.end(), x_ptr, y_ptr, z_ptr, count );CHKERR( rval );
if( count != (int)verts.size() )
    std::cout << " more than one sequence: verts.size() = " << verts.size() << " count:" << count << "\n";
if( count != (int)verts.size() )
    std::cout << " more than one tag sequence: verts.size() = " << verts.size() << " count:" << count << "\n";
MPI_Comm_group( duplicate, &worldg );
int rank[2] = { 0, 1 };
MPI_Group_incl( worldg, 2, rank, &gr1 );
MPI_Group_incl( worldg, 1, &rank[1], &gr2 );
int comp1 = 10, comp2 = 12;
std::map< int, Range > ranges_to_send;
std::vector< int > number_elems_per_part;
number_elems_per_part.push_back( 6 );
number_elems_per_part.push_back( 10 );
std::cout << " send sizes ";
for( int k = 0; k < (int)number_elems_per_part.size(); k++ )
    std::cout << " " << number_elems_per_part[k];
Range verts( 10, 20 );
for( std::map< int, Range >::iterator it = ranges_to_send.begin(); it != ranges_to_send.end(); it++ )
    Range& ran = it->second;
    std::cout << " receiver " << it->first << " receive range: [" << ran[0] << ", " << ran[ran.size() - 1]
MPI_Group_free( &worldg );
MPI_Group_free( &gr1 );
MPI_Group_free( &gr2 );
MPI_Comm_free( &duplicate );