// Start up MPI; argv[2] (optional) selects how many sub-communicators
// (nbComms) to split MPI_COMM_WORLD into later on.
26 MPI_Init( &argc, &argv );
38 if( argc > 2 ) nbComms = atoi( argv[2] );
// MOAB read options: read a pre-partitioned part per rank, using the
// PARALLEL_PARTITION tag, and resolve entities shared across rank boundaries.
40 options =
"PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
// Bail out if the MOAB instance could not be created.
// NOTE(review): the allocation of `mb` is on lines not visible in this chunk.
44 if( NULL ==
mb )
return 1;
47 int global_rank, global_size;
48 MPI_Comm_rank( MPI_COMM_WORLD, &global_rank );
49 MPI_Comm_rank( MPI_COMM_WORLD, &global_size );
// Round-robin coloring: ranks with the same (global_rank % nbComms) land in
// the same sub-communicator.
51 int color = global_rank % nbComms;
// Split MPI_COMM_WORLD into nbComms sub-communicators, keeping the global
// rank order as the key within each color.
// NOTE(review): the guard `if (nbComms > 1)` around this call and the `else`
// for the next line are on lines not visible in this chunk.
55 MPI_Comm_split( MPI_COMM_WORLD, color, global_rank, &comm );
// Single-communicator case: just use the world communicator directly.
58 comm = MPI_COMM_WORLD;
// Sanity check: the parallel component must wrap exactly the communicator we
// handed it. NOTE(review): `rcomm` is obtained from code outside this view —
// presumably ParallelComm::comm(); confirm against the full file.
66 assert( rcomm == comm );
// Ranks 0 and 1 each print how their global rank maps to (color, local rank)
// within the split communicator of size nprocs.
// NOTE(review): the terminating "<< endl;" of both cout chains sits on lines
// not visible in this chunk (orig. 70-71 and 74-75).
68 if( 0 == global_rank )
69 cout <<
" global rank:" << global_rank <<
" color:" << color <<
" rank:" << rank <<
" of " << nprocs
72 if( 1 == global_rank )
73 cout <<
" global rank:" << global_rank <<
" color:" << color <<
" rank:" << rank <<
" of " << nprocs
// Synchronize all ranks so the mapping report is complete before the read
// announcement below.
76 MPI_Barrier( MPI_COMM_WORLD );
// Only the global root announces the upcoming parallel read: which file,
// with which MOAB options, across how many processors and communicators.
78 if( 0 == global_rank )
79 cout <<
"Reading file " <<
test_file_name <<
"\n with options: " << options <<
"\n on " << nprocs
80 <<
" processors on " << nbComms <<
" communicator(s)\n";
// Per-rank counts of shared+owned entities, one slot per dimension
// (0 = verts, 1 = edges, 2 = faces, 3 = elements), zero-initialized.
93 unsigned int nums[4] = { 0 };
// NOTE(review): the loop body that fills nums[i] (orig. line 95) is not
// visible here — presumably a get_entities_by_dimension-style count; confirm
// against the full file.
94 for(
int i = 0; i < 4; i++ )
// Root-side receive buffer: 4 counts per rank.
96 vector< int > rbuf( nprocs * 4, 0 );
// Gather every rank's 4 counts to rank 0 of the (sub-)communicator.
97 MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm );
// Root prints one summary line per rank in this communicator.
99 if( 0 == global_rank )
101 for(
int i = 0; i < nprocs; i++ )
102 cout <<
" Shared, owned entities on proc " << i <<
": " << rbuf[4 * i] <<
" verts, " << rbuf[4 * i + 1]
103 <<
" edges, " << rbuf[4 * i + 2] <<
" faces, " << rbuf[4 * i + 3] <<
" elements" << endl;
// Reset the working range before recounting; the ghost-layer exchange that
// happens between the two reports is on lines not visible in this chunk.
116 owned_entities.
clear();
// Recount shared+owned entities per dimension after ghosting.
// NOTE(review): the loop body refilling nums[i] (orig. 122-124) is outside
// this view; it presumably mirrors the first counting loop.
121 for(
int i = 0; i < 4; i++ )
// Gather the refreshed per-rank counts to root, reusing rbuf.
125 MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm );
// Root prints the post-ghost-exchange summary, one line per rank.
126 if( 0 == global_rank )
128 cout <<
" \n\n After exchanging one ghost layer: \n";
129 for(
int i = 0; i < nprocs; i++ )
131 cout <<
" Shared, owned entities on proc " << i <<
": " << rbuf[4 * i] <<
" verts, " << rbuf[4 * i + 1]
132 <<
" edges, " << rbuf[4 * i + 2] <<
" faces, " << rbuf[4 * i + 3] <<
" elements" << endl;
// Fallback message when the example is built without MPI/HDF5 support —
// presumably the #else branch of a MOAB_HAVE_MPI/HDF5 guard not visible here.
140 std::cout <<
" compile with MPI and hdf5 for this example to work\n";