/* Parallel read example: load a partitioned mesh file on one or more MPI
 * communicators, resolve the entities shared between processors, report how
 * many shared entities each processor owns, exchange one layer of ghost
 * elements, and report the counts again. */
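// A typical invocation might look like the following (a sketch: the executable
// name "HelloParMOAB" and the process counts are illustrative assumptions, not
// fixed by the code):
//
//   mpiexec -np 8 ./HelloParMOAB                    # default mesh, one communicator
//   mpiexec -np 8 ./HelloParMOAB mymesh.h5m 2       # user mesh, split into 2 communicators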
11 #include "moab/Core.hpp"
12 #ifdef MOAB_HAVE_MPI
13 #include "moab/ParallelComm.hpp"
14 #endif
15 #include "MBParallelConventions.h"
16 #include <iostream>
17
18 using namespace moab;
19 using namespace std;
20
21 string test_file_name = string( MESH_DIR ) + string( "/64bricks_512hex_256part.h5m" );
22
int main( int argc, char** argv )
{
#ifdef MOAB_HAVE_MPI
    MPI_Init( &argc, &argv );

    string options;

    // The user can pass an alternative mesh file on the command line
    if( argc > 1 )
    {
        test_file_name = argv[1];
    }

    // Optional second argument: number of communicators to split MPI_COMM_WORLD into
    int nbComms = 1;
    if( argc > 2 ) nbComms = atoi( argv[2] );

    options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
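    // A brief gloss of the read options above (see the MOAB user's guide for the
    // authoritative list):
    //   PARALLEL=READ_PART            - each process reads only the parts assigned to it
    //   PARTITION=PARALLEL_PARTITION  - parts are the sets tagged with PARALLEL_PARTITION
    //   PARALLEL_RESOLVE_SHARED_ENTS  - resolve which entities on part boundaries are
    //                                   shared between processors after the read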

    // Get a MOAB instance
    Interface* mb = new( std::nothrow ) Core;
    if( NULL == mb ) return 1;

    MPI_Comm comm;
    int global_rank, global_size;
    MPI_Comm_rank( MPI_COMM_WORLD, &global_rank );
    MPI_Comm_size( MPI_COMM_WORLD, &global_size );

    // Processes with the same color end up in the same subcommunicator
    int color = global_rank % nbComms;
    if( nbComms > 1 )
    {
        // Split MPI_COMM_WORLD into nbComms subcommunicators
        MPI_Comm_split( MPI_COMM_WORLD, color, global_rank, &comm );
    }
    else
        comm = MPI_COMM_WORLD;

    // Create a ParallelComm instance attached to this communicator
    ParallelComm* pcomm = new ParallelComm( mb, comm );
    int nprocs = pcomm->proc_config().proc_size();
    int rank   = pcomm->proc_config().proc_rank();
#ifndef NDEBUG
    MPI_Comm rcomm = pcomm->proc_config().proc_comm();
    assert( rcomm == comm );
#endif
    if( 0 == global_rank )
        cout << " global rank:" << global_rank << " color:" << color << " rank:" << rank << " of " << nprocs
             << " processors\n";

    if( 1 == global_rank )
        cout << " global rank:" << global_rank << " color:" << color << " rank:" << rank << " of " << nprocs
             << " processors\n";

    MPI_Barrier( MPI_COMM_WORLD );

    if( 0 == global_rank )
        cout << "Reading file " << test_file_name << "\n with options: " << options << "\n on " << nprocs
             << " processors on " << nbComms << " communicator(s)\n";

    // Read the file with the options assembled above; shared entities on the
    // partition boundaries are resolved during the read
    ErrorCode rval = mb->load_file( test_file_name.c_str(), 0, options.c_str() );MB_CHK_ERR( rval );

    Range shared_ents;
    // Get entities shared with all other processors
    rval = pcomm->get_shared_entities( -1, shared_ents );MB_CHK_ERR( rval );

    // Keep only the shared entities that this processor owns
    Range owned_entities;
    rval = pcomm->filter_pstatus( shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities );MB_CHK_ERR( rval );
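    // Note on the filter above: with the PSTATUS_NOT operation, filter_pstatus()
    // keeps the entities whose parallel status does NOT have the given bit set,
    // so filtering on PSTATUS_NOT_OWNED selects the shared entities that this
    // processor actually owns.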

    // Count the owned shared entities of each dimension and gather the counts on the root
    int nums[4] = { 0 };
    for( int i = 0; i < 4; i++ )
        nums[i] = (int)owned_entities.num_of_dimension( i );
    vector< int > rbuf( nprocs * 4, 0 );
    MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm );

    if( 0 == global_rank )
    {
        for( int i = 0; i < nprocs; i++ )
            cout << " Shared, owned entities on proc " << i << ": " << rbuf[4 * i] << " verts, " << rbuf[4 * i + 1]
                 << " edges, " << rbuf[4 * i + 2] << " faces, " << rbuf[4 * i + 3] << " elements" << endl;
    }

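    // Exchange one layer of ghost elements: after the call below, each processor
    // also holds copies (ghosts) of the 3D elements owned by its neighbors that
    // touch its part through at least one shared vertex.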
    rval = pcomm->exchange_ghost_cells( 3,     // ghost elements of dimension 3
                                        0,     // bridge dimension 0: connect through shared vertices
                                        1,     // one layer of ghosts
                                        0,     // no additional lower-dimensional entities
                                        true );MB_CHK_ERR( rval );  // store remote handles on the ghosts

    // Repeat the query after the ghost exchange
    shared_ents.clear();
    owned_entities.clear();
    rval = pcomm->get_shared_entities( -1, shared_ents );MB_CHK_ERR( rval );
    rval = pcomm->filter_pstatus( shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities );MB_CHK_ERR( rval );

    // Recount how many shared entities of each dimension are owned on this processor
    for( int i = 0; i < 4; i++ )
        nums[i] = (int)owned_entities.num_of_dimension( i );

    // Gather the updated counts on the root processor
    MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm );
    if( 0 == global_rank )
    {
        cout << " \n\n After exchanging one ghost layer: \n";
        for( int i = 0; i < nprocs; i++ )
        {
            cout << " Shared, owned entities on proc " << i << ": " << rbuf[4 * i] << " verts, " << rbuf[4 * i + 1]
                 << " edges, " << rbuf[4 * i + 2] << " faces, " << rbuf[4 * i + 3] << " elements" << endl;
        }
    }
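    // At this point one could, for example, write the ghosted mesh to disk in
    // parallel (an untested sketch, not part of the original example; the output
    // file name is arbitrary):
    //   rval = mb->write_file( "ghosted.h5m", 0, "PARALLEL=WRITE_PART" );MB_CHK_ERR( rval );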

    delete mb;

    MPI_Finalize();
#else
    std::cout << " Compile with MPI and HDF5 for this example to work\n";
#endif
    return 0;
}