Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
HelloParMOAB.cpp
/** @example HelloParMOAB.cpp \n
 * \brief Read a mesh into MOAB in parallel, then resolve, exchange, and report shared and ghosted entities \n
 * <b>To run</b>: mpiexec -np 4 HelloParMOAB [filename]\n
 *
 * The example also shows how to load the mesh independently on multiple
 * communicators (the optional second argument gives the number of communicators):
 *
 * mpiexec -np 8 HelloParMOAB [filename] [nbComms]
 */
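// A typical standalone build against an installed MOAB might look like the
// following (paths, flags, and the MESH_DIR definition are assumptions, not
// part of the example itself):
//   mpicxx HelloParMOAB.cpp -o HelloParMOAB -I${MOAB_DIR}/include
//          -L${MOAB_DIR}/lib -lMOAB -DMESH_DIR='"/path/to/MeshFiles"'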

#include "moab/Core.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelComm.hpp"
#endif
#include "MBParallelConventions.h"
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <new>
#include <string>
#include <vector>

using namespace moab;
using namespace std;

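// Note: MESH_DIR is expected to be provided as a compile-time definition by the
// MOAB examples build system; it points at the directory containing the sample
// mesh files shipped with MOAB.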
string test_file_name = string( MESH_DIR ) + string( "/64bricks_512hex_256part.h5m" );

int main( int argc, char** argv )
{
#ifdef MOAB_HAVE_MPI
    MPI_Init( &argc, &argv );

    string options;

    // If a mesh file is given on the command line, use it instead of the default
    if( argc > 1 )
    {
        test_file_name = argv[1];
    }

    // Optional second argument: number of communicators to split MPI_COMM_WORLD into
    int nbComms = 1;
    if( argc > 2 ) nbComms = atoi( argv[2] );

    // Read options: each rank reads its own part of a pre-partitioned file,
    // then shared entities on part boundaries are resolved
    options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";

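    // The ghost exchange done explicitly further below could instead be requested
    // at read time. A sketch (the exact "PARALLEL_GHOSTS=<ghost_dim>.<bridge_dim>.<num_layers>"
    // value is an assumption based on the ghost parameters used later in this example):
    // options += ";PARALLEL_GHOSTS=3.0.1";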
    // Get MOAB instance
    Interface* mb = new( std::nothrow ) Core;
    if( NULL == mb ) return 1;

    MPI_Comm comm;
    int global_rank, global_size;
    MPI_Comm_rank( MPI_COMM_WORLD, &global_rank );
    MPI_Comm_size( MPI_COMM_WORLD, &global_size );

    int color = global_rank % nbComms; // Ranks with the same color form one group
    if( nbComms > 1 )
    {
        // Split MPI_COMM_WORLD into nbComms sub-communicators
        MPI_Comm_split( MPI_COMM_WORLD, color, global_rank, &comm );
    }
    else
        comm = MPI_COMM_WORLD;

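    // When nbComms > 1, each sub-communicator loads and resolves its own copy of
    // the mesh independently of the other groups.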
    // Get the ParallelComm instance
    ParallelComm* pcomm = new ParallelComm( mb, comm );
    int nprocs = pcomm->proc_config().proc_size();
    int rank   = pcomm->proc_config().proc_rank();
#ifndef NDEBUG
    MPI_Comm rcomm = pcomm->proc_config().proc_comm();
    assert( rcomm == comm );
#endif
    if( 0 == global_rank )
        cout << " global rank:" << global_rank << " color:" << color << " rank:" << rank << " of " << nprocs
             << " processors\n";

    if( 1 == global_rank )
        cout << " global rank:" << global_rank << " color:" << color << " rank:" << rank << " of " << nprocs
             << " processors\n";

    MPI_Barrier( MPI_COMM_WORLD );

    if( 0 == global_rank )
        cout << "Reading file " << test_file_name << "\n with options: " << options << "\n on " << nprocs
             << " processors on " << nbComms << " communicator(s)\n";

    // Read the file with the specified options
    ErrorCode rval = mb->load_file( test_file_name.c_str(), 0, options.c_str() );MB_CHK_ERR( rval );

    Range shared_ents;
    // Get entities shared with all other processors
    rval = pcomm->get_shared_entities( -1, shared_ents );MB_CHK_ERR( rval );

    // Filter the shared entities, keeping only those that are NOT not_owned,
    // i.e., the shared entities owned by this processor
    Range owned_entities;
    rval = pcomm->filter_pstatus( shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities );MB_CHK_ERR( rval );

    int nums[4] = { 0 }; // Number of owned, shared entities per dimension
    for( int i = 0; i < 4; i++ )
        nums[i] = (int)owned_entities.num_of_dimension( i );
    vector< int > rbuf( nprocs * 4, 0 );
    MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm );
    // Print the stats gathered:
    if( 0 == global_rank )
    {
        for( int i = 0; i < nprocs; i++ )
            cout << " Shared, owned entities on proc " << i << ": " << rbuf[4 * i] << " verts, " << rbuf[4 * i + 1]
                 << " edges, " << rbuf[4 * i + 2] << " faces, " << rbuf[4 * i + 3] << " elements" << endl;
    }

    // Now exchange 1 layer of ghost elements, using vertices as bridge
    // (we could have done this as part of the read, using the PARALLEL_GHOSTS read option)
    rval = pcomm->exchange_ghost_cells( 3,       // int ghost_dim
                                        0,       // int bridge_dim
                                        1,       // int num_layers
                                        0,       // int addl_ents
                                        true );  // bool store_remote_handles
    MB_CHK_ERR( rval );

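    // If the application had attached field data as tags, those values would
    // typically also need to be synchronized onto the newly created ghost
    // entities (for instance with ParallelComm::exchange_tags); that step is
    // not part of this example.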
    // Repeat the reports, after the ghost exchange
    shared_ents.clear();
    owned_entities.clear();
    rval = pcomm->get_shared_entities( -1, shared_ents );MB_CHK_ERR( rval );
    rval = pcomm->filter_pstatus( shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities );MB_CHK_ERR( rval );

    // Find out how many shared entities of each dimension are owned on this processor
    for( int i = 0; i < 4; i++ )
        nums[i] = (int)owned_entities.num_of_dimension( i );

    // Gather the statistics on processor 0
    MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm );
    if( 0 == global_rank )
    {
        cout << " \n\n After exchanging one ghost layer: \n";
        for( int i = 0; i < nprocs; i++ )
        {
            cout << " Shared, owned entities on proc " << i << ": " << rbuf[4 * i] << " verts, " << rbuf[4 * i + 1]
                 << " edges, " << rbuf[4 * i + 2] << " faces, " << rbuf[4 * i + 3] << " elements" << endl;
        }
    }

    delete mb;

    // Free the split communicator, if one was created
    if( nbComms > 1 ) MPI_Comm_free( &comm );

    MPI_Finalize();
#else
    std::cout << " This example requires MOAB to be configured with MPI (and HDF5 for the default input file)\n";
#endif
    return 0;
}