MOAB: Mesh Oriented datABase  (version 5.5.0)
migrate_nontrivial.cpp
/*
 * migrate_nontrivial.cpp
 *
 * migrate_nontrivial contains tests for migrating meshes in parallel environments with the iMOAB
 * API, using nontrivial partitions. For starters, we use Zoltan to compute the partition in
 * parallel and then use the result to migrate the mesh; the trivial partition gave terrible
 * results for MPAS-type meshes, which were numbered like a fractal, and the resulting migrations
 * looked like Swiss cheese, full of holes. A mesh is read on the sender tasks; we use graph-based
 * or geometry-based methods, and modify the ZoltanPartitioner to add the needed methods.
 *
 * The mesh is sent to the receiver tasks with nonblocking MPI_Isend calls, and then received
 * with blocking MPI_Recv calls.
 *
 * We do not modify the GLOBAL_ID tag; we assume it was set correctly before we started.
 */
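
// Example invocation (illustrative only: the executable name, MPI launcher, and mesh
// path are assumptions, not fixed by this file):
//
//   mpiexec -np 4 ./migrate_nontrivial -f mesh.h5m -a 0 -b 0 -c 1 -d 3 -t 1
//
// This would read mesh.h5m on sender task 0 and migrate it to receiver tasks 1-3 with
// the graph-based partition method; the number of MPI ranks launched must cover the
// highest task id in either layout.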

#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/ProgOptions.hpp"

#define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )

using namespace moab;

//#define GRAPH_INFO

#define CHECKRC( rc, message )            \
    if( 0 != ( rc ) )                     \
    {                                     \
        printf( "Error: %s\n", message ); \
        return MB_FAILURE;                \
    }

int is_any_proc_error( int is_my_error )
{
    int result = 0;
    int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
    return err || result;
}

int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
{
    ErrorCode result = ( *func )( file_name );
    int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( rank == 0 )
    {
        if( is_err )
            std::cout << func_name << " : FAILED!!" << std::endl;
        else
            std::cout << func_name << " : success" << std::endl;
    }

    return is_err;
}
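
// For illustration (the file name is arbitrary): RUN_TEST_ARG2( migrate_graph, "mesh.h5m" )
// expands to run_test( &( migrate_graph ), "migrate_graph", "mesh.h5m" ), so the label
// printed on rank 0 always matches the name of the test function.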

ErrorCode migrate_graph( const char* filename );
ErrorCode migrate_geom( const char* filename );
ErrorCode migrate_trivial( const char* filename );

// some global variables, used by all tests
int rank, size, ierr;

int compid1, compid2;  // component ids are unique over all pes, and established in advance;
int nghlay;            // number of ghost layers for loading the file
std::vector< int > groupTasks;       // at most 4 tasks
int startG1, startG2, endG1, endG2;  // start and end tasks for the sender and receiver groups

MPI_Comm jcomm;  // will be a copy of the global
MPI_Group jgroup;

ErrorCode migrate_smart( const char* filename, const char* outfile, int partMethod )
{
    // first create MPI groups

    std::string filen( filename );
    MPI_Group group1, group2;
    groupTasks.resize( endG1 - startG1 + 1 );
    for( int i = startG1; i <= endG1; i++ )
        groupTasks[i - startG1] = i;

    ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, &groupTasks[0], &group1 );
    CHECKRC( ierr, "can't create group1" )

    groupTasks.resize( endG2 - startG2 + 1 );
    for( int i = startG2; i <= endG2; i++ )
        groupTasks[i - startG2] = i;

    ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, &groupTasks[0], &group2 );
    CHECKRC( ierr, "can't create group2" )

    // create 2 communicators, one for each group
    int tagcomm1 = 1, tagcomm2 = 2;
    int context_id = -1;  // plain migrate, default context
    MPI_Comm comm1, comm2;
    ierr = MPI_Comm_create_group( jcomm, group1, tagcomm1, &comm1 );
    CHECKRC( ierr, "can't create comm1" )

    ierr = MPI_Comm_create_group( jcomm, group2, tagcomm2, &comm2 );
    CHECKRC( ierr, "can't create comm2" )

    ierr = iMOAB_Initialize( 0, 0 );  // nothing from argc/argv is needed yet
    CHECKRC( ierr, "can't initialize iMOAB" )

    // give some dummy values to component ids, just to differentiate between them
    // the par comm graph is unique between components
    compid1 = 4;
    compid2 = 7;

    int appID1;
    iMOAB_AppID pid1 = &appID1;
    int appID2;
    iMOAB_AppID pid2 = &appID2;

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP1", &comm1, &compid1, pid1 );
        CHECKRC( ierr, "can't register app1 " )
    }
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP2", &comm2, &compid2, pid2 );
        CHECKRC( ierr, "can't register app2 " )
    }

    if( comm1 != MPI_COMM_NULL )
    {

        std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

        nghlay = 0;

        ierr = iMOAB_LoadMesh( pid1, filen.c_str(), readopts.c_str(), &nghlay );
        CHECKRC( ierr, "can't load mesh " )
        ierr = iMOAB_SendMesh( pid1, &jcomm, &group2, &compid2, &partMethod );  // send to component 2
        CHECKRC( ierr, "cannot send elements" )
#ifdef GRAPH_INFO
        int is_sender = 1;
        int context   = compid2;
        iMOAB_DumpCommGraph( pid1, &context, &is_sender, "MigrateS" );
#endif
    }

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( pid2, &jcomm, &group1, &compid1 );  // receive from component 1
        CHECKRC( ierr, "cannot receive elements" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid2, outfile, wopts.c_str() );
        CHECKRC( ierr, "cannot write received mesh" )
#ifdef GRAPH_INFO
        int is_sender = 0;
        int context   = compid1;
        iMOAB_DumpCommGraph( pid2, &context, &is_sender, "MigrateR" );
#endif
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    context_id = compid2;  // even for default migrate, be more explicit
    if( comm1 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid1, &context_id );

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid2 );
        CHECKRC( ierr, "cannot deregister app 2 receiver" )
    }

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid1 );
        CHECKRC( ierr, "cannot deregister app 1 sender" )
    }

    ierr = iMOAB_Finalize();
    CHECKRC( ierr, "did not finalize iMOAB" )

    if( MPI_COMM_NULL != comm1 ) MPI_Comm_free( &comm1 );
    if( MPI_COMM_NULL != comm2 ) MPI_Comm_free( &comm2 );

    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    return MB_SUCCESS;
}
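
// Thin wrappers around migrate_smart(); the last argument is the partition method
// forwarded to iMOAB_SendMesh (0 - trivial, 1 - graph, 2 - geometry), and each test
// writes its result to a distinct .h5m file.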
// migrate from 2 tasks to 3 tasks
ErrorCode migrate_graph( const char* filename )
{
    return migrate_smart( filename, "migrate_graph.h5m", 1 );
}

ErrorCode migrate_geom( const char* filename )
{
    return migrate_smart( filename, "migrate_geom.h5m", 2 );
}

ErrorCode migrate_trivial( const char* filename )
{
    return migrate_smart( filename, "migrate_trivial.h5m", 0 );
}

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    MPI_Comm_dup( MPI_COMM_WORLD, &jcomm );
    MPI_Comm_group( jcomm, &jgroup );

    ProgOptions opts;
    int typeTest = 3;
    // std::string inputfile, outfile("out.h5m"), netcdfFile, variable_name, sefile_name;
    std::string filename;
    filename = TestDir + "unittest/field1.h5m";
    startG1  = 0;
    startG2  = 0;
    endG1    = 0;
    endG2    = 1;

    opts.addOpt< std::string >( "file,f", "source file", &filename );

    opts.addOpt< int >( "startSender,a", "start task for source layout", &startG1 );
    opts.addOpt< int >( "endSender,b", "end task for source layout", &endG1 );
    opts.addOpt< int >( "startRecv,c", "start task for receiver layout", &startG2 );
    opts.addOpt< int >( "endRecv,d", "end task for receiver layout", &endG2 );

    opts.addOpt< int >( "typeTest,t", "test type (0 - trivial, 1 - graph, 2 - geom, 3 - both graph and geometry)",
                        &typeTest );

    opts.parseCommandLine( argc, argv );

    if( rank == 0 )
    {
        std::cout << " input file : " << filename << "\n";
        std::cout << " sender on tasks: " << startG1 << ":" << endG1 << "\n";
        std::cout << " receiver on tasks: " << startG2 << ":" << endG2 << "\n";
        std::cout << " type migrate: " << typeTest << " (0 - trivial, 1 - graph, 2 - geom, 3 - both graph and geom)\n";
    }

    int num_errors = 0;

    if( 0 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_trivial, filename.c_str() );
    if( 3 == typeTest || 1 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_graph, filename.c_str() );
    if( 3 == typeTest || 2 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_geom, filename.c_str() );

    if( rank == 0 )
    {
        if( !num_errors )
            std::cout << "All tests passed" << std::endl;
        else
            std::cout << num_errors << " TESTS FAILED!" << std::endl;
    }

    MPI_Group_free( &jgroup );
    MPI_Comm_free( &jcomm );
    MPI_Finalize();
    return num_errors;
}

#undef VERBOSE