MOAB: Mesh Oriented datABase  (version 5.5.0)
migrate_test.cpp
/*
 * migrate_test contains tests for migrating meshes in parallel environments with the iMOAB API.
 * These methods are also tested in the example MigrateMesh.F90, with variable numbers of
 * processes; migrate_test is usually launched on 2 processes, and it tests various cases.
 * A mesh is read on the sender tasks, sent to the receiver tasks, and then written out for
 * verification. It depends on parallel HDF5 for reading and writing in parallel.
 */
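/*
 * Typical invocation (a sketch only; the executable name and the mpiexec launcher are
 * assumptions, adjust to the local build):
 *
 *   mpiexec -np 2 ./migrate_test                  # runs the 1_1, 1_2, 2_1, 2_2 cases
 *   mpiexec -np 4 ./migrate_test other_mesh.h5m   # also runs the 4_2, 2_4, 4_3, overlap cases
 *
 * An optional first argument overrides the default input mesh (TestDir + "unittest/field1.h5m").
 */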

#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"

#define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )

using namespace moab;

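// Check a nonzero return code from an iMOAB or MPI call: print the message and fail the
// current test case with MB_FAILURE.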
#define CHECKRC( rc, message )                \
    if( 0 != ( rc ) )                         \
    {                                         \
        printf( "Error: %s\n", message );     \
        return MB_FAILURE;                    \
    }

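// Reduce each rank's error flag with MPI_MAX so that a failure on any process marks the test
// as failed everywhere; a nonzero MPI return code from the reduction also counts as an error.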
int is_any_proc_error( int is_my_error )
{
    int result = 0;
    int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
    return err || result;
}

int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
{
    ErrorCode result = ( *func )( file_name );
    int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( rank == 0 )
    {
        if( is_err )
            std::cout << func_name << " : FAILED!!" << std::endl;
        else
            std::cout << func_name << " : success" << std::endl;
    }

    return is_err;
}

ErrorCode migrate_1_1( const char* filename );
ErrorCode migrate_1_2( const char* filename );
ErrorCode migrate_2_1( const char* filename );
ErrorCode migrate_2_2( const char* filename );
ErrorCode migrate_4_2( const char* filename );
ErrorCode migrate_2_4( const char* filename );
ErrorCode migrate_4_3( const char* filename );
ErrorCode migrate_overlap( const char* filename );

// some global variables, used by all tests
int rank, size, ierr;

int compid1, compid2;  // component ids are unique over all pes, and established in advance
int nghlay;            // number of ghost layers for loading the file
int groupTasks[4];     // at most 4 tasks
int startG1, startG2, endG1, endG2;  // task ranges of the sender and receiver groups, set by each test case

MPI_Comm jcomm;  // will be a copy of the global communicator
MPI_Group jgroup;

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    MPI_Comm_dup( MPI_COMM_WORLD, &jcomm );
    MPI_Comm_group( jcomm, &jgroup );

    std::string filename;
    filename = TestDir + "unittest/field1.h5m";
    if( argc > 1 )
    {
        filename = argv[1];
    }
    int num_errors = 0;
    num_errors += RUN_TEST_ARG2( migrate_1_1, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_1_2, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_2_1, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_2_2, filename.c_str() );
    if( size >= 4 )
    {
        num_errors += RUN_TEST_ARG2( migrate_4_2, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_2_4, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_4_3, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_overlap, filename.c_str() );
    }
    if( rank == 0 )
    {
        if( !num_errors )
            std::cout << "All tests passed" << std::endl;
        else
            std::cout << num_errors << " TESTS FAILED!" << std::endl;
    }

    MPI_Group_free( &jgroup );
    MPI_Comm_free( &jcomm );
    MPI_Finalize();
    return num_errors;
}

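// migrate() drives one full test case, with the sender group [startG1, endG1] and the receiver
// group [startG2, endG2] already set by the caller:
//   1) build the two MPI groups/communicators from the joint communicator,
//   2) register an iMOAB application on each group (component ids 4 and 7),
//   3) load the mesh on the sender group and migrate it with iMOAB_SendMesh / iMOAB_ReceiveMesh,
//   4) write the received mesh to 'outfile',
//   5) send the dense "element_field" tag back from component 2 to component 1 and write the
//      result to a "_tag.h5m" variant of 'outfile',
//   6) deregister the applications and release the communicators and groups.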
ErrorCode migrate( const char* filename, const char* outfile )
{
    // first create MPI groups

    std::string filen( filename );
    MPI_Group group1, group2;
    for( int i = startG1; i <= endG1; i++ )
        groupTasks[i - startG1] = i;

    ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, groupTasks, &group1 );
    CHECKRC( ierr, "can't create group1" )

    for( int i = startG2; i <= endG2; i++ )
        groupTasks[i - startG2] = i;

    ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, groupTasks, &group2 );
    CHECKRC( ierr, "can't create group2" )

    // create 2 communicators, one for each group
    int tagcomm1 = 1, tagcomm2 = 2;
    MPI_Comm comm1, comm2;
    ierr = MPI_Comm_create_group( jcomm, group1, tagcomm1, &comm1 );
    CHECKRC( ierr, "can't create comm1" )

    ierr = MPI_Comm_create_group( jcomm, group2, tagcomm2, &comm2 );
    CHECKRC( ierr, "can't create comm2" )

    ierr = iMOAB_Initialize( 0, 0 );  // nothing is needed from argc/argv here yet
    CHECKRC( ierr, "can't initialize iMOAB" )

    // give some dummy values to the component ids, just to differentiate between them;
    // the par comm graph is unique between components
    compid1 = 4;
    compid2 = 7;
    int context_id = -1;  // default context; it will later be set to compid1 or compid2

    int appID1;
    iMOAB_AppID pid1 = &appID1;
    int appID2;
    iMOAB_AppID pid2 = &appID2;

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP1", &comm1, &compid1, pid1 );
        CHECKRC( ierr, "can't register app1" )
    }
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP2", &comm2, &compid2, pid2 );
        CHECKRC( ierr, "can't register app2" )
    }

    int method = 0;  // trivial partition for sending
    if( comm1 != MPI_COMM_NULL )
    {

        std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

        nghlay = 0;

        ierr = iMOAB_LoadMesh( pid1, filen.c_str(), readopts.c_str(), &nghlay );
        CHECKRC( ierr, "can't load mesh" )
        ierr = iMOAB_SendMesh( pid1, &jcomm, &group2, &compid2, &method );  // send to component 2
        CHECKRC( ierr, "cannot send elements" )
    }

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( pid2, &jcomm, &group1, &compid1 );  // receive from component 1
        CHECKRC( ierr, "cannot receive elements" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid2, outfile, wopts.c_str() );
        CHECKRC( ierr, "cannot write received mesh" )
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    if( comm1 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid1, &context_id );

    // exchange a tag, from component to component;
    // one side receives and the other sends the tag; the sending side must have a
    // non-null communicator
    int size_tag  = 1;  // a dense double tag, on elements
    int tagType   = DENSE_DOUBLE;
    int tagIndex2 = 0, tagIndex1 = 0;  // these will be the tag indices on each app pid

    std::string fileAfterTagMigr( outfile );  // ends in .h5m
    int sizen = fileAfterTagMigr.length();
    fileAfterTagMigr.erase( sizen - 4, 4 );  // erase the .h5m extension
    fileAfterTagMigr = fileAfterTagMigr + "_tag.h5m";

    // now send a tag from component 2 towards component 1
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( pid2, "element_field", &tagType, &size_tag, &tagIndex2 );
        CHECKRC( ierr, "failed to get tag element_field" );
        // this tag already exists in the file

        // first, send from compid2 to compid1, from comm2, using the common joint comm;
        // as always, use nonblocking sends
        // context_id should now be compid1
        context_id = compid1;
        ierr       = iMOAB_SendElementTag( pid2, "element_field", &jcomm, &context_id );
        CHECKRC( ierr, "cannot send tag values" )
    }
    // receive on component 1
    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( pid1, "element_field", &tagType, &size_tag, &tagIndex1 );
        CHECKRC( ierr, "failed to get tag element_field" );
        context_id = compid2;
        ierr       = iMOAB_ReceiveElementTag( pid1, "element_field", &jcomm, &context_id );
        CHECKRC( ierr, "cannot receive tag values" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid1, fileAfterTagMigr.c_str(), wopts.c_str() );
        CHECKRC( ierr, "cannot write received mesh" )
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    if( comm2 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid2, &context_id );

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid2 );
        CHECKRC( ierr, "cannot deregister app 2 receiver" )
    }

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid1 );
        CHECKRC( ierr, "cannot deregister app 1 sender" )
    }

    ierr = iMOAB_Finalize();
    CHECKRC( ierr, "did not finalize iMOAB" )

    if( MPI_COMM_NULL != comm1 ) MPI_Comm_free( &comm1 );
    if( MPI_COMM_NULL != comm2 ) MPI_Comm_free( &comm2 );

    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    return MB_SUCCESS;
}
// migrate from task 0 to task 1, non overlapping
ErrorCode migrate_1_1( const char* filename )
{
    startG1 = endG1 = 0;
    startG2 = endG2 = 1;
    return migrate( filename, "migrate11.h5m" );
}
// migrate from task 0 to 2 tasks (0 and 1)
ErrorCode migrate_1_2( const char* filename )
{
    startG1 = endG1 = startG2 = 0;
    endG2 = 1;
    return migrate( filename, "migrate12.h5m" );
}

// migrate from 2 tasks (0, 1) to 1 task (0)
ErrorCode migrate_2_1( const char* filename )
{
    startG1 = endG2 = startG2 = 0;
    endG1 = 1;
    return migrate( filename, "migrate21.h5m" );
}

// migrate from 2 tasks to 2 tasks (overkill)
ErrorCode migrate_2_2( const char* filename )
{
    startG1 = startG2 = 0;
    endG1 = endG2 = 1;
    return migrate( filename, "migrate22.h5m" );
}
// migrate from 4 tasks to 2 tasks
ErrorCode migrate_4_2( const char* filename )
{
    startG1 = startG2 = 0;
    endG2 = 1;
    endG1 = 3;
    return migrate( filename, "migrate42.h5m" );
}

// migrate from 2 tasks to 4 tasks
ErrorCode migrate_2_4( const char* filename )
{
    startG1 = startG2 = 0;
    endG2 = 3;
    endG1 = 1;
    return migrate( filename, "migrate24.h5m" );
}

// migrate from 4 tasks to 3 tasks
ErrorCode migrate_4_3( const char* filename )
{
    startG1 = startG2 = 0;
    endG2 = 2;
    endG1 = 3;
    return migrate( filename, "migrate43.h5m" );
}

// migrate between overlapping groups: tasks 0-1 send to tasks 1-2
ErrorCode migrate_overlap( const char* filename )
{
    startG1 = 0;
    startG2 = 1;
    endG1   = 1;
    endG2   = 2;
    return migrate( filename, "migrate_over.h5m" );
}