MOAB: Mesh Oriented datABase  (version 5.5.0)
commgraph_test.cpp
/*
 * The primary goal of this commgraph_test is to set up a communication framework
 * between two components, based only on a marker associated with the data being
 * migrated between the components.
 * One use case for us is the physics grid and the spectral dynamics grid in the
 * atmosphere.
 * These could in theory be on different sets of PEs, although here the 2 models are
 * on the same PEs.
 *
 * In our case, spectral element data is hooked using the GLOBAL_DOFS tag of 4x4 ids,
 * associated to each element.
 * The phys grid comes from an AtmPhys.h5m point cloud grid, partitioned in 16.
 * The spectral mesh is our regular wholeATM_T.h5m, which is the state after one time step.
 *
 * The phys grid is very sprinkled in the partition, while the spectral mesh is more
 * compact; for correct identification/correspondence in parallel, it would make sense
 * to use boxes for the spectral mesh.
 *
 * We employ our friend the crystal router, within a rendezvous algorithm, to set
 * up the communication pattern.
 *
 * input: wholeATM_T.h5m file, on 128 procs, the same file that is used by the
 *   imoab_coupler test
 * input: AtmPhys.h5m file, which contains the physics grid, distributed on 16 processes
 * input 2: wholeLnd.h5m, which is the land mesh, also distributed on 16 processes
 *
 * The communication pattern is established using a rendezvous method, based on the
 * marker (in this case, the GLOBAL_ID tag on vertices of the phys grid and the
 * GLOBAL_DOFS tag on spectral elements).
 *
 * In the end, we need to modify tag migration to move data between these types of
 * components, by id.
 *
 * wholeLnd.h5m has holes in the id space, which we need to account for.
 * Eventually, this could be used to send data directly from the dynamics atm to the
 * land model, or to lnd on the coupler?
 */
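
/*
 * The test proceeds in a few steps:
 *  1) split MPI_COMM_WORLD into two task groups, one per model, and create a
 *     communicator for each, plus a joint communicator over the union of the groups
 *  2) register each model as an iMOAB application and load its mesh in parallel
 *  3) call iMOAB_ComputeCommGraph on the joint communicator, to match entities on
 *     the two sides by their marker (GLOBAL_DOFS or GLOBAL_ID)
 *  4) send the a2oTbot/a2oUbot/a2oVbot tags from the first model, receive them on
 *     the second, send one tag back, and write out both meshes to verify
 */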

#include "moab/Core.hpp"

// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>

#define CHECKIERR( rc, message )                        \
    if( 0 != ( rc ) )                                   \
    {                                                   \
        printf( "%s. ErrorCode = %d.\n", message, rc ); \
        CHECK( 0 );                                     \
    }
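
// CHECKIERR reports the message and the nonzero error code, then fails the
// current test through the CHECK macro from TestUtil.hpp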

using namespace moab;

// #define VERBOSE

// declare some variables outside the main method;
// it is easier to pass them around to the tests
int ierr;
int rankInGlobalComm, numProcesses;  // global rank and number of tasks, set in main()
MPI_Group jgroup;
std::string atmFilename = TestDir + "unittest/wholeATM_T.h5m";
// in a regular coupled case, the atm component id would be 5
// cmpatm is for the atm model on the atm pes; it has the spectral mesh
// physatm is for the atm model on the phys-grid pes; it has the point cloud phys grid

int rankInAtmComm = -1;
// cmpatm is the unique component id of the spectral mesh
int cmpatm = 605;  // component ids are unique over all pes, and established in advance
std::string atmPhysFilename = TestDir + "unittest/AtmPhys.h5m";
std::string atmPhysOutFilename = "outPhys.h5m";
std::string atmFilename2 = "wholeATM_new.h5m";
int rankInPhysComm = -1;
// this will be the physics atm component id; in a regular case it would actually be 5
int physatm = 5;  // component ids are unique over all pes, and established in advance

// int rankInJointComm = -1;

int nghlay = 0;  // number of ghost layers for loading the file

std::vector< int > groupTasks;
int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1;
int typeA = 1;  // spectral mesh, with GLOBAL_DOFS tags on cells
int typeB = 2;  // point cloud mesh, with GLOBAL_ID tag on vertices

std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
std::string readoptsPC( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" );
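// note: the point cloud read options omit PARALLEL_RESOLVE_SHARED_ENTS, since a
// partitioned point cloud has no entities shared between parts to resolve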
std::string fileWriteOptions( "PARALLEL=WRITE_PART" );
std::string tagT( "a2oTbot" );
std::string tagU( "a2oUbot" );
std::string tagV( "a2oVbot" );
std::string separ( ":" );
std::string tagT1( "a2oTbot_1" );
std::string tagU1( "a2oUbot_1" );
std::string tagV1( "a2oVbot_1" );
std::string tagT2( "a2oTbot_2" );  // just one send
int commgraphtest();

void testspectral_phys()
{
    // no changes from the default setup
    commgraphtest();
}

void testspectral_lnd()
{
    // first model is the spectral atm, second is the land point cloud
    atmPhysFilename = TestDir + "unittest/wholeLnd.h5m";
    atmPhysOutFilename = std::string( "outLnd.h5m" );
    atmFilename2 = std::string( "wholeATM_lnd.h5m" );
    commgraphtest();
}

void testphysatm_lnd()
{
    // use as the first file the output "outPhys.h5m" from the first test,
    // and as the second file the output "outLnd.h5m" from the second test
    atmFilename = std::string( "outPhys.h5m" );
    atmPhysFilename = std::string( "outLnd.h5m" );
    atmPhysOutFilename = std::string( "physAtm_lnd.h5m" );
    atmFilename2 = std::string( "physBack_lnd.h5m" );
    tagT = tagT1;
    tagU = tagU1;
    tagV = tagV1;
    tagT1 = std::string( "newT" );
    tagT2 = std::string( "newT2" );
    typeA = 2;  // the first model is now a point cloud as well
    commgraphtest();
}
int commgraphtest()
{
    if( !rankInGlobalComm )
    {
        std::cout << " first file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1
                  << "\n second file: " << atmPhysFilename << "\n on tasks : " << startG2 << ":" << endG2 << "\n ";
    }

    // load files on 2 different communicators, groups
    // will create a joint comm for rendezvous
    MPI_Group atmPEGroup;
    groupTasks.resize( numProcesses, 0 );
    for( int i = startG1; i <= endG1; i++ )
        groupTasks[i - startG1] = i;

    ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, &groupTasks[0], &atmPEGroup );
    CHECKIERR( ierr, "Cannot create atmPEGroup" )

    groupTasks.clear();
    groupTasks.resize( numProcesses, 0 );
    MPI_Group atmPhysGroup;
    for( int i = startG2; i <= endG2; i++ )
        groupTasks[i - startG2] = i;

    ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, &groupTasks[0], &atmPhysGroup );
    CHECKIERR( ierr, "Cannot create atmPhysGroup" )

    // create 2 communicators, one for each group
    int ATM_COMM_TAG = 1;
    MPI_Comm atmComm;
    // atmComm is for the atmosphere app
    ierr = MPI_Comm_create_group( MPI_COMM_WORLD, atmPEGroup, ATM_COMM_TAG, &atmComm );
    CHECKIERR( ierr, "Cannot create atmComm" )

    int PHYS_COMM_TAG = 2;
    MPI_Comm physComm;
    // physComm is for the phys atm app
    ierr = MPI_Comm_create_group( MPI_COMM_WORLD, atmPhysGroup, PHYS_COMM_TAG, &physComm );
    CHECKIERR( ierr, "Cannot create physComm" )
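
    // note: ranks that are not members of the given group get MPI_COMM_NULL back
    // from MPI_Comm_create_group; the blocks below are guarded by that check, so
    // each model only touches its own communicator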

    // now create the joint communicator that spans both the atm and phys atm tasks
    MPI_Group joinAtmPhysAtmGroup;
    ierr = MPI_Group_union( atmPEGroup, atmPhysGroup, &joinAtmPhysAtmGroup );
    CHECKIERR( ierr, "Cannot create joint atm - phys atm group" )
    int JOIN_COMM_TAG = 5;
    MPI_Comm joinComm;
    ierr = MPI_Comm_create_group( MPI_COMM_WORLD, joinAtmPhysAtmGroup, JOIN_COMM_TAG, &joinComm );
    CHECKIERR( ierr, "Cannot create joint atm - phys atm communicator" )
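
    // the joint communicator spans the union of the two pe groups; it is the one
    // on which the crystal router / rendezvous exchange below takes place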

    ierr = iMOAB_Initialize( 0, NULL );  // we do not really need anything from argc, argv here
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpAtmAppID = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm app id
    int physAtmAppID = -1;  // -1 means it is not initialized
    iMOAB_AppID physAtmPID = &physAtmAppID;  // phys atm app id, on phys pes

    // load the atm mesh
    if( atmComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( atmComm, &rankInAtmComm );
        ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register ATM App" )

        // load the first model
        std::string rdopts = readopts;
        if( typeA == 2 ) rdopts = readoptsPC;  // point cloud
        ierr = iMOAB_LoadMesh( cmpAtmPID, atmFilename.c_str(), rdopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load ATM mesh" )
    }

    // load the atm phys mesh
    if( physComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( physComm, &rankInPhysComm );
        ierr = iMOAB_RegisterApplication( "PhysATM", &physComm, &physatm, physAtmPID );
        CHECKIERR( ierr, "Cannot register PHYS ATM App" )

        // load the phys atm mesh; in all tests this is a point cloud
        ierr = iMOAB_LoadMesh( physAtmPID, atmPhysFilename.c_str(), readoptsPC.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load Phys ATM mesh" )
    }

    if( MPI_COMM_NULL != joinComm )
    {
        ierr = iMOAB_ComputeCommGraph( cmpAtmPID, physAtmPID, &joinComm, &atmPEGroup, &atmPhysGroup, &typeA, &typeB,
                                       &cmpatm, &physatm );
        // this generates the parallel comm graph between the atm and atmPhys models,
        // two meshes that are distributed differently in parallel
        CHECKIERR( ierr, "Cannot compute comm graph between the two apps " )
    }
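
    // the graph pairs entities by marker: with type 1 the marker is the 4x4
    // GLOBAL_DOFS id array on spectral cells, with type 2 it is the GLOBAL_ID tag
    // on the point cloud vertices; entities whose ids match on the two sides become
    // source/target pairs for the tag migration below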

    if( atmComm != MPI_COMM_NULL )
    {
        // send the tags; multiple tag names are concatenated with the ":" separator
        std::string tags = tagT + separ + tagU + separ + tagV;
        ierr = iMOAB_SendElementTag( cmpAtmPID, tags.c_str(), &joinComm, &physatm );
        CHECKIERR( ierr, "cannot send tag values" )
    }
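
    // the send is decoupled from the receive below; the sender-side buffers must
    // remain allocated until the data has been received, so they are released only
    // later, with iMOAB_FreeSenderBuffers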

    if( physComm != MPI_COMM_NULL )
    {
        // need to define the tag storage on the receiving side
        std::string tags1 = tagT1 + separ + tagU1 + separ + tagV1 + separ;
        int tagType = DENSE_DOUBLE;
        int ndof = 1;
        if( typeB == 1 ) ndof = 16;
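        // 16 values per entity correspond to the 4x4 GLOBAL_DOFS ids of a spectral
        // element; for the point cloud (type 2) there is a single value per vertex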
        int tagIndex = 0;
        ierr = iMOAB_DefineTagStorage( physAtmPID, tagT1.c_str(), &tagType, &ndof, &tagIndex );
        CHECKIERR( ierr, "failed to define the field tag a2oTbot_1" );

        ierr = iMOAB_DefineTagStorage( physAtmPID, tagU1.c_str(), &tagType, &ndof, &tagIndex );
        CHECKIERR( ierr, "failed to define the field tag a2oUbot_1" );

        ierr = iMOAB_DefineTagStorage( physAtmPID, tagV1.c_str(), &tagType, &ndof, &tagIndex );
        CHECKIERR( ierr, "failed to define the field tag a2oVbot_1" );

        ierr = iMOAB_ReceiveElementTag( physAtmPID, tags1.c_str(), &joinComm, &cmpatm );
        CHECKIERR( ierr, "cannot receive tag values" )
    }

    // we can now free the sender buffers
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &physatm );
        CHECKIERR( ierr, "cannot free buffers" )
    }

    if( physComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_WriteMesh( physAtmPID, atmPhysOutFilename.c_str(), fileWriteOptions.c_str() );
        CHECKIERR( ierr, "cannot write the received mesh on phys pes" )
    }
    if( physComm != MPI_COMM_NULL )
    {
        // send back the first tag only
        ierr = iMOAB_SendElementTag( physAtmPID, tagT1.c_str(), &joinComm, &cmpatm );
        CHECKIERR( ierr, "cannot send tag values" )
    }
    // receive it in a different tag, to check that the values round-trip
    if( atmComm != MPI_COMM_NULL )
    {
        // need to define the tag storage for the new tag
        int tagType = DENSE_DOUBLE;
        int ndof = 16;
        if( typeA == 2 ) ndof = 1;
        int tagIndex = 0;
        ierr = iMOAB_DefineTagStorage( cmpAtmPID, tagT2.c_str(), &tagType, &ndof, &tagIndex );
        CHECKIERR( ierr, "failed to define the field tag a2oTbot_2" );

        ierr = iMOAB_ReceiveElementTag( cmpAtmPID, tagT2.c_str(), &joinComm, &physatm );
        CHECKIERR( ierr, "cannot receive tag values a2oTbot_2" )
    }
    // we sent one tag back into a different tag, to see if we get the same values;
    // the sender buffers on the phys side can now be freed
    if( physComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( physAtmPID, &cmpatm );
        CHECKIERR( ierr, "cannot free buffers " )
    }
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_WriteMesh( cmpAtmPID, atmFilename2.c_str(), fileWriteOptions.c_str() );
        CHECKIERR( ierr, "cannot write back the first mesh" )
    }

    // unregister the apps, in reverse order
    if( physComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( physAtmPID );
        CHECKIERR( ierr, "cannot deregister second app model" )
    }

    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpAtmPID );
        CHECKIERR( ierr, "cannot deregister first app model" )
    }

    ierr = iMOAB_Finalize();
    CHECKIERR( ierr, "did not finalize iMOAB" )

    // free the atm group and comm
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );
    MPI_Group_free( &atmPEGroup );

    // free the atm phys group and comm
    if( MPI_COMM_NULL != physComm ) MPI_Comm_free( &physComm );
    MPI_Group_free( &atmPhysGroup );

    // free the joint group and comm
    if( MPI_COMM_NULL != joinComm ) MPI_Comm_free( &joinComm );
    MPI_Group_free( &joinAtmPhysAtmGroup );

    return 0;
}
int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );

    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes in the global group

    // default layout: both models span all available tasks; establish the comm graph,
    // then migrate data from the atm pes to the phys pes, and back
    startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1;

    ProgOptions opts;
    opts.addOpt< std::string >( "modelA,m", "first model file", &atmFilename );
    opts.addOpt< int >( "typeA,t", "type of the first model", &typeA );

    opts.addOpt< std::string >( "modelB,n", "second model file", &atmPhysFilename );
    opts.addOpt< int >( "typeB,v", "type of the second model", &typeB );

    opts.addOpt< int >( "startAtm,a", "start task for first model layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for first model layout", &endG1 );

    opts.addOpt< int >( "startPhys,c", "start task for second model layout", &startG2 );
    opts.addOpt< int >( "endPhys,d", "end task for second model layout", &endG2 );

    opts.addOpt< std::string >( "output,o", "output filename for the second model", &atmPhysOutFilename );

    opts.addOpt< std::string >( "output2,w", "output filename for the first model, written after the send back",
                                &atmFilename2 );

    opts.parseCommandLine( argc, argv );
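
    // example run (hypothetical layout, using the options defined above):
    //   mpiexec -np 4 ./commgraph_test -a 0 -b 1 -c 2 -d 3
    // puts the first model on tasks 0-1 and the second model on tasks 2-3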

    int num_err = 0;
    num_err += RUN_TEST( testspectral_phys );

    // run the other two tests only with the default settings; the third test
    // reuses the output files written by the first two
    if( argc == 1 )
    {
        num_err += RUN_TEST( testspectral_lnd );
        num_err += RUN_TEST( testphysatm_lnd );
    }
    MPI_Group_free( &jgroup );

    MPI_Finalize();

    return num_err;
}