MOAB: Mesh Oriented datABase  (version 5.5.0)
imoab_coupler.cpp
1 /*
2  * This imoab_coupler test will simulate coupling between 3 components
3  * 3 meshes will be loaded from 3 files (atm, ocean, lnd), and they will be migrated to
4  * all processors (coupler pes); then, intx will be performed between migrated meshes
5  * and weights will be generated, such that a field from one component will be transferred to
6  * the other component
7  * currently, the atm will send some data to be projected to ocean and land components
8  *
9  * first, intersect atm and ocn, and recompute comm graph 1 between atm and atm_cx, for ocn intx
10  * second, intersect atm and lnd, and recompute comm graph 2 between atm and atm_cx for lnd intx
11 
12  */
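/*
 * High-level iMOAB call sequence exercised below (every call listed here appears later in this file):
 *   iMOAB_Initialize / iMOAB_RegisterApplication   - create app instances on component and coupler PEs
 *   setup_component_coupler_meshes                  - load each component mesh and migrate it to the coupler PEs
 *   iMOAB_ComputeMeshIntersectionOnSphere,
 *   iMOAB_ComputePointDoFIntersection               - intersect source and target meshes on the coupler PEs
 *   iMOAB_CoverageGraph                             - rebuild the comm graph so atm sends only what the coverage mesh needs
 *   iMOAB_ComputeScalarProjectionWeights            - generate remapping weights with TempestRemap
 *   iMOAB_SendElementTag / iMOAB_ReceiveElementTag  - move tag data between component and coupler PEs
 *   iMOAB_ApplyScalarProjectionWeights              - project the field onto the target mesh
 *   iMOAB_DeregisterApplication / iMOAB_Finalize    - cleanup
 */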
13 
14 #include "moab/Core.hpp"
15 #ifndef MOAB_HAVE_MPI
16 #error The imoab_coupler test requires MPI configuration
17 #endif
18 
19 // MPI includes
20 #include "moab_mpi.h"
21 #include "moab/ParallelComm.hpp"
22 #include "MBParallelConventions.h"
23 
24 #include "moab/iMOAB.h"
25 #include "TestUtil.hpp"
26 #include "moab/CpuTimer.hpp"
27 #include "moab/ProgOptions.hpp"
28 #include <iostream>
29 #include <sstream>
30 
31 #include "imoab_coupler_utils.hpp"
32 
33 using namespace moab;
34 
35 //#define GRAPH_INFO
36 
37 #ifndef MOAB_HAVE_TEMPESTREMAP
38 #error The climate coupler test example requires MOAB configuration with TempestRemap
39 #endif
40 
41 #define ENABLE_ATMOCN_COUPLING
42 #define ENABLE_ATMLND_COUPLING
43 
44 #if( !defined( ENABLE_ATMOCN_COUPLING ) && !defined( ENABLE_ATMLND_COUPLING ) )
45 #error Enable either OCN (ENABLE_ATMOCN_COUPLING) and/or LND (ENABLE_ATMLND_COUPLING) for coupling
46 #endif
47 
48 int main( int argc, char* argv[] )
49 {
50  int ierr;
51  int rankInGlobalComm, numProcesses;
52  MPI_Group jgroup;
53  std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
54  std::string readoptsLnd( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" );
55 
56  // Timer data
57  moab::CpuTimer timer;
58  double timer_ops;
59  std::string opName;
60 
61  int repartitioner_scheme = 0;
62 #ifdef MOAB_HAVE_ZOLTAN
63  repartitioner_scheme = 2; // use the geometry partitioner in that case
64 #endif
65 
66  MPI_Init( &argc, &argv );
67  MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
68  MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );
69 
70  MPI_Comm_group( MPI_COMM_WORLD, &jgroup ); // all processes in jgroup
71 
72  std::string atmFilename = TestDir + "unittest/wholeATM_T.h5m";
73  // on a regular case, 5 ATM, 6 CPLATM (ATMX), 17 OCN , 18 CPLOCN (OCNX) ;
74  // intx atm/ocn is not in e3sm yet, give a number
75  // 6 * 100+ 18 = 618 : atmocnid
76  // 9 LND, 10 CPLLND
77  // 6 * 100 + 10 = 610 atmlndid:
78  // cmpatm is for atm on atm pes
79  // cmpocn is for ocean, on ocean pe
80  // cplatm is for atm on coupler pes
81  // cplocn is for ocean on coupler pes
82  // atmocnid is for intx atm / ocn on coupler pes
83  //
84  int rankInAtmComm = -1;
85  int cmpatm = 5,
86  cplatm = 6; // component ids are unique over all pes, and established in advance;
87 #ifdef ENABLE_ATMOCN_COUPLING
88  std::string ocnFilename = TestDir + "unittest/recMeshOcn.h5m";
89  std::string baseline = TestDir + "unittest/baseline1.txt";
90  int rankInOcnComm = -1;
91  int cmpocn = 17, cplocn = 18,
92  atmocnid = 618; // component ids are unique over all pes, and established in advance;
93 #endif
94 
95 #ifdef ENABLE_ATMLND_COUPLING
96  std::string lndFilename = TestDir + "unittest/wholeLnd.h5m";
97  int cpllnd = 10, cmplnd = 9,
98  atmlndid = 610; // component ids are unique over all pes, and established in advance;
99 #endif
100 
101  int rankInCouComm = -1;
102 
103  int nghlay = 0; // number of ghost layers for loading the file
104  std::vector< int > groupTasks;
105  int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1, startG3 = startG1,
106  endG3 = endG1; // Support launch of imoab_coupler test on any combo of 2*x processes
107  int startG4 = startG1, endG4 = endG1; // these are for coupler layout
108  int context_id = -1; // used now for freeing buffers
109 
110  // default: load atm on 2 proc, ocean on 2, land on 2; migrate to 2 procs, then compute intx
111  // later, we need to compute weight matrix with tempestremap
112 
113  ProgOptions opts;
114  opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
115 #ifdef ENABLE_ATMOCN_COUPLING
116  opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
117 #endif
118 #ifdef ENABLE_ATMLND_COUPLING
119  opts.addOpt< std::string >( "land,l", "land mesh filename (target)", &lndFilename );
120 #endif
121  opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
122  opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
123 #ifdef ENABLE_ATMOCN_COUPLING
124  opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
125  opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
126 #endif
127 #ifdef ENABLE_ATMLND_COUPLING
128  opts.addOpt< int >( "startLnd,e", "start task for land layout", &startG3 );
129  opts.addOpt< int >( "endLnd,f", "end task for land layout", &endG3 );
130 #endif
131 
132  opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
133  opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );
134 
135  opts.addOpt< int >( "partitioning,p", "partitioning option for migration", &repartitioner_scheme );
136 
137  int n = 1; // number of send/receive / project / send back cycles
138  opts.addOpt< int >( "iterations,n", "number of iterations for coupler", &n );
139 
140  bool no_regression_test = false;
141  opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline 1", &no_regression_test );
142  opts.parseCommandLine( argc, argv );
143 
144  char fileWriteOptions[] = "PARALLEL=WRITE_PART";
145 
146  if( !rankInGlobalComm )
147  {
148  std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1 <<
149 #ifdef ENABLE_ATMOCN_COUPLING
150  "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2 <<
151 #endif
152 #ifdef ENABLE_ATMLND_COUPLING
153  "\n lnd file: " << lndFilename << "\n on tasks : " << startG3 << ":" << endG3 <<
154 #endif
155  "\n partitioning (0 trivial, 1 graph, 2 geometry) " << repartitioner_scheme << "\n ";
156  }
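// Illustrative invocation (the executable name, task counts and ranges below are placeholders; any
// layout that fits the available MPI tasks works), e.g.:
//   mpiexec -n 4 ./imoab_coupler -a 0 -b 1 -c 2 -d 3 -e 0 -f 1 -g 0 -j 3 -p 2 -n 1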
157 
158  // load files on 3 different communicators, groups
159  // first group has task 0, second group has tasks 0 and 1
160  // coupler will be on joint tasks, on a third group (0 and 1, again)
163  MPI_Group atmPEGroup;
164  MPI_Comm atmComm;
165  ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
166  CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )
167 
168 #ifdef ENABLE_ATMOCN_COUPLING
169  MPI_Group ocnPEGroup;
170  MPI_Comm ocnComm;
171  ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
172  CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
173 #endif
174 
175 #ifdef ENABLE_ATMLND_COUPLING
176  MPI_Group lndPEGroup;
177  MPI_Comm lndComm;
178  ierr = create_group_and_comm( startG3, endG3, jgroup, &lndPEGroup, &lndComm );
179  CHECKIERR( ierr, "Cannot create lnd MPI group and communicator " )
180 #endif
181 
182  // we will always have a coupler
183  MPI_Group couPEGroup;
184  MPI_Comm couComm;
185  ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
186  CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )
187 
188  // atm_coupler
189  MPI_Group joinAtmCouGroup;
190  MPI_Comm atmCouComm;
191  ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
192  CHECKIERR( ierr, "Cannot create joint atm cou communicator" )
193 
194 #ifdef ENABLE_ATMOCN_COUPLING
195  // ocn_coupler
196  MPI_Group joinOcnCouGroup;
197  MPI_Comm ocnCouComm;
198  ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
199  CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
200 #endif
201 
202 #ifdef ENABLE_ATMLND_COUPLING
203  // lnd_coupler
204  MPI_Group joinLndCouGroup;
205  MPI_Comm lndCouComm;
206  ierr = create_joint_comm_group( lndPEGroup, couPEGroup, &joinLndCouGroup, &lndCouComm );
207  CHECKIERR( ierr, "Cannot create joint lnd cou communicator" )
208 #endif
209 
210  ierr = iMOAB_Initialize( argc, argv ); // nothing is really needed from argc/argv yet; they could be forwarded to MOAB options later
211  CHECKIERR( ierr, "Cannot initialize iMOAB" )
212 
213  int cmpAtmAppID = -1;
214  iMOAB_AppID cmpAtmPID = &cmpAtmAppID; // atm
215  int cplAtmAppID = -1; // -1 means it is not initialized
216  iMOAB_AppID cplAtmPID = &cplAtmAppID; // atm on coupler PEs
217 #ifdef ENABLE_ATMOCN_COUPLING
218  int cmpOcnAppID = -1;
219  iMOAB_AppID cmpOcnPID = &cmpOcnAppID; // ocn
220  int cplOcnAppID = -1, cplAtmOcnAppID = -1; // -1 means it is not initialized
221  iMOAB_AppID cplOcnPID = &cplOcnAppID; // ocn on coupler PEs
222  iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID; // intx atm -ocn on coupler PEs
223 #endif
224 
225 #ifdef ENABLE_ATMLND_COUPLING
226  int cmpLndAppID = -1;
227  iMOAB_AppID cmpLndPID = &cmpLndAppID; // lnd
228  int cplLndAppID = -1, cplAtmLndAppID = -1; // -1 means it is not initialized
229  iMOAB_AppID cplLndPID = &cplLndAppID; // land on coupler PEs
230  iMOAB_AppID cplAtmLndPID = &cplAtmLndAppID; // intx atm - lnd on coupler PEs
231 #endif
232 
233  if( couComm != MPI_COMM_NULL )
234  {
235  MPI_Comm_rank( couComm, &rankInCouComm );
236  // Register all the applications on the coupler PEs
237  ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
238  cplAtmPID ); // atm on coupler pes
239  CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
240 #ifdef ENABLE_ATMOCN_COUPLING
241  ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
242  cplOcnPID ); // ocn on coupler pes
243  CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
244 #endif
245 #ifdef ENABLE_ATMLND_COUPLING
246  ierr = iMOAB_RegisterApplication( "LNDX", &couComm, &cpllnd,
247  cplLndPID ); // lnd on coupler pes
248  CHECKIERR( ierr, "Cannot register LND over coupler PEs" )
249 #endif
250  }
251 
252  if( atmComm != MPI_COMM_NULL )
253  {
254  MPI_Comm_rank( atmComm, &rankInAtmComm );
255  ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
256  CHECKIERR( ierr, "Cannot register ATM App" )
257  }
258 
259 #ifdef ENABLE_ATMOCN_COUPLING
260  if( ocnComm != MPI_COMM_NULL )
261  {
262  MPI_Comm_rank( ocnComm, &rankInOcnComm );
263  ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
264  CHECKIERR( ierr, "Cannot register OCN App" )
265  }
266 #endif
267 
268  // atm
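// setup_component_coupler_meshes (helper defined in imoab_coupler_utils.hpp) loads the mesh on the
// component PEs with the given read options, then migrates it to the coupler PEs with the selected
// repartitioner scheme, communicating over the joint component-coupler communicator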
269  ierr =
270  setup_component_coupler_meshes( cmpAtmPID, cmpatm, cplAtmPID, cplatm, &atmComm, &atmPEGroup, &couComm,
271  &couPEGroup, &atmCouComm, atmFilename, readopts, nghlay, repartitioner_scheme );
272  CHECKIERR( ierr, "Cannot load and migrate atm mesh" )
273 
274  int global_atm[2] = { 0, 0 };
275  if( atmComm != MPI_COMM_NULL )
276  {
277  ierr = iMOAB_GetGlobalInfo( cmpAtmPID, &( global_atm[0] ), &( global_atm[1] ) );
278  CHECKIERR( ierr, "cannot get atm global info" )
279  if( !rankInAtmComm )
280  std::cout << " atm mesh: vertices: " << global_atm[0] << " cells: " << global_atm[1] << "\n";
281  }
282  // broadcast to all tasks
283  int global_atm_red[2] = { 0, 0 };
284  MPI_Allreduce( global_atm, global_atm_red, 2, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
285  if( couComm != MPI_COMM_NULL )
286  {
287  int global_atm_cou[2];
288  ierr = iMOAB_GetGlobalInfo( cplAtmPID, &( global_atm_cou[0] ), &( global_atm_cou[1] ) );
289  CHECKIERR( ierr, "cannot get atm global info on coupler" )
290  CHECK_ARRAYS_EQUAL( global_atm_red, 2, global_atm_cou, 2 );
291  if( !rankInCouComm )
292  std::cout << " atm mesh on coupler: vertices: " << global_atm_cou[0] << " cells: " << global_atm_cou[1] << "\n";
293  }
294 
295 #ifdef VERBOSE
296  if( couComm != MPI_COMM_NULL && 1 == n )
297  { // write only for n==1 case
298  char outputFileTgt3[] = "recvAtm.h5m";
299  ierr = iMOAB_WriteMesh( cplAtmPID, outputFileTgt3, fileWriteOptions );
301  CHECKIERR( ierr, "cannot write atm mesh after receiving" )
302  }
303 #endif
304 #ifdef GRAPH_INFO
305  if( atmComm != MPI_COMM_NULL )
306  {
307 
308  int is_sender = 1;
309  int context = -1;
310  iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmMigS" );
311  }
312  if( couComm != MPI_COMM_NULL )
313  {
314  int is_sender = 0;
315  int context = -1;
316  iMOAB_DumpCommGraph( cplAtmPID, &context, &is_sender, "AtmMigR" );
317  }
318 #endif
319  MPI_Barrier( MPI_COMM_WORLD );
320 
321 #ifdef ENABLE_ATMOCN_COUPLING
322  // ocean
323  ierr =
324  setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
325  &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
326  CHECKIERR( ierr, "Cannot load and migrate ocn mesh" )
327 
328  MPI_Barrier( MPI_COMM_WORLD );
329 
330 #ifdef VERBOSE
331  if( couComm != MPI_COMM_NULL && 1 == n )
332  { // write only for n==1 case
333  char outputFileTgt3[] = "recvOcn.h5m";
334  ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt3, fileWriteOptions );
335  CHECKIERR( ierr, "cannot write ocn mesh after receiving" )
336  }
337 #endif
338 #endif // #ifdef ENABLE_ATMOCN_COUPLING
339 
340 #ifdef ENABLE_ATMLND_COUPLING
341  // land
342  if( lndComm != MPI_COMM_NULL )
343  {
344  ierr = iMOAB_RegisterApplication( "LND1", &lndComm, &cmplnd, cmpLndPID );
345  CHECKIERR( ierr, "Cannot register LND App " )
346  }
347  ierr = setup_component_coupler_meshes( cmpLndPID, cmplnd, cplLndPID, cpllnd, &lndComm, &lndPEGroup, &couComm,
348  &couPEGroup, &lndCouComm, lndFilename, readoptsLnd, nghlay,
349  repartitioner_scheme );
350  CHECKIERR( ierr, "Cannot load and migrate lnd mesh" )
351  if( couComm != MPI_COMM_NULL && 1 == n )
352  { // write only for n==1 case
353  char outputFileLnd[] = "recvLnd.h5m";
354  ierr = iMOAB_WriteMesh( cplLndPID, outputFileLnd, fileWriteOptions );
355  CHECKIERR( ierr, "cannot write lnd mesh after receiving" )
356  }
357 
358 #endif // #ifdef ENABLE_ATMLND_COUPLING
359 
360  MPI_Barrier( MPI_COMM_WORLD );
361 
362 #ifdef ENABLE_ATMOCN_COUPLING
363  if( couComm != MPI_COMM_NULL )
364  {
365  // now compute intersection between OCNx and ATMx on coupler PEs
366  ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
367  CHECKIERR( ierr, "Cannot register ocn_atm intx over coupler pes " )
368  }
369 #endif
370 #ifdef ENABLE_ATMLND_COUPLING
371  if( couComm != MPI_COMM_NULL )
372  {
373  // now compute intersection between LNDx and ATMx on coupler PEs
374  ierr = iMOAB_RegisterApplication( "ATMLND", &couComm, &atmlndid, cplAtmLndPID );
375  CHECKIERR( ierr, "Cannot register lnd_atm intx over coupler pes " )
376  }
377 #endif
378 
379  int disc_orders[3] = { 4, 1, 1 };
380  const std::string weights_identifiers[2] = { "scalar", "scalar-pc" };
381  const std::string disc_methods[3] = { "cgll", "fv", "pcloud" };
382  const std::string dof_tag_names[3] = { "GLOBAL_DOFS", "GLOBAL_ID", "GLOBAL_ID" };
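// index 0 describes the atm source (4th-order cgll, addressed through the GLOBAL_DOFS tag);
// index 1 the finite-volume ocn target and index 2 the lnd point cloud, both addressed through GLOBAL_ID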
383 #ifdef ENABLE_ATMOCN_COUPLING
384  if( couComm != MPI_COMM_NULL )
385  {
386  PUSH_TIMER( "Compute ATM-OCN mesh intersection" )
387  ierr = iMOAB_ComputeMeshIntersectionOnSphere( cplAtmPID, cplOcnPID, cplAtmOcnPID );
388  // coverage mesh was computed here, for cplAtmPID, atm on coupler pes
389  // basically, atm was redistributed according to the target (ocean) partition, to "cover" the
390  // ocean partitions; check if intx is valid, write some h5m intx file
391  CHECKIERR( ierr, "cannot compute intersection" )
392  POP_TIMER( couComm, rankInCouComm )
393 #ifdef VERBOSE
394  char prefix[] = "intx_atmocn";
395  ierr = iMOAB_WriteLocalMesh( cplAtmOcnPID, prefix );
396  CHECKIERR( ierr, "failed to write local intx mesh" );
397 #endif
398  }
399 
400  if( atmCouComm != MPI_COMM_NULL )
401  {
402 
403  // the new graph will be for sending data from atm comp to coverage mesh;
404  // it involves initial atm app; cmpAtmPID; also migrate atm mesh on coupler pes, cplAtmPID
405  // results are in cplAtmOcnPID, intx mesh; remapper also has some info about coverage mesh
406  // after this, the sending of tags from atm pes to coupler pes will use the new par comm
407  // graph, which has more precise info about what to send for the ocean cover; every
408  // time, we will use the element global id, which should uniquely identify the
409  // element
410  PUSH_TIMER( "Compute OCN coverage graph for ATM mesh" )
411  ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmOcnPID, &cmpatm, &cplatm,
412  &cplocn ); // it happens over joint communicator
413  CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean" )
414  POP_TIMER( atmCouComm, rankInAtmComm ) // hijack this rank
415  }
416 #endif
417 
418 #ifdef ENABLE_ATMLND_COUPLING
419  if( couComm != MPI_COMM_NULL )
420  {
421  PUSH_TIMER( "Compute ATM-LND mesh intersection" )
422  ierr = iMOAB_ComputePointDoFIntersection( cplAtmPID, cplLndPID, cplAtmLndPID );
423  CHECKIERR( ierr, "failed to compute point-cloud mapping" );
424  POP_TIMER( couComm, rankInCouComm )
425  }
426  if( atmCouComm != MPI_COMM_NULL )
427  {
428  // the new graph will be for sending data from atm comp to coverage mesh for land mesh;
429  // it involves initial atm app; cmpAtmPID; also migrate atm mesh on coupler pes, cplAtmPID
430  // results are in cplAtmLndPID, intx mesh; remapper also has some info about coverage mesh
431  // after this, the sending of tags from atm pes to coupler pes will use the new par comm
432  // graph, which has more precise info about what to send (specifically for land cover);
433  // every time, we will use the element global id, which should uniquely identify the
434  // element
435  PUSH_TIMER( "Compute LND coverage graph for ATM mesh" )
436  ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmLndPID, &cmpatm, &cplatm,
437  &cpllnd ); // it happens over joint communicator
438  CHECKIERR( ierr, "cannot recompute direct coverage graph for land" )
439  POP_TIMER( atmCouComm, rankInAtmComm ) // hijack this rank
440  }
441 #endif
442 
443  MPI_Barrier( MPI_COMM_WORLD );
444 
445  int fMonotoneTypeID = 0, fVolumetric = 0, fValidate = 0, fNoConserve = 0, fNoBubble = 1, fInverseDistanceMap = 0;
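// these flags are forwarded to iMOAB_ComputeScalarProjectionWeights below; as the names suggest, the
// values chosen here request no monotonicity constraint, no volumetric projection, no map validation,
// conservation kept (fNoConserve = 0), no bubble correction on the cgll source (fNoBubble = 1), and
// no inverse-distance map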
446 
447 #ifdef ENABLE_ATMOCN_COUPLING
448 #ifdef VERBOSE
449  if( couComm != MPI_COMM_NULL && 1 == n )
450  { // write only for n==1 case
451  char serialWriteOptions[] = ""; // for writing in serial
452  std::stringstream outf;
453  outf << "intxAtmOcn_" << rankInCouComm << ".h5m";
454  std::string intxfile = outf.str(); // write in serial the intx file, for debugging
455  ierr = iMOAB_WriteMesh( cplAtmOcnPID, intxfile.c_str(), serialWriteOptions );
456  CHECKIERR( ierr, "cannot write intx file result" )
457  }
458 #endif
459 
460  if( couComm != MPI_COMM_NULL )
461  {
462  PUSH_TIMER( "Compute the projection weights with TempestRemap" )
463  ierr = iMOAB_ComputeScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(),
464  disc_methods[0].c_str(), &disc_orders[0], disc_methods[1].c_str(),
465  &disc_orders[1], nullptr, &fNoBubble, &fMonotoneTypeID,
466  &fVolumetric, &fInverseDistanceMap, &fNoConserve, &fValidate,
467  dof_tag_names[0].c_str(), dof_tag_names[1].c_str() );
468  CHECKIERR( ierr, "cannot compute scalar projection weights" )
469  POP_TIMER( couComm, rankInCouComm )
470 
471  // Let us now write the map file to disk and then read it back to test the I/O API in iMOAB
472 #ifdef MOAB_HAVE_NETCDF
473  {
474  const std::string atmocn_map_file_name = "atm_ocn_map.nc";
475  ierr = iMOAB_WriteMappingWeightsToFile( cplAtmOcnPID, weights_identifiers[0].c_str(),
476  atmocn_map_file_name.c_str() );
477  CHECKIERR( ierr, "failed to write map file to disk" );
478 
479  const std::string intx_from_file_identifier = "map-from-file";
480  int dummyCpl = -1;
481  int dummy_rowcol = -1;
482  int dummyType = 0;
483  ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, &dummyCpl, &dummy_rowcol, &dummyType,
484  intx_from_file_identifier.c_str(), atmocn_map_file_name.c_str() );
485  CHECKIERR( ierr, "failed to load map file from disk" );
486  }
487 #endif
488  }
489 
490 #endif
491 
492  MPI_Barrier( MPI_COMM_WORLD );
493 
494 #ifdef ENABLE_ATMLND_COUPLING
495  if( couComm != MPI_COMM_NULL )
496  {
497  fValidate = 0;
498  /* Compute the weights to project the solution from the ATM component to the LND component */
499  PUSH_TIMER( "Compute ATM-LND remapping weights" )
500  ierr = iMOAB_ComputeScalarProjectionWeights( cplAtmLndPID, weights_identifiers[1].c_str(),
501  disc_methods[0].c_str(), &disc_orders[0], disc_methods[2].c_str(),
502  &disc_orders[2], nullptr, &fNoBubble, &fMonotoneTypeID,
503  &fVolumetric, &fInverseDistanceMap, &fNoConserve, &fValidate,
504  dof_tag_names[0].c_str(), dof_tag_names[2].c_str() );
505  CHECKIERR( ierr, "failed to compute remapping projection weights for ATM-LND scalar "
506  "non-conservative field" );
507  POP_TIMER( couComm, rankInCouComm )
508 
509  // Let us now write the map file to disk and then read it back to test the I/O API in iMOAB
510  // VSM: TODO: This does not work since the LND model is a point cloud and we do not initialize
511  // data correctly in TempestOnlineMap::WriteParallelWeightsToFile routine.
512  // {
513  // const char* atmlnd_map_file_name = "atm_lnd_map.nc";
514  // ierr = iMOAB_WriteMappingWeightsToFile( cplAtmLndPID, weights_identifiers[1], atmlnd_map_file_name );
515  // CHECKIERR( ierr, "failed to write map file to disk" );
516 
517  // const char* intx_from_file_identifier = "map-from-file";
518  // ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmLndPID, intx_from_file_identifier, atmlnd_map_file_name,
519  // NULL, NULL, NULL,
520  // );
521  // CHECKIERR( ierr, "failed to load map file from disk" );
522  // }
523  }
524 #endif
525 
526  int tagIndex[2];
527  int tagTypes[2] = { DENSE_DOUBLE, DENSE_DOUBLE };
528  int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = 1 /*FV*/;
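// atmCompNDoFs = 4 * 4 = 16 GLL points per element for the 4th-order cgll source discretization;
// the finite-volume ocean target carries a single DoF per cell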
529 
530  const char* bottomFields = "a2oTbot:a2oUbot:a2oVbot";
531  const char* bottomProjectedFields = "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj";
532 
533  if( couComm != MPI_COMM_NULL )
534  {
535  ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFields, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
536  CHECKIERR( ierr, "failed to define the field tag a2oTbot" );
537 #ifdef ENABLE_ATMOCN_COUPLING
538  ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
539  CHECKIERR( ierr, "failed to define the field tag a2oTbot_proj" );
540 #endif
541  }
542 
543  // need to make sure that the coverage mesh (created during the intx method) received the tags that
544  // need to be projected to the target; so far, the coverage mesh has only the ids and global dofs;
545  // need to change the migrate method to accommodate any GLL tag
546  // now send a tag from original atmosphere (cmpAtmPID) towards migrated coverage mesh
547  // (cplAtmPID), using the new coverage graph communicator
548 
549  // make the tag 0, to check we are actually sending needed data
550  {
551  if( cplAtmAppID >= 0 )
552  {
553  int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
554  /*
555  * Each process in the communicator will have access to a local mesh instance, which
556  * will contain the original cells in the local partition and ghost entities. Number of
557  * vertices, primary cells, visible blocks, number of sidesets and nodesets boundary
558  * conditions will be returned in size-3 arrays, for local, ghost and total
559  * numbers.
560  */
561  ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
562  CHECKIERR( ierr, "failed to get num primary elems" );
563  int numAllElem = nelem[2];
564  std::vector< double > vals;
565  int storLeng = atmCompNDoFs * numAllElem * 3; // 3 tags
566  int eetype = 1;
567 
568  vals.resize( storLeng );
569  for( int k = 0; k < storLeng; k++ )
570  vals[k] = 0.;
571 
572  ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomFields, &storLeng, &eetype, &vals[0] );
573  CHECKIERR( ierr, "cannot make tag nul" )
574  // set the tag to 0
575  }
576  }
577 
578  const char* concat_fieldname = "a2oTbot:a2oUbot:a2oVbot";
579  const char* concat_fieldnameT = "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj";
580 
581  // start a virtual loop for number of iterations
582  for( int iters = 0; iters < n; iters++ )
583  {
584 #ifdef ENABLE_ATMOCN_COUPLING
585  PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
586  if( atmComm != MPI_COMM_NULL )
587  {
588  // as always, use nonblocking sends
589  // this is for projection to ocean:
590  ierr = iMOAB_SendElementTag( cmpAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cplocn );
591  CHECKIERR( ierr, "cannot send tag values" )
592 #ifdef GRAPH_INFO
593  int is_sender = 1;
594  int context = cplocn;
595  iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmCovOcnS" );
596 #endif
597  }
598  if( couComm != MPI_COMM_NULL )
599  {
600  // receive on atm on coupler pes, that was redistributed according to coverage
601  ierr = iMOAB_ReceiveElementTag( cplAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cplocn );
602  CHECKIERR( ierr, "cannot receive tag values" )
603 #ifdef GRAPH_INFO
604  int is_sender = 0;
605  int context = cplocn; // the same context, cplocn
606  iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmCovOcnR" );
607 #endif
608  }
610 
611  // we can now free the sender buffers
612  if( atmComm != MPI_COMM_NULL )
613  {
614  ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn ); // context is for ocean
615  CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
616  }
617 #ifdef VERBOSE
618  if( couComm != MPI_COMM_NULL && 1 == n )
619  {
620  // write only for n==1 case
621  char outputFileRecvd[] = "recvAtmCoupOcn.h5m";
622  ierr = iMOAB_WriteMesh( cplAtmPID, outputFileRecvd, fileWriteOptions );
623  CHECKIERR( ierr, "could not write recvAtmCoupOcn.h5m to disk" )
624  }
625 #endif
626 
627  if( couComm != MPI_COMM_NULL )
628  {
629  /* We have the remapping weights now. Let us apply the weights onto the tag we defined
630  on the source mesh and get the projection on the target mesh */
631  PUSH_TIMER( "Apply Scalar projection weights" )
632  ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(), concat_fieldname,
633  concat_fieldnameT );
634  CHECKIERR( ierr, "failed to compute projection weight application" );
635  POP_TIMER( couComm, rankInCouComm )
636  if( 1 == n ) // write only for n==1 case
637  {
638  char outputFileTgt[] = "fOcnOnCpl.h5m";
639  ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
640  CHECKIERR( ierr, "could not write fOcnOnCpl.h5m to disk" )
641  }
642  }
643 
644  // send the projected tag back to ocean pes, with send/receive tag
645  if( ocnComm != MPI_COMM_NULL )
646  {
647  int tagIndexIn2;
648  ierr =
649  iMOAB_DefineTagStorage( cmpOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
650  CHECKIERR( ierr, "failed to define the field tag for receiving back the tags "
651  "a2oTbot_proj, a2oUbot_proj, a2oVbot_proj on ocn pes" );
652  }
653  // send the tag to ocean pes, from ocean mesh on coupler pes
654  // from couComm, using common joint comm ocn_coupler
655  // as always, use nonblocking sends
656  // original graph (context is -1)
657  if( couComm != MPI_COMM_NULL )
658  {
659  // need to use ocean comp id for context
660  context_id = cmpocn; // id for ocean on comp
661  ierr =
662  iMOAB_SendElementTag( cplOcnPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &ocnCouComm, &context_id );
663  CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
664  }
665 
666  // receive on component 2, ocean
667  if( ocnComm != MPI_COMM_NULL )
668  {
669  context_id = cplocn; // id for ocean on coupler
670  ierr = iMOAB_ReceiveElementTag( cmpOcnPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &ocnCouComm,
671  &context_id );
672  CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
673  }
674 
675  MPI_Barrier( MPI_COMM_WORLD );
676 
677  if( couComm != MPI_COMM_NULL )
678  {
679  context_id = cmpocn;
680  ierr = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
681  CHECKIERR( ierr, "cannot free send/receive buffers for OCN context" )
682  }
683  if( ocnComm != MPI_COMM_NULL && 1 == n ) // write only for n==1 case
684  {
685  char outputFileOcn[] = "OcnWithProj.h5m";
686  ierr = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
687  CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
688  // test results only for n == 1, for the projected temperature field a2oTbot_proj
689  if( !no_regression_test )
690  {
691  // the same as remap test
692  // get temp field on ocean, from conservative, the global ids, and dump to the baseline file
693  // first get GlobalIds from ocn, and fields:
694  int nverts[3], nelem[3];
695  ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
696  CHECKIERR( ierr, "failed to get ocn mesh info" );
697  std::vector< int > gidElems;
698  gidElems.resize( nelem[2] );
699  std::vector< double > tempElems;
700  tempElems.resize( nelem[2] );
701  // get global id storage
702  const std::string GidStr = "GLOBAL_ID"; // hard coded too
703  int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
704  ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
705  CHECKIERR( ierr, "failed to define global id tag" );
706 
707  int ent_type = 1;
708  ierr = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
709  CHECKIERR( ierr, "failed to get global ids" );
710  ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, "a2oTbot_proj", &nelem[2], &ent_type, &tempElems[0] );
711  CHECKIERR( ierr, "failed to get temperature field" );
712  int err_code = 1;
713  check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
714  if( 0 == err_code )
715  std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
716  }
717  }
718 #endif
719 
720 #ifdef ENABLE_ATMLND_COUPLING
721  // start land proj:
722  PUSH_TIMER( "Send/receive data from component atm to coupler, in land context" )
723  if( atmComm != MPI_COMM_NULL )
724  {
725  // as always, use nonblocking sends
726  // this is for projection to land:
727  ierr = iMOAB_SendElementTag( cmpAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cpllnd );
728  CHECKIERR( ierr, "cannot send tag values" )
729  }
730  if( couComm != MPI_COMM_NULL )
731  {
732  // receive on atm on coupler pes, that was redistributed according to coverage, for land
733  // context
734  ierr = iMOAB_ReceiveElementTag( cplAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cpllnd );
735  CHECKIERR( ierr, "cannot receive tag values" )
736  }
738 
739  // we can now free the sender buffers
740  if( atmComm != MPI_COMM_NULL )
741  {
742  ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cpllnd );
743  CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh "
744  "for land context" )
745  }
746 #ifdef VERBOSE
747  if( couComm != MPI_COMM_NULL && 1 == n )
748  { // write only for n==1 case
749  char outputFileRecvd[] = "recvAtmCoupLnd.h5m";
750  ierr = iMOAB_WriteMesh( cplAtmPID, outputFileRecvd, fileWriteOptions );
751  CHECKIERR( ierr, "could not write recvAtmCoupLnd.h5m to disk" )
752  }
753 #endif
754 
755  /* We have the remapping weights now. Let us apply the weights onto the tag we defined
756  on the source mesh and get the projection on the target mesh */
757  if( couComm != MPI_COMM_NULL )
758  {
759  PUSH_TIMER( "Apply Scalar projection weights for land" )
760  ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmLndPID, weights_identifiers[1].c_str(), concat_fieldname,
761  concat_fieldnameT );
762  CHECKIERR( ierr, "failed to compute projection weight application" );
763  POP_TIMER( couComm, rankInCouComm )
764  }
765 
766 #ifdef VERBOSE
767  if( couComm != MPI_COMM_NULL && 1 == n )
768  { // write only for n==1 case
769  char outputFileTgtLnd[] = "fLndOnCpl.h5m";
770  ierr = iMOAB_WriteMesh( cplLndPID, outputFileTgtLnd, fileWriteOptions );
771  CHECKIERR( ierr, "could not write fLndOnCpl.h5m to disk" )
772  }
773 #endif
774 
775  // end land proj
776  // send the tags back to land pes, from land mesh on coupler pes
777  // send from cplLndPID to cmpLndPID, using common joint comm
778  // as always, use nonblocking sends
779  // original graph
780  // int context_id = -1;
781  // the land component might not have these tags yet; they should have land-specific names
782  // (in e3sm we do have different names)
783  if( lndComm != MPI_COMM_NULL )
784  {
785  int tagIndexIn2;
786  ierr =
787  iMOAB_DefineTagStorage( cmpLndPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
788  CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
789  "a2oTbot_proj, a2oUbot_proj, a2oVbot_proj on lnd pes" );
790  }
791  if( couComm != MPI_COMM_NULL )
792  {
793  context_id = cmplnd; // land comp id
794  ierr =
795  iMOAB_SendElementTag( cplLndPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &lndCouComm, &context_id );
796  CHECKIERR( ierr, "cannot send tag values back to land pes" )
797  }
798  // receive on component 3, land
799  if( lndComm != MPI_COMM_NULL )
800  {
801  context_id = cpllnd; // land on coupler id
802  ierr = iMOAB_ReceiveElementTag( cmpLndPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &lndCouComm,
803  &context_id );
804  CHECKIERR( ierr, "cannot receive tag values from land mesh on coupler pes" )
805  }
806 
807  MPI_Barrier( MPI_COMM_WORLD );
808  if( couComm != MPI_COMM_NULL )
809  {
810  context_id = cmplnd;
811  ierr = iMOAB_FreeSenderBuffers( cplLndPID, &context_id );
812  CHECKIERR( ierr, "cannot free buffers used to resend land tag towards the coverage mesh "
813  "for atm context" )
814  }
815  if( lndComm != MPI_COMM_NULL && 1 == n ) // write only for n==1 case
816  {
817  char outputFileLnd[] = "LndWithProj.h5m";
818  ierr = iMOAB_WriteMesh( cmpLndPID, outputFileLnd, fileWriteOptions );
819  CHECKIERR( ierr, "could not write LndWithProj.h5m to disk" )
820  }
821 #endif // ENABLE_ATMLND_COUPLING
822 
823  } // end loop iterations n
824 #ifdef ENABLE_ATMLND_COUPLING
825  if( lndComm != MPI_COMM_NULL )
826  {
827  ierr = iMOAB_DeregisterApplication( cmpLndPID );
828  CHECKIERR( ierr, "cannot deregister app LND1" )
829  }
830 #endif // ENABLE_ATMLND_COUPLING
831 
832 #ifdef ENABLE_ATMOCN_COUPLING
833  if( couComm != MPI_COMM_NULL )
834  {
835  ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
836  CHECKIERR( ierr, "cannot deregister app intx AO" )
837  }
838  if( ocnComm != MPI_COMM_NULL )
839  {
840  ierr = iMOAB_DeregisterApplication( cmpOcnPID );
841  CHECKIERR( ierr, "cannot deregister app OCN1" )
842  }
843 #endif // ENABLE_ATMOCN_COUPLING
844 
845  if( atmComm != MPI_COMM_NULL )
846  {
847  ierr = iMOAB_DeregisterApplication( cmpAtmPID );
848  CHECKIERR( ierr, "cannot deregister app ATM1" )
849  }
850 
851 #ifdef ENABLE_ATMLND_COUPLING
852  if( couComm != MPI_COMM_NULL )
853  {
854  ierr = iMOAB_DeregisterApplication( cplLndPID );
855  CHECKIERR( ierr, "cannot deregister app LNDX" )
856  }
857 #endif // ENABLE_ATMLND_COUPLING
858 
859 #ifdef ENABLE_ATMOCN_COUPLING
860  if( couComm != MPI_COMM_NULL )
861  {
862  ierr = iMOAB_DeregisterApplication( cplOcnPID );
863  CHECKIERR( ierr, "cannot deregister app OCNX" )
864  }
865 #endif // ENABLE_ATMOCN_COUPLING
866 
867  if( couComm != MPI_COMM_NULL )
868  {
869  ierr = iMOAB_DeregisterApplication( cplAtmPID );
870  CHECKIERR( ierr, "cannot deregister app ATMX" )
871  }
872 
873  //#endif
874  ierr = iMOAB_Finalize();
875  CHECKIERR( ierr, "did not finalize iMOAB" )
876 
877  // free atm coupler group and comm
878  if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
879  MPI_Group_free( &joinAtmCouGroup );
880  if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );
881 
882 #ifdef ENABLE_ATMOCN_COUPLING
883  if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
884  // free ocn - coupler group and comm
885  if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
886  MPI_Group_free( &joinOcnCouGroup );
887 #endif
888 
889 #ifdef ENABLE_ATMLND_COUPLING
890  if( MPI_COMM_NULL != lndComm ) MPI_Comm_free( &lndComm );
891  // free land - coupler group and comm
892  if( MPI_COMM_NULL != lndCouComm ) MPI_Comm_free( &lndCouComm );
893  MPI_Group_free( &joinLndCouGroup );
894 #endif
895 
896  if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );
897 
898  MPI_Group_free( &atmPEGroup );
899 #ifdef ENABLE_ATMOCN_COUPLING
900  MPI_Group_free( &ocnPEGroup );
901 #endif
902 #ifdef ENABLE_ATMLND_COUPLING
903  MPI_Group_free( &lndPEGroup );
904 #endif
905  MPI_Group_free( &couPEGroup );
906  MPI_Group_free( &jgroup );
907 
908  MPI_Finalize();
909 
910  return 0;
911 }