MOAB: Mesh Oriented datABase  (version 5.5.0)
imoab_phatm_ocn_coupler.cpp
1 /**
2  * imoab_phAtm_ocn_coupler.cpp
3  *
4  * This imoab_phAtm_ocn_coupler test simulates coupling between 3 component models.
5  * Meshes are loaded from 3 files (atm, ocean, and phys atm); the atm and ocn meshes
6  * are migrated to the coupler PEs, where their intersection (intx) is computed
7  * and projection weights are generated, so that a field from the phys atm
8  * can be transferred to the ocn.
9  * Currently, the phys atm sends some data that is projected to the ocean.
10  *
11  * First, intersect atm and ocn, and recompute comm graph 1 between atm phys and atm_cx,
12  * for the ocn intx; the same may be repeated for land.
13  */
14 
15 #include "moab/Core.hpp"
16 #ifndef MOAB_HAVE_MPI
17 #error The climate coupler test example requires MPI configuration
18 #endif
19 
20 // MPI includes
21 #include "moab_mpi.h"
22 #include "moab/ParallelComm.hpp"
23 #include "MBParallelConventions.h"
24 
25 #include "moab/iMOAB.h"
26 #include "TestUtil.hpp"
27 #include "moab/CpuTimer.hpp"
28 #include "moab/ProgOptions.hpp"
29 #include <iostream>
30 #include <sstream>
31 
32 #include "imoab_coupler_utils.hpp"
33 
34 using namespace moab;
35 
36 //#define VERBOSE
37 
38 #ifndef MOAB_HAVE_TEMPESTREMAP
39 #error The climate coupler test example requires MOAB configuration with TempestRemap
40 #endif
41 
42 #define ENABLE_ATMOCN_COUPLING
43 // for land coupling with phys atm, we do not have to "compute" intersection
44 // it is enough to know that the ids are the same, we can project from atm to land that way, using a
45 // computed graph
46 #define ENABLE_ATMLND_COUPLING
47 
48 int main( int argc, char* argv[] )
49 {
50  int ierr;
51  int rankInGlobalComm = -1, numProcesses = 1;
52  MPI_Group jgroup;
53  std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
54  std::string readoptsPhysAtm( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" );
55 
56  // Timer data
57  moab::CpuTimer timer;
58  double timer_ops;
59  std::string opName;
60 
61  int repartitioner_scheme = 0;
62 #ifdef MOAB_HAVE_ZOLTAN
63  repartitioner_scheme = 2; // use the graph partitioner in that case
64 #endif
65 
66  MPI_Init( &argc, &argv );
67  MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
68  MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );
69 
70  MPI_Comm_group( MPI_COMM_WORLD, &jgroup ); // all processes in jgroup
71 
72  std::string atmFilename = TestDir + "unittest/wholeATM_T.h5m"; // we should use only mesh from here
73  std::string atmPhysMesh =
74  TestDir + "unittest/AtmPhys_01.h5m"; // it has some data associated to vertices, T_ph, u_ph, v_ph
75  // we will eventually project that data to ocean mesh, after intx atm/ocn
76 
77  // on a regular case, 5 ATM, 6 CPLATM (ATMX), 17 OCN , 18 CPLOCN (OCNX) ;
78  // intx atm/ocn is not in e3sm yet, give a number
79  // 6 * 100+ 18 = 618 : atmocnid
80  // 9 LND, 10 CPLLND
81  // 6 * 100 + 10 = 610 atmlndid:
82  // cmpatm is for atm on atm pes
83  // cmpocn is for ocean, on ocean pe
84  // cplatm is for atm on coupler pes
85  // cplocn is for ocean on coupler pes
86  // atmocnid is for intx atm / ocn on coupler pes
87  //
88 
89  int cmpatm = 5,
90  cplatm = 6; // component ids are unique over all pes, and established in advance;
91  int cmpPhysAtm = 105; // different from atm spectral ?
92 #ifdef ENABLE_ATMOCN_COUPLING
93  std::string ocnFilename = TestDir + "unittest/recMeshOcn.h5m";
94  int rankInOcnComm = -1;
95  int cmpocn = 17, cplocn = 18,
96  atmocnid = 618; // component ids are unique over all pes, and established in advance;
97 #endif
98 #ifdef ENABLE_ATMLND_COUPLING
99  std::string lndFilename = TestDir + "unittest/wholeLnd.h5m";
100  int cpllnd = 10,
101  cmplnd = 9; // component ids are unique over all pes, and established in advance;
102 #endif
103 
104  int rankInCouComm = -1;
105 
106  int nghlay = 0; // number of ghost layers for loading the file
107 
108  int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1, startG3 = startG1, endG3 = endG1;
109  int startG4 = startG1, endG4 = endG1; // these are for coupler layout
110  int context_id = -1; // used now for freeing buffers
111 
112  // default: load atm on 2 proc, ocean on 2, land on 2; migrate to 2 procs, then compute intx
113  // later, we need to compute weight matrix with tempestremap
114 
115  ProgOptions opts;
116  opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
117 #ifdef ENABLE_ATMOCN_COUPLING
118  opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
119 #endif
120 
121 #ifdef ENABLE_ATMLND_COUPLING
122  opts.addOpt< std::string >( "land,l", "land mesh filename (target)", &lndFilename );
123 #endif
124 
125  opts.addOpt< std::string >( "physgrid,q", "physics grid file", &atmPhysMesh );
126 
127  opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
128  opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
129 #ifdef ENABLE_ATMOCN_COUPLING
130  opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
131  opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
132 #endif
133 
134 #ifdef ENABLE_ATMLND_COUPLING
135  opts.addOpt< int >( "startLnd,e", "start task for land layout", &startG3 );
136  opts.addOpt< int >( "endLnd,f", "end task for land layout", &endG3 );
137 #endif
138 
139  opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
140  opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );
141 
142  opts.addOpt< int >( "partitioning,p", "partitioning option for migration", &repartitioner_scheme );
143 
144  opts.parseCommandLine( argc, argv );
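    // Example invocation (illustrative sketch only; the executable name and task layout
    // below are assumptions, and all options have defaults pointing at the unittest meshes):
    //   mpiexec -n 4 ./imoab_phatm_ocn_coupler -t wholeATM_T.h5m -m recMeshOcn.h5m \
    //       -l wholeLnd.h5m -q AtmPhys_01.h5m -a 0 -b 1 -c 0 -d 1 -e 0 -f 1 -g 0 -j 1 -p 2
    // With no arguments, every component and the coupler run on all available tasks.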
145 
146  char fileWriteOptions[] = "PARALLEL=WRITE_PART";
147 
148  if( !rankInGlobalComm )
149  {
150  std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1 <<
151 #ifdef ENABLE_ATMOCN_COUPLING
152  "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2 <<
153 #endif
154 #ifdef ENABLE_ATMLND_COUPLING
155  "\n land file: " << lndFilename << "\n on tasks : " << startG3 << ":" << endG3 <<
156 #endif
157 
158  "\n atm phys file: " << atmPhysMesh << "\n on tasks : " << startG1 << ":" << endG1 <<
159 
160  "\n partitioning (0 trivial, 1 graph, 2 geometry) " << repartitioner_scheme << "\n ";
161  }
162 
163  // load files on 3 different communicators, groups
164  // the first group has task 0, the second group has tasks 0 and 1
165  // coupler will be on joint tasks, will be on a third group (0 and 1, again)
166  MPI_Group atmPEGroup;
167  MPI_Comm atmComm;
168  ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
169  CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )
170 
171 #ifdef ENABLE_ATMOCN_COUPLING
172  MPI_Group ocnPEGroup;
173  MPI_Comm ocnComm;
174  ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
175  CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
176 #endif
177 
178 #ifdef ENABLE_ATMLND_COUPLING
179  MPI_Group lndPEGroup;
180  MPI_Comm lndComm;
181  ierr = create_group_and_comm( startG3, endG3, jgroup, &lndPEGroup, &lndComm );
182  CHECKIERR( ierr, "Cannot create lnd MPI group and communicator " )
183 #endif
184 
185  // we will always have a coupler
186  MPI_Group couPEGroup;
187  MPI_Comm couComm;
188  ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
189  CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )
190 
191  // now, create the joint communicators atm_coupler, ocn_coupler, lnd_coupler
192  // for each, we will have to create the group first, then the communicator
193 
194  // atm_coupler
195  MPI_Group joinAtmCouGroup;
196  MPI_Comm atmCouComm;
197  ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
198  CHECKIERR( ierr, "Cannot create joint atm cou communicator" )
199 
200 #ifdef ENABLE_ATMOCN_COUPLING
201  // ocn_coupler
202  MPI_Group joinOcnCouGroup;
203  MPI_Comm ocnCouComm;
204  ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
205  CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
206 #endif
207 
208 #ifdef ENABLE_ATMLND_COUPLING
209  // lnd_coupler
210  MPI_Group joinLndCouGroup;
211  MPI_Comm lndCouComm;
212  ierr = create_joint_comm_group( lndPEGroup, couPEGroup, &joinLndCouGroup, &lndCouComm );
213  CHECKIERR( ierr, "Cannot create joint lnd cou communicator" )
214 #endif
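    // at this point there are four PE groups carved out of the world group (atm, ocn, lnd,
    // coupler), plus one joint communicator per component-coupler pair (atmCouComm,
    // ocnCouComm, lndCouComm); the mesh migration and tag send/receive calls below all
    // operate on these joint communicators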
215 
216  ierr = iMOAB_Initialize( argc, argv ); // nothing is really needed from argc, argv yet; maybe it will be later
217  CHECKIERR( ierr, "Cannot initialize iMOAB" )
218 
219  int cmpAtmAppID = -1;
220  iMOAB_AppID cmpAtmPID = &cmpAtmAppID; // atm
221  int cplAtmAppID = -1; // -1 means it is not initialized
222  iMOAB_AppID cplAtmPID = &cplAtmAppID; // atm on coupler PEs
223 
224 #ifdef ENABLE_ATMOCN_COUPLING
225  int cmpOcnAppID = -1;
226  iMOAB_AppID cmpOcnPID = &cmpOcnAppID; // ocn
227  int cplOcnAppID = -1, cplAtmOcnAppID = -1; // -1 means it is not initialized
228  iMOAB_AppID cplOcnPID = &cplOcnAppID; // ocn on coupler PEs
229  iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID; // intx atm -ocn on coupler PEs
230 #endif
231 
232 #ifdef ENABLE_ATMLND_COUPLING
233  int cmpLndAppID = -1;
234  iMOAB_AppID cmpLndPID = &cmpLndAppID; // lnd
235  int cplLndAppID = -1; //, cplAtmLndAppID=-1;// -1 means it is not initialized
236  iMOAB_AppID cplLndPID = &cplLndAppID; // land on coupler PEs
237  // iMOAB_AppID cplAtmLndPID=&cplAtmLndAppID; // intx atm - lnd on coupler PEs not needed anymore
238 #endif
239 
240  int cmpPhysAtmID = -1;
241  iMOAB_AppID cmpPhAtmPID = &cmpPhysAtmID; // phys atm; we do not need to move it to cpl!
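    // each iMOAB "application" is addressed through an iMOAB_AppID, a pointer to an int
    // handle filled in by iMOAB_RegisterApplication; -1 marks a handle that has not been
    // registered on this rank (a component that does not live on the current PEs)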
242 
243  if( couComm != MPI_COMM_NULL )
244  {
245  MPI_Comm_rank( couComm, &rankInCouComm );
246  // Register all the applications on the coupler PEs
247  ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
248  cplAtmPID ); // atm on coupler pes
249  CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
250 #ifdef ENABLE_ATMOCN_COUPLING
251  ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
252  cplOcnPID ); // ocn on coupler pes
253  CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
254 #endif
255 
256 #ifdef ENABLE_ATMLND_COUPLING
257  ierr = iMOAB_RegisterApplication( "LNDX", &couComm, &cpllnd,
258  cplLndPID ); // lnd on coupler pes
259  CHECKIERR( ierr, "Cannot register LND over coupler PEs" )
260 #endif
261  }
262 
263  if( atmCouComm != MPI_COMM_NULL )
264  {
265  ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
266  CHECKIERR( ierr, "Cannot register ATM App" )
267  }
268 
269 #ifdef ENABLE_ATMOCN_COUPLING
270  if( ocnComm != MPI_COMM_NULL )
271  {
272  MPI_Comm_rank( ocnComm, &rankInOcnComm );
273  ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
274  CHECKIERR( ierr, "Cannot register OCN App" )
275  }
276 #endif
277 
278  // atm
279  ierr =
280  setup_component_coupler_meshes( cmpAtmPID, cmpatm, cplAtmPID, cplatm, &atmComm, &atmPEGroup, &couComm,
281  &couPEGroup, &atmCouComm, atmFilename, readopts, nghlay, repartitioner_scheme );
282  CHECKIERR( ierr, "Cannot load and migrate atm mesh" )
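    // setup_component_coupler_meshes (helper from imoab_coupler_utils.hpp) loads the
    // component mesh on its own communicator and migrates it to the coupler PEs over the
    // joint communicator, using the requested repartitioning scheme; cplAtmPID then refers
    // to the migrated atm mesh on the coupler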
283 
284  MPI_Barrier( MPI_COMM_WORLD );
285 
286 #ifdef ENABLE_ATMOCN_COUPLING
287  // ocean
288  ierr =
289  setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
290  &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
291  CHECKIERR( ierr, "Cannot load and migrate ocn mesh" )
292 #ifdef VERBOSE
293  if( couComm != MPI_COMM_NULL )
294  {
295  char outputFileTgt3[] = "recvOcn2.h5m";
296  ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt3, fileWriteOptions );
297  CHECKIERR( ierr, "cannot write ocn mesh after receiving" )
298  }
299 #endif
300 
301 #endif
302 
303  MPI_Barrier( MPI_COMM_WORLD );
304 
305  // load phys atm mesh, with some data on it already
306  if( atmComm != MPI_COMM_NULL )
307  {
308  ierr = iMOAB_RegisterApplication( "PhysAtm", &atmComm, &cmpPhysAtm, cmpPhAtmPID );
309  CHECKIERR( ierr, "Cannot register Phys Atm App " )
310 
311  // load the next component mesh
312  ierr = iMOAB_LoadMesh( cmpPhAtmPID, atmPhysMesh.c_str(), readoptsPhysAtm.c_str(), &nghlay );
313  CHECKIERR( ierr, "Cannot load Atm Phys mesh on atm pes" )
314 
315  int nverts[3], nelem[3];
316  ierr = iMOAB_GetMeshInfo( cmpPhAtmPID, nverts, nelem, 0, 0, 0 );
317  CHECKIERR( ierr, "failed to get mesh info" );
318  printf( "Phys Atm Component Mesh: %d vertices and %d elements\n", nverts[0], nelem[0] );
319  }
320 
321  MPI_Barrier( MPI_COMM_WORLD );
322 
323 #ifdef ENABLE_ATMLND_COUPLING
324  // land
325  if( lndComm != MPI_COMM_NULL )
326  {
327  ierr = iMOAB_RegisterApplication( "LND1", &lndComm, &cmplnd, cmpLndPID );
328  CHECKIERR( ierr, "Cannot register LND App " )
329  }
330  ierr = setup_component_coupler_meshes( cmpLndPID, cmplnd, cplLndPID, cpllnd, &lndComm, &lndPEGroup, &couComm,
331  &couPEGroup, &lndCouComm, lndFilename, readoptsPhysAtm, nghlay,
332  repartitioner_scheme );
333  if( couComm != MPI_COMM_NULL )
334  {
335  char outputFileLnd[] = "recvLnd2.h5m";
336 
337  ierr = iMOAB_WriteMesh( cplLndPID, outputFileLnd, fileWriteOptions );
338  CHECKIERR( ierr, "cannot write lnd mesh after receiving" )
339  }
340 
341 #endif //
342 
343  MPI_Barrier( MPI_COMM_WORLD );
344 
345 #ifdef ENABLE_ATMOCN_COUPLING
346  if( couComm != MPI_COMM_NULL )
347  {
348  // now compute intersection between OCNx and ATMx on coupler PEs
349  ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
350  CHECKIERR( ierr, "Cannot register ocn_atm intx over coupler pes " )
351  }
352 #endif
353 
354  const char* weights_identifiers[2] = { "scalar", "scalar-pc" };
355  int disc_orders[3] = { 4, 1, 1 };
356  const char* disc_methods[3] = { "cgll", "fv", "pcloud" };
357  const char* dof_tag_names[3] = { "GLOBAL_DOFS", "GLOBAL_ID", "GLOBAL_ID" };
358 #ifdef ENABLE_ATMOCN_COUPLING
359  if( couComm != MPI_COMM_NULL )
360  {
361  ierr = iMOAB_ComputeMeshIntersectionOnSphere( cplAtmPID, cplOcnPID, cplAtmOcnPID );
362  // coverage mesh was computed here, for cplAtmPID, atm on coupler pes
363  // basically, atm was redistributed according to target (ocean) partition, to "cover" the
364  // ocean partitions; check if the intx is valid, write some h5m intx file
365  CHECKIERR( ierr, "cannot compute intersection" )
366  }
367 
368  if( atmCouComm != MPI_COMM_NULL )
369  {
370  // the new graph will be for sending data from atm comp to coverage mesh;
371  // it involves initial atm app; cmpAtmPID; also migrate atm mesh on coupler pes, cplAtmPID
372  // results are in cplAtmOcnPID, intx mesh; remapper also has some info about coverage mesh
373  // after this, the sending of tags from atm pes to coupler pes will use the new par comm
374  // graph, that has more precise info about what to send for the ocean cover;
375  // every time, we will use the element global id,
376  // which should uniquely identify the element
377 
378  ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmOcnPID, &cmpatm, &cplatm,
379  &cplocn ); // it happens over joint communicator
380  CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean" )
381  }
382 
383  // need to compute graph between phys atm and atm/ocn intx coverage
384  if( atmCouComm != MPI_COMM_NULL )
385  {
386  int typeA = 2; // point cloud
387  int typeB = 1; // quads in coverage set
388  ierr = iMOAB_ComputeCommGraph( cmpPhAtmPID, cplAtmOcnPID, &atmCouComm, &atmPEGroup, &couPEGroup, &typeA, &typeB,
389  &cmpatm, &atmocnid );
390  }
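    // this comm graph is keyed by the (cmpatm, atmocnid) pair: the phys atm component will
    // send tags with context atmocnid, and the atm/ocn intx app on the coupler will receive
    // them with context cmpatm (see the SendElementTag / ReceiveElementTag calls below)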
391 #endif
392 
393 #ifdef ENABLE_ATMLND_COUPLING
394  // we will compute comm graph between atm phys and land, directly; we do not need any
395  // intersection later, we will send data from atm phys to land on coupler; then back to land
396  // comp;
397  if( atmCouComm != MPI_COMM_NULL )
398  {
399  int typeA = 2; // point cloud
400  int typeB = 2; // point cloud for land on coupler, too
401  ierr = iMOAB_ComputeCommGraph( cmpPhAtmPID, cplLndPID, &atmCouComm, &atmPEGroup, &couPEGroup, &typeA, &typeB,
402  &cmpatm, &cpllnd );
403  }
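    // as above, this graph is keyed by (cmpatm, cpllnd): the phys atm sends with context
    // cpllnd, and the land app on the coupler receives with context cmpatm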
404 #endif
405  MPI_Barrier( MPI_COMM_WORLD );
406 
407  int fMonotoneTypeID = 0, fVolumetric = 0, fValidate = 1, fNoConserve = 0, fNoBubble = 1, fInverseDistanceMap = 0;
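    // options forwarded to TempestRemap when computing the weights below; roughly:
    // fNoBubble disables the bubble correction for the spectral (cgll) source,
    // fMonotoneTypeID selects a monotonicity constraint (0 = none), fVolumetric and
    // fInverseDistanceMap select alternative projection types (both off here),
    // fNoConserve would skip conservation enforcement, and fValidate asks for the
    // generated weight matrix to be validated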
408 
409 #ifdef ENABLE_ATMOCN_COUPLING
410 #ifdef VERBOSE
411  if( couComm != MPI_COMM_NULL )
412  {
413  char serialWriteOptions[] = ""; // for writing in serial
414  std::stringstream outf;
415  outf << "intxAtmOcn_" << rankInCouComm << ".h5m";
416  std::string intxfile = outf.str(); // write in serial the intx file, for debugging
417  ierr = iMOAB_WriteMesh( cplAtmOcnPID, intxfile.c_str(), serialWriteOptions );
418  CHECKIERR( ierr, "cannot write intx file result" )
419  }
420 #endif
421 
422  if( couComm != MPI_COMM_NULL )
423  {
424  PUSH_TIMER( "Compute the projection weights with TempestRemap" )
425  ierr = iMOAB_ComputeScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0], disc_methods[0],
426  &disc_orders[0], disc_methods[1], &disc_orders[1], nullptr,
427  &fNoBubble, &fMonotoneTypeID, &fVolumetric, &fInverseDistanceMap,
428  &fNoConserve, &fValidate, dof_tag_names[0], dof_tag_names[1] );
429  CHECKIERR( ierr, "cannot compute scalar projection weights" )
430  POP_TIMER( couComm, rankInCouComm )
431  }
432 
433 #endif
434 
435  MPI_Barrier( MPI_COMM_WORLD );
436 
437  int tagIndex[2];
438  int tagTypes[2] = { DENSE_DOUBLE, DENSE_DOUBLE };
439  int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = 1 /*FV*/;
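    // with disc_orders[0] = 4, the cgll source carries 4 x 4 = 16 degrees of freedom per
    // element, while the FV ocean target carries a single value per cell; hence
    // atmCompNDoFs = 16 and ocnCompNDoFs = 1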
440 
441  const char* bottomFields =
442  "Sa_z:Sa_topo:Sa_u:Sa_v:Sa_tbot:Sa_ptem:Sa_shum:Sa_pbot:Sa_dens:Sa_uovern:Sa_pslv:Sa_co2prog:Sa_co2diag:Faxa_"
443  "rainc:Faxa_rainl:Faxa_snowc:Faxa_snowl:Faxa_lwdn:Faxa_swndr:Faxa_swvdr:Faxa_swndf:Faxa_swvdf:Faxa_swnet:Faxa_"
444  "bcphidry:Faxa_bcphodry:Faxa_bcphiwet:Faxa_ocphidry:Faxa_ocphodry:Faxa_ocphiwet:Faxa_dstwet1:Faxa_dstwet2:Faxa_"
445  "dstwet3:Faxa_dstwet4:Faxa_dstdry1:Faxa_dstdry2:Faxa_dstdry3:Faxa_dstdry4";
446  const char* bottomFieldsExt =
447  "Sa_z_ext:Sa_topo_ext:Sa_u_ext:Sa_v_ext:Sa_tbot_ext:Sa_ptem_ext:Sa_shum_ext:Sa_pbot_ext:Sa_dens_ext:Sa_uovern_"
448  "ext:Sa_pslv_ext:Sa_co2prog_ext:Sa_co2diag_ext:Faxa_rainc_ext:Faxa_rainl_ext:Faxa_snowc_ext:Faxa_snowl_ext:"
449  "Faxa_lwdn_ext:Faxa_swndr_ext:Faxa_swvdr_ext:Faxa_swndf_ext:Faxa_swvdf_ext:Faxa_swnet_ext:Faxa_bcphidry_ext:"
450  "Faxa_bcphodry_ext:Faxa_bcphiwet_ext:Faxa_ocphidry_ext:Faxa_ocphodry_ext:Faxa_ocphiwet_ext:Faxa_dstwet1_ext:"
451  "Faxa_dstwet2_ext:Faxa_dstwet3_ext:Faxa_dstwet4_ext:Faxa_dstdry1_ext:Faxa_dstdry2_ext:Faxa_dstdry3_ext:Faxa_"
452  "dstdry4_ext";
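    // bottomFields and bottomFieldsExt each list 37 colon-separated field names (the
    // atm -> surface coupling fields and their "_ext" counterparts used on the spectral
    // side); the same factor 37 is used below when sizing the storage that zeroes out
    // the extended tags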
453 
454  if( couComm != MPI_COMM_NULL )
455  {
456  ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFieldsExt, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
457  CHECKIERR( ierr, "failed to define the extended field tags (bottomFieldsExt) on the coupler atm mesh" );
458 #ifdef ENABLE_ATMOCN_COUPLING
459 
460  ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
461  CHECKIERR( ierr, "failed to define the field tags (bottomFields) on the coupler ocn mesh" );
462 #endif
463 
464 #ifdef ENABLE_ATMLND_COUPLING
465  // need to define tag storage for land; will use the same T_proj, u_proj, v_proj names,
466  // because they will be used to send between point clouds; use the same ndof and same size as
467  // ocnCompNDoFs (1)
468  ierr = iMOAB_DefineTagStorage( cplLndPID, bottomFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
469  CHECKIERR( ierr, "failed to define the bottomFields tag on coupler land" );
470 #endif
471  }
472  // need to make sure that the coverage mesh (created during the intx method) receives the tags that
473  // need to be projected to the target; so far, the coverage mesh has only the ids and global dofs;
474  // need to change the migrate method to accommodate any GLL tag
475  // now send a tag from original atmosphere (cmpAtmPID) towards migrated coverage mesh
476  // (cplAtmPID), using the new coverage graph communicator
477 
478  // make the tag 0, to check we are actually sending needed data
479  {
480  if( cplAtmAppID >= 0 )
481  {
482  int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
483  /*
484  * Each process in the communicator will have access to a local mesh instance, which
485  * will contain the original cells in the local partition and ghost entities. Number of
486  * vertices, primary cells, visible blocks, number of sidesets and nodesets boundary
487  * conditions will be returned in size 3 arrays, for local, ghost and total
488  * numbers.
489  */
490  ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
491  CHECKIERR( ierr, "failed to get num primary elems" );
492  int numAllElem = nelem[2];
493  std::vector< double > vals;
494  int storLeng = atmCompNDoFs * numAllElem * 37;
495  vals.resize( storLeng );
496  for( int k = 0; k < storLeng; k++ )
497  vals[k] = 0.;
498  int eetype = 1;
499  ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomFieldsExt, &storLeng, &eetype, &vals[0] );
500  CHECKIERR( ierr, "cannot zero out the tags" )
501  // set the tag to 0
502  }
503  }
504 
505 #ifdef ENABLE_ATMOCN_COUPLING
506  PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
507  if( atmComm != MPI_COMM_NULL )
508  {
509  // as always, use nonblocking sends
510  // this is for projection to ocean:
511  ierr = iMOAB_SendElementTag( cmpPhAtmPID, bottomFields, &atmCouComm, &atmocnid );
512  CHECKIERR( ierr, "cannot send tag values" )
513 #ifdef VERBOSE
514  int is_sender = 1;
515  int context = atmocnid; // used to identify the parcommgraph in charge
516  iMOAB_DumpCommGraph( cmpPhAtmPID, &context, &is_sender, "PhysAtmA2OS" );
517 #endif
518  }
519  if( couComm != MPI_COMM_NULL )
520  {
521  // receive on atm on coupler pes, that was redistributed according to coverage
522  ierr = iMOAB_ReceiveElementTag( cplAtmOcnPID, bottomFieldsExt, &atmCouComm, &cmpatm );
523  CHECKIERR( ierr, "cannot receive tag values" )
524 #ifdef VERBOSE
525  int is_sender = 0;
526  int context = cmpatm; // used to identify the parcommgraph in charge
527  iMOAB_DumpCommGraph( cplAtmOcnPID, &context, &is_sender, "PhysAtmA2OR" );
528 #endif
529  }
531 
532  // we can now free the sender buffers
533  if( atmComm != MPI_COMM_NULL )
534  {
535  ierr = iMOAB_FreeSenderBuffers( cmpPhAtmPID, &atmocnid ); // context is for ocean
536  CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
537  }
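    // the usual iMOAB transfer pattern: the data owner calls iMOAB_SendElementTag
    // (nonblocking), the destination calls iMOAB_ReceiveElementTag with the matching
    // context id, and only afterwards does the sender release its buffers with
    // iMOAB_FreeSenderBuffers; the same pattern is repeated below for coupler -> ocn
    // and for the land path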
538  /*
539  #ifdef VERBOSE
540  if (couComm != MPI_COMM_NULL) {
541  char outputFileRecvd[] = "recvAtmCoupOcn.h5m";
542  ierr = iMOAB_WriteMesh(cplAtmPID, outputFileRecvd, fileWriteOptions,
543  strlen(outputFileRecvd), strlen(fileWriteOptions) );
544  }
545  #endif
546  */
547 
548  if( couComm != MPI_COMM_NULL )
549  {
550  /* We have the remapping weights now. Let us apply the weights onto the tag we defined
551  on the source mesh and get the projection on the target mesh */
552  PUSH_TIMER( "Apply Scalar projection weights" )
553  ierr =
554  iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0], bottomFieldsExt, bottomFields );
555  CHECKIERR( ierr, "failed to apply the projection weights" );
556  POP_TIMER( couComm, rankInCouComm )
557 
558  char outputFileTgt[] = "fOcnOnCpl7.h5m";
559  ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
560  }
561  // send the projected tag back to ocean pes, with send/receive tag
562  if( ocnComm != MPI_COMM_NULL )
563  {
564  int tagIndexIn2;
565  ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
566  CHECKIERR( ierr, "failed to define the field tags for receiving back the atm tags on ocn pes" );
567  }
568  // send the tag to ocean pes, from ocean mesh on coupler pes
569  // from couComm, using common joint comm ocn_coupler
570  // as always, use nonblocking sends
571  // original graph (context is -1)
572  if( couComm != MPI_COMM_NULL )
573  {
574  context_id = cmpocn;
575  ierr = iMOAB_SendElementTag( cplOcnPID, bottomFields, &ocnCouComm, &context_id );
576  CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
577  }
578 
579  // receive on component 2, ocean
580  if( ocnComm != MPI_COMM_NULL )
581  {
582  context_id = cplocn;
583  ierr = iMOAB_ReceiveElementTag( cmpOcnPID, bottomFields, &ocnCouComm, &context_id );
584  CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
585  }
586  MPI_Barrier( MPI_COMM_WORLD );
587 
588  if( couComm != MPI_COMM_NULL )
589  {
590  context_id = cmpocn;
591  ierr = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
592  }
593  if( ocnComm != MPI_COMM_NULL )
594  {
595  char outputFileOcn[] = "OcnWithProj2.h5m";
596  ierr = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
597  }
598 
599 #endif
600 
601 #ifdef ENABLE_ATMLND_COUPLING
602  // start land proj:
603 
604  // we used this to compute
605  // ierr = iMOAB_ComputeCommGraph(cmpPhAtmPID, cplLndPID, &atmCouComm, &atmPEGroup, &couPEGroup,
606  // &typeA, &typeB, &cmpatm, &cpllnd);
607 
608  // end copy
609  PUSH_TIMER( "Send/receive data from phys comp atm to coupler land, using computed graph" )
610  if( atmComm != MPI_COMM_NULL )
611  {
612 
613  // as always, use nonblocking sends
614  // this is for projection to land:
615  ierr = iMOAB_SendElementTag( cmpPhAtmPID, bottomFields, &atmCouComm, &cpllnd );
616  CHECKIERR( ierr, "cannot send tag values towards cpl on land" )
617  }
618  if( couComm != MPI_COMM_NULL )
619  {
620  // receive on lnd on coupler pes
621  ierr = iMOAB_ReceiveElementTag( cplLndPID, bottomFields, &atmCouComm, &cmpatm );
622  CHECKIERR( ierr, "cannot receive tag values on land on coupler" )
623  }
625 
626  // we can now free the sender buffers
627  if( atmComm != MPI_COMM_NULL )
628  {
629  ierr = iMOAB_FreeSenderBuffers( cmpPhAtmPID, &cpllnd );
630  CHECKIERR( ierr, "cannot free buffers used to send atm tag towards the land on coupler" )
631  }
632 
633 #ifdef VERBOSE
634  if( couComm != MPI_COMM_NULL )
635  {
636  char outputFileTgtLnd[] = "fLndOnCpl.h5m";
637  ierr = iMOAB_WriteMesh( cplLndPID, outputFileTgtLnd, fileWriteOptions );
638  }
639 #endif
640 
641  // end land proj
642  // send the tags back to land pes, from land mesh on coupler pes
643  // send from cplLndPID to cmpLndPID, using common joint comm
644  // as always, use nonblocking sends
645  // original graph
646  // int context_id = -1;
647  // the land might not have these tags yet; it should be a different name for land
648  // in e3sm we do have different names
649  if( lndComm != MPI_COMM_NULL )
650  {
651  int tagIndexIn2;
652  ierr = iMOAB_DefineTagStorage( cmpLndPID, bottomFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
653  CHECKIERR( ierr, "failed to define the field tags (bottomFields) for receiving back data on lnd pes" );
654  }
655  if( couComm != MPI_COMM_NULL )
656  {
657  context_id = cmplnd;
658  ierr = iMOAB_SendElementTag( cplLndPID, bottomFields, &lndCouComm, &context_id );
659  CHECKIERR( ierr, "cannot send tag values back to land pes" )
660  }
661  // receive on component 3, land
662  if( lndComm != MPI_COMM_NULL )
663  {
664  context_id = cpllnd; // receive from coupler on land
665  ierr = iMOAB_ReceiveElementTag( cmpLndPID, bottomFields, &lndCouComm, &context_id );
666  CHECKIERR( ierr, "cannot receive tag values from land mesh on coupler pes" )
667  }
668 
669  MPI_Barrier( MPI_COMM_WORLD );
670  if( couComm != MPI_COMM_NULL )
671  {
672  context_id = cmplnd;
673  ierr = iMOAB_FreeSenderBuffers( cplLndPID, &context_id );
674  }
675  if( lndComm != MPI_COMM_NULL )
676  {
677  char outputFileLnd[] = "LndWithProj2.h5m";
678  ierr = iMOAB_WriteMesh( cmpLndPID, outputFileLnd, fileWriteOptions );
679  }
680 
681 #endif // ENABLE_ATMLND_COUPLING
682 
683 #ifdef ENABLE_ATMOCN_COUPLING
684  if( couComm != MPI_COMM_NULL )
685  {
686  ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
687  CHECKIERR( ierr, "cannot deregister app intx AO" )
688  }
689 #endif
690 
691 #ifdef ENABLE_ATMLND_COUPLING
692  if( lndComm != MPI_COMM_NULL )
693  {
694  ierr = iMOAB_DeregisterApplication( cmpLndPID );
695  CHECKIERR( ierr, "cannot deregister app LND1" )
696  }
697 #endif // ENABLE_ATMLND_COUPLING
698  if( atmComm != MPI_COMM_NULL )
699  {
700  ierr = iMOAB_DeregisterApplication( cmpPhAtmPID );
701  CHECKIERR( ierr, "cannot deregister app PhysAtm " )
702  }
703 #ifdef ENABLE_ATMOCN_COUPLING
704  if( ocnComm != MPI_COMM_NULL )
705  {
706  ierr = iMOAB_DeregisterApplication( cmpOcnPID );
707  CHECKIERR( ierr, "cannot deregister app OCN1" )
708  }
709 #endif // ENABLE_ATMOCN_COUPLING
710 
711  if( atmComm != MPI_COMM_NULL )
712  {
713  ierr = iMOAB_DeregisterApplication( cmpAtmPID );
714  CHECKIERR( ierr, "cannot deregister app ATM1" )
715  }
716 
717 #ifdef ENABLE_ATMLND_COUPLING
718  if( couComm != MPI_COMM_NULL )
719  {
720  ierr = iMOAB_DeregisterApplication( cplLndPID );
721  CHECKIERR( ierr, "cannot deregister app LNDX" )
722  }
723 #endif // ENABLE_ATMLND_COUPLING
724 
725 #ifdef ENABLE_ATMOCN_COUPLING
726  if( couComm != MPI_COMM_NULL )
727  {
728  ierr = iMOAB_DeregisterApplication( cplOcnPID );
729  CHECKIERR( ierr, "cannot deregister app OCNX" )
730  }
731 #endif // ENABLE_ATMOCN_COUPLING
732 
733  if( couComm != MPI_COMM_NULL )
734  {
735  ierr = iMOAB_DeregisterApplication( cplAtmPID );
736  CHECKIERR( ierr, "cannot deregister app ATMX" )
737  }
738 
739  //#endif
740  ierr = iMOAB_Finalize();
741  CHECKIERR( ierr, "did not finalize iMOAB" )
742 
743  // free atm coupler group and comm
744  if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
745  MPI_Group_free( &joinAtmCouGroup );
746  if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );
747 
748 #ifdef ENABLE_ATMOCN_COUPLING
749  if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
750  // free ocn - coupler group and comm
751  if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
752  MPI_Group_free( &joinOcnCouGroup );
753 #endif
754 
755 #ifdef ENABLE_ATMLND_COUPLING
756  if( MPI_COMM_NULL != lndComm ) MPI_Comm_free( &lndComm );
757  // free land - coupler group and comm
758  if( MPI_COMM_NULL != lndCouComm ) MPI_Comm_free( &lndCouComm );
759  MPI_Group_free( &joinLndCouGroup );
760 #endif
761 
762  if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );
763 
764  MPI_Group_free( &atmPEGroup );
765 #ifdef ENABLE_ATMOCN_COUPLING
766  MPI_Group_free( &ocnPEGroup );
767 #endif
768 #ifdef ENABLE_ATMLND_COUPLING
769  MPI_Group_free( &lndPEGroup );
770 #endif
771  MPI_Group_free( &couPEGroup );
772  MPI_Group_free( &jgroup );
773 
774  MPI_Finalize();
775  // endif #if 0
776 
777  return 0;
778 }