MOAB: Mesh Oriented datABase (version 5.5.0)
imoab_apg2_ol_coupler.cpp
/**
 * imoab_apg2_ol_coupler.cpp
 *
 * This test simulates coupling between 3 component meshes, loaded from 3 files
 * (atm phys + atm pg2, ocean, and land). The atm and ocn meshes are migrated to
 * the coupler PEs, the intersection (intx) is performed between the migrated
 * meshes, and weights are generated, such that a field from the phys atm can be
 * transferred to the ocn. Currently, the phys atm sends some data to be
 * projected to the ocean. The same is done for land, except that land is also
 * intersected with the atm pg2 mesh; land is defined by the domain mesh.
 *
 * First, intersect atm and ocn, and recompute comm graph 1 between atm phys and
 * atm_cx, for the ocn intx; then repeat the same for land. The atm/lnd coupling
 * will use similar FV-FV maps; maybe later we will identify some equivalent to
 * bilinear maps.
 */

#include "moab/Core.hpp"
#ifndef MOAB_HAVE_MPI
#error imoab coupler test requires MPI configuration
#endif

// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>

#include "imoab_coupler_utils.hpp"

using namespace moab;

// #define VERBOSE

#ifndef MOAB_HAVE_TEMPESTREMAP
#error The climate coupler test example requires MOAB configuration with TempestRemap
#endif

#define ENABLE_ATMOCN_COUPLING
// for land coupling with phys atm, we do not have to "compute" the intersection;
// it is enough to know that the ids are the same: we can project from atm to land that way, using a computed graph
#define ENABLE_ATMLND_COUPLING

int main( int argc, char* argv[] )
{
    int ierr;
    int rankInGlobalComm, numProcesses;
    MPI_Group jgroup;
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
    std::string readoptsPhysAtm( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" );
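    // Reader-option semantics (MOAB parallel read): PARALLEL=READ_PART makes each task
    // read only its own partition, PARTITION=PARALLEL_PARTITION selects the partition
    // sets written by the partitioner, and PARALLEL_RESOLVE_SHARED_ENTS resolves the
    // vertices shared between adjacent partitions; the phys atm point cloud has no
    // shared entities, hence the shorter option string for it.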

    /*if (argc < 2)
    return 0; // no test; we will force naming the land domain file*/
    // Timer data
    moab::CpuTimer timer;
    double timer_ops;
    std::string opName;

    int repartitioner_scheme = 0;
#ifdef MOAB_HAVE_ZOLTAN
    repartitioner_scheme = 2;  // use the geometry partitioner in that case
#endif

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );

    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes are in jgroup

    std::string atmFilename =
        "../../sandbox/MeshFiles/e3sm/ne4pg2_o240/ne4pg2_p8.h5m";  // we should use only the mesh from here
    std::string atmPhysMesh =
        "../../sandbox/MeshFiles/e3sm/ne4pg2_o240/AtmPhys_pg2.h5m";  // it has some data associated to vertices: T_ph,
                                                                     // u_ph, v_ph
    // we will eventually project that data to the ocean mesh, after the intx atm/ocn

    // in a regular case: 5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
    // the intx atm/ocn is not in e3sm yet, so give it a number:
    // 6 * 100 + 18 = 618 : atmocnid
    // 18 * 100 + 6 = 1806 : ocnatmid, on coupler pes!
    // 9 LND, 10 CPLLND
    // 6 * 100 + 10 = 610 : atmlndid
    // 10 * 100 + 6 = 1006 : lndatmid, on coupler pes
    // cmpatm is for the atm on atm pes
    // cmpocn is for the ocean, on ocean pes
    // cplatm is for the atm on coupler pes
    // cplocn is for the ocean on coupler pes
    // atmocnid is for the intx atm / ocn on coupler pes
    //
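    // A minimal illustrative sketch (not used below) of the id convention described
    // above: every intersection app id encodes the two coupler-side component ids as
    //     intxAppId( cplSrc, cplTgt ) = cplSrc * 100 + cplTgt
    // so, for instance:
    //     auto intxAppId = []( int cplSrc, int cplTgt ) { return cplSrc * 100 + cplTgt; };
    //     assert( intxAppId( 6, 18 ) == 618 && intxAppId( 18, 6 ) == 1806 );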
    int rankInAtmComm = -1;
    int cmpatm = 5, cplatm = 6;  // component ids are unique over all pes, and established in advance;
    int cmpPhysAtm = 105;        // different from atm spectral ?
#ifdef ENABLE_ATMOCN_COUPLING
    std::string ocnFilename = TestDir + "unittest/recMeshOcn.h5m";
    int rankInOcnComm = -1;
    int cmpocn = 17, cplocn = 18, atmocnid = 618,
        ocnatmid = 1806;  // component ids are unique over all pes, and established in advance;
#endif
#ifdef ENABLE_ATMLND_COUPLING
    std::string lndFilename = "../../sandbox/MeshFiles/e3sm/ne4pg2_o240/land_p8.h5m";
    int rankInLndComm = -1;
    int cpllnd = 10, cmplnd = 9, atmlndid = 610,
        lndatmid = 1006;  // component ids are unique over all pes, and established in advance;
#endif

    int rankInCouComm = -1;

    int nghlay = 0;  // number of ghost layers for loading the file
    std::vector< int > groupTasks;
    int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1, startG3 = startG1, endG3 = endG1;
    int startG4 = startG1, endG4 = endG1;  // these are for the coupler layout
    int context_id = -1;                   // used now for freeing buffers

    // default: load atm on 2 procs, ocean on 2, land on 2; migrate to 2 procs, then compute the intx;
    // later, we need to compute the weight matrix with tempestremap

    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
#ifdef ENABLE_ATMOCN_COUPLING
    opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
#endif

#ifdef ENABLE_ATMLND_COUPLING
    opts.addOpt< std::string >( "land,l", "land mesh filename (target)", &lndFilename );
#endif

    opts.addOpt< std::string >( "physgrid,q", "physics grid file", &atmPhysMesh );

    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
#ifdef ENABLE_ATMOCN_COUPLING
    opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
#endif

#ifdef ENABLE_ATMLND_COUPLING
    opts.addOpt< int >( "startLnd,e", "start task for land layout", &startG3 );
    opts.addOpt< int >( "endLnd,f", "end task for land layout", &endG3 );
#endif

    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );

    opts.addOpt< int >( "partitioning,p", "partitioning option for migration", &repartitioner_scheme );

    opts.parseCommandLine( argc, argv );
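    // Example invocation (hypothetical mesh paths and task ranges; adjust to the local setup):
    //   mpiexec -np 8 ./imoab_apg2_ol_coupler -t ne4pg2_p8.h5m -q AtmPhys_pg2.h5m \
    //       -m recMeshOcn.h5m -l land_p8.h5m -a 0 -b 3 -c 4 -d 7 -e 4 -f 7 -g 0 -j 7 -p 2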

    char fileWriteOptions[] = "PARALLEL=WRITE_PART";

    if( !rankInGlobalComm )
    {
        std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1 <<
#ifdef ENABLE_ATMOCN_COUPLING
            "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2 <<
#endif
#ifdef ENABLE_ATMLND_COUPLING
            "\n land file: " << lndFilename << "\n on tasks : " << startG3 << ":" << endG3 <<
#endif

            "\n atm phys file: " << atmPhysMesh << "\n on tasks : " << startG1 << ":" << endG1 <<

            "\n partitioning (0 trivial, 1 graph, 2 geometry) " << repartitioner_scheme << "\n ";
    }
    // load the files on 3 different communicators (groups);
    // the first group has task 0, the second group has tasks 0 and 1;
    // the coupler will be on joint tasks, on a third group (0 and 1, again)
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )
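    // A minimal sketch of what create_group_and_comm (from imoab_coupler_utils.hpp) is
    // assumed to do: include the contiguous task range [start, end] from the world group,
    // then build a communicator on it (MPI_COMM_NULL on tasks outside the range):
    //     std::vector< int > tasks;  // ranks start..end
    //     for( int i = start; i <= end; i++ ) tasks.push_back( i );
    //     MPI_Group_incl( worldGroup, (int)tasks.size(), tasks.data(), newGroup );
    //     MPI_Comm_create( MPI_COMM_WORLD, *newGroup, newComm );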

#ifdef ENABLE_ATMOCN_COUPLING
    MPI_Group ocnPEGroup;
    MPI_Comm ocnComm;
    ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
    CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
#endif

#ifdef ENABLE_ATMLND_COUPLING
    MPI_Group lndPEGroup;
    MPI_Comm lndComm;
    ierr = create_group_and_comm( startG3, endG3, jgroup, &lndPEGroup, &lndComm );
    CHECKIERR( ierr, "Cannot create lnd MPI group and communicator " )
#endif

    // we will always have a coupler
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )

    // now, create the joint communicators atm_coupler, ocn_coupler, lnd_coupler;
    // for each, we will have to create the group first, then the communicator
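    // A minimal sketch of what create_joint_comm_group (from imoab_coupler_utils.hpp) is
    // assumed to do: take the union of the two PE groups and build a communicator on it,
    // so the component and coupler tasks can exchange mesh and tag data directly:
    //     MPI_Group_union( groupA, groupB, jointGroup );
    //     MPI_Comm_create( MPI_COMM_WORLD, *jointGroup, jointComm );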

    // atm_coupler
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )

#ifdef ENABLE_ATMOCN_COUPLING
    // ocn_coupler
    MPI_Group joinOcnCouGroup;
    MPI_Comm ocnCouComm;
    ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
    CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
#endif

#ifdef ENABLE_ATMLND_COUPLING
    // lnd_coupler
    MPI_Group joinLndCouGroup;
    MPI_Comm lndCouComm;
    ierr = create_joint_comm_group( lndPEGroup, couPEGroup, &joinLndCouGroup, &lndCouComm );
    CHECKIERR( ierr, "Cannot create joint lnd cou communicator" )
#endif

    ierr = iMOAB_Initialize( argc, argv );  // nothing is really needed from argc, argv yet; maybe later
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpAtmAppID = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm
    int cplAtmAppID = -1;                  // -1 means it is not initialized
    iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm on coupler PEs
#ifdef ENABLE_ATMOCN_COUPLING
    int cmpOcnAppID = -1;
    iMOAB_AppID cmpOcnPID = &cmpOcnAppID;  // ocn
    int cplOcnAppID = -1, cplAtmOcnAppID = -1, cplOcnAtmAppID = -1;  // -1 means it is not initialized
    iMOAB_AppID cplOcnPID    = &cplOcnAppID;     // ocn on coupler PEs
    iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // intx atm - ocn on coupler PEs
    iMOAB_AppID cplOcnAtmPID = &cplOcnAtmAppID;  // intx ocn - atm on coupler PEs

#endif

#ifdef ENABLE_ATMLND_COUPLING
    int cmpLndAppID = -1;
    iMOAB_AppID cmpLndPID = &cmpLndAppID;  // lnd
    int cplLndAppID = -1, cplAtmLndAppID = -1, cplLndAtmAppID = -1;  // -1 means it is not initialized
    iMOAB_AppID cplLndPID    = &cplLndAppID;     // land on coupler PEs
    iMOAB_AppID cplAtmLndPID = &cplAtmLndAppID;  // intx atm - lnd on coupler PEs; will be similar to the ocn atm intx

    iMOAB_AppID cplLndAtmPID = &cplLndAtmAppID;
#endif

    int cmpPhysAtmID = -1;
    iMOAB_AppID cmpPhAtmPID = &cmpPhysAtmID;  // phys atm; we do not need to move it to the coupler!

    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // Register all the applications on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm, cplAtmPID );  // atm on coupler pes
        CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
#ifdef ENABLE_ATMOCN_COUPLING
        ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn, cplOcnPID );  // ocn on coupler pes
        CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
#endif

#ifdef ENABLE_ATMLND_COUPLING
        ierr = iMOAB_RegisterApplication( "LNDX", &couComm, &cpllnd, cplLndPID );  // lnd on coupler pes
        CHECKIERR( ierr, "Cannot register LND over coupler PEs" )
#endif
    }

    if( atmComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( atmComm, &rankInAtmComm );
        ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register ATM App" )
    }

#ifdef ENABLE_ATMOCN_COUPLING
    if( ocnComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( ocnComm, &rankInOcnComm );
        ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
        CHECKIERR( ierr, "Cannot register OCN App" )
    }
#endif

    // atm
    ierr =
        setup_component_coupler_meshes( cmpAtmPID, cmpatm, cplAtmPID, cplatm, &atmComm, &atmPEGroup, &couComm,
                                        &couPEGroup, &atmCouComm, atmFilename, readopts, nghlay, repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate atm mesh" )

    MPI_Barrier( MPI_COMM_WORLD );

#ifdef ENABLE_ATMOCN_COUPLING
    // ocean
    ierr =
        setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
                                        &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate ocn mesh" )

    if( couComm != MPI_COMM_NULL )
    {
        char outputFileTgt3[] = "recvOcn3.h5m";
        PUSH_TIMER( "Write migrated OCN mesh on coupler PEs" )
        ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt3, fileWriteOptions );
        CHECKIERR( ierr, "cannot write ocn mesh after receiving" )
        POP_TIMER( couComm, rankInCouComm )
    }
#endif  // #ifdef ENABLE_ATMOCN_COUPLING

    MPI_Barrier( MPI_COMM_WORLD );
    // load the phys atm mesh, with some data on it already
    if( atmComm != MPI_COMM_NULL )
    {

        ierr = iMOAB_RegisterApplication( "PhysAtm", &atmComm, &cmpPhysAtm, cmpPhAtmPID );
        CHECKIERR( ierr, "Cannot register Phys Atm App " )

        // load the next component mesh
        PUSH_TIMER( "Load Phys Atm mesh" )
        ierr = iMOAB_LoadMesh( cmpPhAtmPID, atmPhysMesh.c_str(), readoptsPhysAtm.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load Atm Phys mesh on atm pes" )
        POP_TIMER( atmComm, rankInAtmComm )

        int nverts[3], nelem[3];
        ierr = iMOAB_GetMeshInfo( cmpPhAtmPID, nverts, nelem, 0, 0, 0 );
        CHECKIERR( ierr, "failed to get mesh info" );
        printf( "Phys Atm Component Mesh: %d vertices and %d elements\n", nverts[0], nelem[0] );
    }

    MPI_Barrier( MPI_COMM_WORLD );

#ifdef ENABLE_ATMLND_COUPLING
    // land
    if( lndComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "LND1", &lndComm, &cmplnd, cmpLndPID );
        CHECKIERR( ierr, "Cannot register LND App " )
    }
    ierr = setup_component_coupler_meshes( cmpLndPID, cmplnd, cplLndPID, cpllnd, &lndComm, &lndPEGroup, &couComm,
                                           &couPEGroup, &lndCouComm, lndFilename, readoptsPhysAtm, nghlay,
                                           repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate lnd mesh" )

    if( couComm != MPI_COMM_NULL )
    {  // write only for the n==1 case
        char outputFileLnd[] = "recvLnd.h5m";
        ierr = iMOAB_WriteMesh( cplLndPID, outputFileLnd, fileWriteOptions );
        CHECKIERR( ierr, "cannot write lnd mesh after receiving" )
    }

#endif  // #ifdef ENABLE_ATMLND_COUPLING

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        // now compute the intersection between OCNx and ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
        CHECKIERR( ierr, "Cannot register atm_ocn intx over coupler pes " )

        ierr = iMOAB_RegisterApplication( "OCNATM", &couComm, &ocnatmid, cplOcnAtmPID );
        CHECKIERR( ierr, "Cannot register ocn_atm intx over coupler pes " )
    }
#endif

#ifdef ENABLE_ATMLND_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        // now compute the intersection between LNDx and ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMLND", &couComm, &atmlndid, cplAtmLndPID );
        CHECKIERR( ierr, "Cannot register atm_lnd intx over coupler pes " )
        // now compute the intersection between ATMx and LNDx on coupler PEs
        ierr = iMOAB_RegisterApplication( "LNDATM", &couComm, &lndatmid, cplLndAtmPID );
        CHECKIERR( ierr, "Cannot register lnd_atm intx over coupler pes " )
    }
#endif

    const char* weights_identifiers[2] = { "scalar", "scalar-pc" };
    int disc_orders[3]                 = { 4, 1, 1 };
    const char* disc_methods[3]        = { "cgll", "fv", "pcloud" };
    const char* dof_tag_names[3]       = { "GLOBAL_DOFS", "GLOBAL_ID", "GLOBAL_ID" };
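    // index 0 describes a spectral (cgll) discretization of order 4, with GLOBAL_DOFS
    // dof ids; index 1 a first-order finite-volume (fv) mesh keyed on GLOBAL_ID; and
    // index 2 a point cloud (pcloud), also keyed on GLOBAL_ID. Only the fv entries
    // (index 1) are used in the weight computations below.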
#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute ATM-OCN mesh intersection" )
        ierr = iMOAB_ComputeMeshIntersectionOnSphere( cplAtmPID, cplOcnPID, cplAtmOcnPID );
        // the coverage mesh was computed here, for cplAtmPID, the atm on coupler pes;
        // basically, the atm was redistributed according to the target (ocean) partition, to "cover" the ocean
        // partitions; check if the intx is valid, write some h5m intx file
        CHECKIERR( ierr, "cannot compute intersection" )
        POP_TIMER( couComm, rankInCouComm )

        PUSH_TIMER( "Compute OCN-ATM mesh intersection" )
        ierr =
            iMOAB_ComputeMeshIntersectionOnSphere( cplOcnPID, cplAtmPID, cplOcnAtmPID );  // coverage mesh was computed
        CHECKIERR( ierr, "cannot compute intersection" )
        POP_TIMER( couComm, rankInCouComm )
    }

    if( atmCouComm != MPI_COMM_NULL )
    {
        // the new graph will be for sending data from the atm comp to the coverage mesh;
        // it involves the initial atm app (cmpAtmPID) and the migrated atm mesh on coupler pes (cplAtmPID);
        // the results are in cplAtmOcnPID, the intx mesh; the remapper also has some info about the coverage mesh.
        // after this, the sending of tags from atm pes to coupler pes will use the new par comm graph, which has more
        // precise info about what to send for the ocean cover; every time, we will
        // use the element global id, which should uniquely identify the element
        PUSH_TIMER( "Compute OCN coverage graph for ATM mesh" )
        ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmOcnPID, &cmpatm, &cplatm,
                                    &cplocn );  // it happens over the joint communicator
        CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean" )
        POP_TIMER( atmCouComm, rankInAtmComm )  // hijack this rank
    }
    if( ocnCouComm != MPI_COMM_NULL )
    {
        // now for the second intersection, ocn-atm; we will be sending data from the ocean to the atm.
        // Can we reuse the intx atm-ocn? Not sure yet; we will compute everything again :(
        // the new graph will be for sending data from the ocn comp to the coverage mesh over the atm;
        // it involves the initial ocn app (cmpOcnPID) and the migrated ocn mesh on coupler pes (cplOcnPID);
        // the results are in cplOcnAtmPID, the intx mesh; the remapper also has some info about the coverage mesh.
        // after this, the sending of tags from ocn pes to coupler pes will use the new par comm graph, which has more
        // precise info about what to send for the atm cover; every time, we will
        // use the element global id, which should uniquely identify the element
        PUSH_TIMER( "Compute ATM coverage graph for OCN mesh" )
        ierr = iMOAB_CoverageGraph( &ocnCouComm, cmpOcnPID, cplOcnPID, cplOcnAtmPID, &cmpocn, &cplocn,
                                    &cplatm );  // it happens over the joint communicator, ocean + coupler
        CHECKIERR( ierr, "cannot recompute direct coverage graph for atm" )
        POP_TIMER( ocnCouComm, rankInOcnComm )  // hijack this rank
    }

    // need to compute the graph between the phys atm and the atm/ocn intx coverage
    if( atmCouComm != MPI_COMM_NULL )
    {
        int typeA = 2;  // point cloud, phys mesh
        int typeB = 3;  // cells of atmosphere, dof based; maybe we need another type for the ParCommGraph graphtype?
        ierr = iMOAB_ComputeCommGraph( cmpPhAtmPID, cplAtmOcnPID, &atmCouComm, &atmPEGroup, &couPEGroup, &typeA, &typeB,
                                       &cmpPhysAtm, &atmocnid );
        CHECKIERR( ierr, "cannot compute graph between phys grid on atm and intx between FV atm and ocn" )
    }

    // also
    // need to compute the graph between the ocn/atm intx and the phys atm mesh
    /*if( atmCouComm != MPI_COMM_NULL )
    {
        int typeA = 3; // cells of atmosphere, dof based; maybe need another type for ParCommGraph graphtype ?
        int typeB = 2; // point cloud, phys mesh
        ierr = iMOAB_ComputeCommGraph( cplOcnAtmPID, cmpPhAtmPID, &atmCouComm, &couPEGroup, &atmPEGroup, &typeA, &typeB,
                                       &ocnatmid, &cmpatm );
    }*/
    // need to compute the graph between the atm on the coupler and the phys atm mesh on the component
    if( atmCouComm != MPI_COMM_NULL )
    {
        int typeA = 2;  // point cloud, phys mesh
        int typeB = 3;  // cells of atmosphere, dof based; do we need another type for the ParCommGraph graphtype?
        ierr = iMOAB_ComputeCommGraph( cmpPhAtmPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup, &typeA, &typeB,
                                       &cmpPhysAtm, &cplatm );
        CHECKIERR( ierr, "cannot compute graph between phys grid on atm and FV atm on coupler" )
    }
#endif

#ifdef ENABLE_ATMLND_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute ATM-LND mesh intersection" )
        ierr = iMOAB_ComputeMeshIntersectionOnSphere( cplAtmPID, cplLndPID, cplAtmLndPID );
        CHECKIERR( ierr, "failed to compute atm - land intx for mapping" );
        POP_TIMER( couComm, rankInCouComm )

        PUSH_TIMER( "Compute LND-ATM mesh intersection" )
        ierr = iMOAB_ComputeMeshIntersectionOnSphere( cplLndPID, cplAtmPID, cplLndAtmPID );
        CHECKIERR( ierr, "cannot compute intersection" )
        POP_TIMER( couComm, rankInCouComm )
    }
    if( atmCouComm != MPI_COMM_NULL )
    {
        // the new graph will be for sending data from the atm comp to the coverage mesh for the land mesh;
        // it involves the initial atm app (cmpAtmPID) and the migrated atm mesh on coupler pes (cplAtmPID);
        // the results are in cplAtmLndPID, the intx mesh; the remapper also has some info about the coverage mesh.
        // after this, the sending of tags from atm pes to coupler pes will use the new par comm graph, which has more
        // precise info about what to send (specifically for the land cover); every time,
        // we will use the element global id, which should uniquely identify the element
        PUSH_TIMER( "Compute LND coverage graph for ATM mesh" )
        ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmLndPID, &cmpatm, &cplatm,
                                    &cpllnd );  // it happens over the joint communicator
        CHECKIERR( ierr, "cannot recompute direct coverage graph for land" )
        POP_TIMER( atmCouComm, rankInAtmComm )  // hijack this rank
    }

    // we will compute the comm graph between atm phys and land directly;
    // on the new TriGrid workflow, we do need the intersection between atm and land
    if( atmCouComm != MPI_COMM_NULL )
    {
        int typeA = 2;  // point cloud
        int typeB = 3;  // type 3 for land on coupler, based on global ids for land cells ?
        ierr = iMOAB_ComputeCommGraph( cmpPhAtmPID, cplAtmLndPID, &atmCouComm, &atmPEGroup, &couPEGroup, &typeA, &typeB,
                                       &cmpPhysAtm, &atmlndid );
        CHECKIERR( ierr, "cannot compute comm graph between atm and atm/lnd intersection" )
    }

    // for the reverse direction, lnd - atm intx:
    if( lndCouComm != MPI_COMM_NULL )
    {
        // now for the second intersection, lnd-atm; we will be sending data from the land to the atm.
        // Can we reuse the intx atm-lnd? Not sure yet; we will compute everything again :(
        // the new graph will be for sending data from the lnd comp to the coverage mesh over the atm;
        // it involves the initial lnd app (cmpLndPID) and the migrated lnd mesh on coupler pes (cplLndPID);
        // the results are in cplLndAtmPID, the intx mesh; the remapper also has some info about the coverage mesh.
        // after this, the sending of tags from lnd pes to coupler pes will use the new par comm graph, which has more
        // precise info about what to send for the atm cover; every time, we will
        // use the element global id, which should uniquely identify the element
        PUSH_TIMER( "Compute ATM coverage graph for LND mesh" )
        ierr = iMOAB_CoverageGraph( &lndCouComm, cmpLndPID, cplLndPID, cplLndAtmPID, &cmplnd, &cpllnd,
                                    &cplatm );  // it happens over the joint communicator, land + coupler
        CHECKIERR( ierr, "cannot recompute direct coverage graph for atm for intx with land" )
        POP_TIMER( lndCouComm, rankInLndComm )
    }
#endif
    MPI_Barrier( MPI_COMM_WORLD );

    int fMonotoneTypeID = 0, fVolumetric = 0, fValidate = 1, fNoConserve = 0, fNoBubble = 1, fInverseDistanceMap = 0;
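    // flags forwarded to iMOAB_ComputeScalarProjectionWeights below: validate the
    // computed maps (fValidate=1), use no bubble enrichment (fNoBubble=1), keep
    // conservation (fNoConserve=0), and request no monotonicity, volumetric, or
    // inverse-distance options (all 0)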

#ifdef ENABLE_ATMOCN_COUPLING
#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL )
    {
        char serialWriteOptions[] = "";  // for writing in serial
        std::stringstream outf;
        outf << "intxAtmOcn_" << rankInCouComm << ".h5m";
        std::string intxfile = outf.str();  // write the intx file in serial, for debugging
        ierr = iMOAB_WriteMesh( cplAtmOcnPID, intxfile.c_str(), serialWriteOptions );
        CHECKIERR( ierr, "cannot write intx file result" )
    }
#endif

    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute the projection weights with TempestRemap" )
        ierr = iMOAB_ComputeScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0], disc_methods[1],
                                                     &disc_orders[1],                   // fv
                                                     disc_methods[1], &disc_orders[1],  // fv
                                                     nullptr, &fNoBubble, &fMonotoneTypeID, &fVolumetric,
                                                     &fInverseDistanceMap, &fNoConserve, &fValidate, dof_tag_names[1],
                                                     dof_tag_names[1] );
        CHECKIERR( ierr, "cannot compute scalar projection weights" )
        POP_TIMER( couComm, rankInCouComm )
    }

    // now compute the weight maps for the ocn to atm mapping
    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute the projection weights with TempestRemap for the ocn - atm map" )
        ierr = iMOAB_ComputeScalarProjectionWeights( cplOcnAtmPID, weights_identifiers[0], disc_methods[1],
                                                     &disc_orders[1],                   // fv
                                                     disc_methods[1], &disc_orders[1],  // fv
                                                     nullptr, &fNoBubble, &fMonotoneTypeID, &fVolumetric,
                                                     &fInverseDistanceMap, &fNoConserve, &fValidate, dof_tag_names[1],
                                                     dof_tag_names[1] );
        CHECKIERR( ierr, "cannot compute scalar projection weights" )
        POP_TIMER( couComm, rankInCouComm )
    }

#endif

    MPI_Barrier( MPI_COMM_WORLD );

    // we do not need to compute weights ? just send tags between dofs ?

#ifdef ENABLE_ATMLND_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        // Compute the weights to project the solution from the ATM component to the LND component
        PUSH_TIMER( "Compute ATM-LND remapping weights" )
        ierr = iMOAB_ComputeScalarProjectionWeights( cplAtmLndPID, weights_identifiers[0], disc_methods[1],
                                                     &disc_orders[1], disc_methods[1], &disc_orders[1], nullptr,
                                                     &fNoBubble, &fMonotoneTypeID, &fVolumetric, &fInverseDistanceMap,
                                                     &fNoConserve, &fValidate, dof_tag_names[1], dof_tag_names[1] );
        CHECKIERR( ierr, "failed to compute remapping projection weights for the ATM-LND scalar non-conservative field" );
        POP_TIMER( couComm, rankInCouComm )

        // Compute the weights to project the solution from the LND component to the ATM component
        PUSH_TIMER( "Compute LND-ATM remapping weights" )
        ierr = iMOAB_ComputeScalarProjectionWeights( cplLndAtmPID, weights_identifiers[0], disc_methods[1],
                                                     &disc_orders[1], disc_methods[1], &disc_orders[1], nullptr,
                                                     &fNoBubble, &fMonotoneTypeID, &fVolumetric, &fInverseDistanceMap,
                                                     &fNoConserve, &fValidate, dof_tag_names[1], dof_tag_names[1] );
        CHECKIERR( ierr, "failed to compute remapping projection weights for the LND-ATM scalar non-conservative field" );
        POP_TIMER( couComm, rankInCouComm )
    }
#endif

    int tagIndex[2];
    int tagTypes[2]  = { DENSE_DOUBLE, DENSE_DOUBLE };
    int atmCompNDoFs = 1 /* FV disc_orders[0]*disc_orders[0] */, ocnCompNDoFs = 1 /*FV*/;
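    // with FV (cell-centered) discretizations on both sides there is 1 DoF per element;
    // a spectral cgll source of order p would instead carry p*p DoFs per element
    // (16 for disc_orders[0] == 4), which is what the commented hint above refers to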

    const char* bottomFields = "T_ph:u_ph:v_ph";  // same as on the phys atm mesh

    const char* bottomProjectedFields = "T_proj:u_proj:v_proj";

    // coming from ocn to atm, project back from T_proj
    const char* bottomFields2 = "T2_ph:u2_ph:v2_ph";  // same as on the phys atm mesh

    // coming from lnd to atm, project back from T_proj
    const char* bottomFields3 = "T3_ph:u3_ph:v3_ph";  // same as on the phys atm mesh

    // tags on the phys grid atm mesh
    const char* bottomPhProjectedFields = "Tph_proj:uph_proj:vph_proj";

    // tags on the phys grid atm mesh
    const char* bottomPhLndProjectedFields =
        "TphL_proj:uphL_proj:vphL_proj";  // L and Lnd signify the fields projected from land

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFields, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tags T_ph, u_ph, v_ph " );
#ifdef ENABLE_ATMOCN_COUPLING

        ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tags T_proj, u_proj, v_proj " );
#endif

#ifdef ENABLE_ATMLND_COUPLING
        // we need to define tag storage for the land too; we use the same T_proj, u_proj, v_proj names, because they
        // will be used to send between point clouds !
        // use the same ndof and the same size as ocnCompNDoFs (1) !!
        ierr = iMOAB_DefineTagStorage( cplLndPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tags T_proj, u_proj, v_proj" );
#endif
    }
    // we need to make sure that the coverage mesh (created during the intx method) received the tags that need to be
    // projected to the target; so far, the coverage mesh has only the ids and global dofs, and the migrate method
    // would need to change to accommodate any GLL tag.
    // now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage mesh (cplAtmPID),
    // using the new coverage graph communicator
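    // The tag-migration pattern used repeatedly below (a sketch; these are the same iMOAB
    // calls the code makes, shown once for orientation):
    //     iMOAB_SendElementTag( srcPID, "tag1:tag2", &jointComm, &contextId );     // nonblocking sends
    //     iMOAB_ReceiveElementTag( tgtPID, "tag1:tag2", &jointComm, &contextId );  // matching receives
    //     iMOAB_FreeSenderBuffers( srcPID, &contextId );  // only after the receives complete
    // where contextId selects the par comm graph to use (a component/app id, or -1 for
    // the original migration graph)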

    // make the tags 0, to check that we are actually sending the needed data
    {
        if( cplAtmAppID >= 0 )
        {
            int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
            /*
             * Each process in the communicator will have access to a local mesh instance, which will contain the
             * original cells in the local partition and ghost entities. Number of vertices, primary cells, visible
             * blocks, number of sidesets and nodesets boundary conditions will be returned in size-3 arrays,
             * for local, ghost and total numbers.
             */
            ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
            CHECKIERR( ierr, "failed to get num primary elems" );
            int numAllElem = nelem[2];
            std::vector< double > vals;
            int storLeng = atmCompNDoFs * numAllElem * 3;  // 3 tags
            vals.resize( storLeng );
            for( int k = 0; k < storLeng; k++ )
                vals[k] = 0.;
            int eetype = 1;
            ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomFields, &storLeng, &eetype, &vals[0] );
            CHECKIERR( ierr, "cannot make the tags null" )
            // the tags are set to 0 now
        }
    }

#ifdef ENABLE_ATMOCN_COUPLING
    PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
    if( atmComm != MPI_COMM_NULL )
    {
        // as always, use nonblocking sends
        // this is for the projection to the ocean:
        ierr = iMOAB_SendElementTag( cmpPhAtmPID, "T_ph:u_ph:v_ph", &atmCouComm, &atmocnid );
        CHECKIERR( ierr, "cannot send tag values" )
    }
    if( couComm != MPI_COMM_NULL )
    {
        // receive on the atm on coupler pes, which was redistributed according to coverage
        ierr = iMOAB_ReceiveElementTag( cplAtmOcnPID, "T_ph:u_ph:v_ph", &atmCouComm, &cmpPhysAtm );
        CHECKIERR( ierr, "cannot receive tag values on intx atm ocn" )
    }

    // we can now free the sender buffers
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( cmpPhAtmPID, &atmocnid );  // the context is for the ocean
        CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
    }
    /*
    #ifdef VERBOSE
      if (couComm != MPI_COMM_NULL) {
        char outputFileRecvd[] = "recvAtmCoupOcn.h5m";
        ierr = iMOAB_WriteMesh(cplAtmPID, outputFileRecvd, fileWriteOptions );
      }
    #endif
    */

    if( couComm != MPI_COMM_NULL )
    {
        const char* concat_fieldname  = "T_ph:u_ph:v_ph";
        const char* concat_fieldnameT = "T_proj:u_proj:v_proj";

        /* We have the remapping weights now. Let us apply the weights onto the tag we defined
           on the source mesh and get the projection on the target mesh */
        PUSH_TIMER( "Apply Scalar projection weights" )
        ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0], concat_fieldname,
                                                   concat_fieldnameT );
        CHECKIERR( ierr, "failed to compute projection weight application" );
        POP_TIMER( couComm, rankInCouComm )

        char outputFileTgt[] = "fOcnOnCpl3.h5m";
        ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
        CHECKIERR( ierr, "failed to write fOcnOnCpl3.h5m " );
    }
    // send the projected tags back to the ocean pes, with send/receive tag
    if( ocnComm != MPI_COMM_NULL )
    {
        int tagIndexIn2;
        ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
        CHECKIERR( ierr,
                   "failed to define the field tags for receiving back the tags T_proj, u_proj, v_proj on ocn pes" );
    }
    // send the tags to the ocean pes, from the ocean mesh on coupler pes;
    // from couComm, using the common joint comm ocn_coupler;
    // as always, use nonblocking sends
    // original graph (the context is -1)
    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmpocn;
        ierr = iMOAB_SendElementTag( cplOcnPID, "T_proj:u_proj:v_proj", &ocnCouComm, &context_id );
        CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
    }

    // receive on component 2, the ocean
    if( ocnComm != MPI_COMM_NULL )
    {
        context_id = cplocn;
        ierr = iMOAB_ReceiveElementTag( cmpOcnPID, "T_proj:u_proj:v_proj", &ocnCouComm, &context_id );
        CHECKIERR( ierr, "cannot receive tag values from the ocean mesh on coupler pes" )
    }

    MPI_Barrier( MPI_COMM_WORLD );

    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmpocn;
        ierr = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
        CHECKIERR( ierr, "cannot free buffers related to the send tag" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        char outputFileOcn[] = "OcnWithProj3.h5m";
        ierr = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
        CHECKIERR( ierr, "cannot write OcnWithProj3.h5m" )
    }
#endif

    MPI_Barrier( MPI_COMM_WORLD );

#ifdef ENABLE_ATMLND_COUPLING
    // start the land projection:

    // we used this to compute the graph:
    // ierr = iMOAB_ComputeCommGraph(cmpPhAtmPID, cplLndPID, &atmCouComm, &atmPEGroup, &couPEGroup,
    //    &typeA, &typeB, &cmpPhysAtm, &atmlndid);

    // end copy
    PUSH_TIMER( "Send/receive data from phys comp atm to coupler land, using the computed graph" )
    if( atmComm != MPI_COMM_NULL )
    {

        // as always, use nonblocking sends
        // this is for the projection to the land:
        ierr = iMOAB_SendElementTag( cmpPhAtmPID, "T_ph:u_ph:v_ph", &atmCouComm, &atmlndid );
        CHECKIERR( ierr, "cannot send tag values towards cpl on land" )
    }
    if( couComm != MPI_COMM_NULL )
    {
        // receive on lnd on coupler pes
        ierr = iMOAB_ReceiveElementTag( cplAtmLndPID, "T_ph:u_ph:v_ph", &atmCouComm, &cmpPhysAtm );
        CHECKIERR( ierr, "cannot receive tag values on land on coupler, for atm coupling" )
    }

    // we can now free the sender buffers
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( cmpPhAtmPID, &atmlndid );
        CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the land on coupler" )
    }

    if( couComm != MPI_COMM_NULL )
    {
        const char* concat_fieldname  = "T_ph:u_ph:v_ph";
        const char* concat_fieldnameT = "T_proj:u_proj:v_proj";

        /* We have the remapping weights now. Let us apply the weights onto the tag we defined
           on the source mesh and get the projection on the target mesh */
        PUSH_TIMER( "Apply Scalar projection weights" )
        ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmLndPID, weights_identifiers[0], concat_fieldname,
                                                   concat_fieldnameT );
        CHECKIERR( ierr, "failed to compute projection weight application" );
        POP_TIMER( couComm, rankInCouComm )

        char outputFileTgt[] = "fLndOnCpl3.h5m";
        ierr = iMOAB_WriteMesh( cplLndPID, outputFileTgt, fileWriteOptions );
        CHECKIERR( ierr, "cannot write land on coupler" )
    }

    // end land proj
    // send the tags back to the land pes, from the land mesh on coupler pes;
    // send from cplLndPID to cmpLndPID, using the common joint comm;
    // as always, use nonblocking sends
    // original graph
    // int context_id = -1;
    // the land might not have these tags yet; it should be a different name for land;
    // in e3sm we do have different names
    if( lndComm != MPI_COMM_NULL )
    {
        int tagIndexIn2;
        ierr = iMOAB_DefineTagStorage( cmpLndPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
        CHECKIERR( ierr,
                   "failed to define the field tags for receiving back the tags T_proj, u_proj, v_proj on lnd pes" );
    }
    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmplnd;
        ierr = iMOAB_SendElementTag( cplLndPID, "T_proj:u_proj:v_proj", &lndCouComm, &context_id );
        CHECKIERR( ierr, "cannot send tag values back to land pes" )
    }
    // receive on component 3, the land
    if( lndComm != MPI_COMM_NULL )
    {
        context_id = cpllnd;
        ierr = iMOAB_ReceiveElementTag( cmpLndPID, "T_proj:u_proj:v_proj", &lndCouComm, &context_id );
        CHECKIERR( ierr, "cannot receive tag values from the land mesh on coupler pes" )
    }

    MPI_Barrier( MPI_COMM_WORLD );
    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmplnd;
        ierr = iMOAB_FreeSenderBuffers( cplLndPID, &context_id );
        CHECKIERR( ierr, "cannot free buffers related to sending tags from coupler to land pes" )
    }
    if( lndComm != MPI_COMM_NULL )
    {
        char outputFileLnd[] = "LndWithProj3.h5m";
        ierr = iMOAB_WriteMesh( cmpLndPID, outputFileLnd, fileWriteOptions );
        CHECKIERR( ierr, "cannot write the land file with projection" )
    }

    // we have received on the ocean component some data projected from the atm, on the coupler

    // the path was T_ph (atm phys) towards the atm on the FV mesh; we projected, then we sent back to the ocean comp
// start copy ocn atm coupling; go in the reverse direction now !!
#ifdef ENABLE_ATMOCN_COUPLING
    PUSH_TIMER( "Send/receive data from ocn component to coupler in atm context" )
    if( ocnComm != MPI_COMM_NULL )
    {
        // as always, use nonblocking sends
        // this is for the projection to the atm:
        ierr = iMOAB_SendElementTag( cmpOcnPID, "T_proj:u_proj:v_proj", &ocnCouComm, &cplatm );
        CHECKIERR( ierr, "cannot send tag values T_proj, etc towards ocn coupler" )
    }
    if( couComm != MPI_COMM_NULL )
    {
        // receive on the ocn on coupler pes, which was redistributed according to coverage
        ierr = iMOAB_ReceiveElementTag( cplOcnPID, "T_proj:u_proj:v_proj", &ocnCouComm, &cplatm );
        CHECKIERR( ierr, "cannot receive tag values" )
    }

    // we can now free the sender buffers
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( cmpOcnPID, &cplatm );  // the context is for the atm
        CHECKIERR( ierr, "cannot free buffers used to send ocn tag towards the coverage mesh for atm" )
    }
    //#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL )
    {
        // write only for the n==1 case
        char outputFileRecvd[] = "recvOcnCpl.h5m";
        ierr = iMOAB_WriteMesh( cplOcnPID, outputFileRecvd, fileWriteOptions );
        CHECKIERR( ierr, "could not write recvOcnCpl.h5m to disk" )
    }
    //#endif

    if( couComm != MPI_COMM_NULL )
    {
        /* We have the remapping weights now. Let us apply the weights onto the tag we defined
           on the source mesh and get the projection on the target mesh */

        const char* concat_fieldname  = "T_proj:u_proj:v_proj";  // this is now the source tag
        const char* concat_fieldnameT = "T2_ph:u2_ph:v2_ph";     // the projected tag
        // make sure the new tags exist on the atm coupler mesh
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFields2, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tags T2_ph, u2_ph, v2_ph" );

        PUSH_TIMER( "Apply Scalar projection weights" )
        // use the ocn - atm intx weights computed above (cplOcnAtmPID), not the lnd - atm ones
        ierr = iMOAB_ApplyScalarProjectionWeights( cplOcnAtmPID, weights_identifiers[0], concat_fieldname,
                                                   concat_fieldnameT );
        CHECKIERR( ierr, "failed to compute projection weight application" );
        POP_TIMER( couComm, rankInCouComm )

        char outputFileTgt[] = "fAtm2OnCpl2.h5m";
        ierr = iMOAB_WriteMesh( cplAtmPID, outputFileTgt, fileWriteOptions );
        CHECKIERR( ierr, "could not write fAtm2OnCpl2.h5m to disk" )
    }

    // send the projected tags back to the atm pes, with the send/receive partial par graph computed
    // from the intx ocn/atm towards the atm physics mesh !
    if( atmComm != MPI_COMM_NULL )
    {
        int tagIndexIn2;
        ierr =
            iMOAB_DefineTagStorage( cmpPhAtmPID, bottomPhProjectedFields, &tagTypes[1], &atmCompNDoFs, &tagIndexIn2 );
        CHECKIERR( ierr, "failed to define the field tags for receiving back the tags "
                         "Tph_proj, etc on atm pes" );
    }
    // send the tags to the atm pes, from the atm pg2 mesh on coupler pes;
    // from couComm, using the common joint comm atm_coupler and the computed graph, phys-grid -> atm FV on cpl,
    // in reverse; as always, use nonblocking sends
    // the graph context comes from the commgraph ?

    //  ierr = iMOAB_ComputeCommGraph( cmpPhAtmPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup, &typeA,
    //  &typeB,
    //   &cmpatm, &cplatm );

    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmpPhysAtm;
        ierr = iMOAB_SendElementTag( cplAtmPID, "T2_ph:u2_ph:v2_ph", &atmCouComm, &context_id );
        CHECKIERR( ierr, "cannot send tag values back to atm pes" )
    }

    // receive on the component atm phys mesh
    if( atmComm != MPI_COMM_NULL )
    {
        context_id = cplatm;
        ierr = iMOAB_ReceiveElementTag( cmpPhAtmPID, "Tph_proj:uph_proj:vph_proj", &atmCouComm, &context_id );
        CHECKIERR( ierr, "cannot receive tag values from the atm pg2 mesh on coupler pes" )
    }

    MPI_Barrier( MPI_COMM_WORLD );

    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmpPhysAtm;  // same context as the send above
        ierr = iMOAB_FreeSenderBuffers( cplAtmPID, &context_id );
        CHECKIERR( ierr, "cannot free buffers for sending T2_ph from cpl to phys atm" )
    }
    if( atmComm != MPI_COMM_NULL )  // write only for the n==1 case
    {
        char outputFileAtmPh[] = "AtmPhysProj.h5m";
        ierr = iMOAB_WriteMesh( cmpPhAtmPID, outputFileAtmPh, fileWriteOptions );
        CHECKIERR( ierr, "could not write AtmPhysProj.h5m to disk" )
    }
#endif
    // end copy; more work is needed for land
// start copy
// lnd atm coupling; go in the reverse direction now, from lnd to atm, using the lnd - atm intx and weight generation;
// send back the T_proj, etc, from the lnd comp to the land coupler
#ifdef ENABLE_ATMLND_COUPLING
    PUSH_TIMER( "Send/receive data from lnd component to coupler in atm context" )
    if( lndComm != MPI_COMM_NULL )
    {
        // as always, use nonblocking sends
        ierr = iMOAB_SendElementTag( cmpLndPID, "T_proj:u_proj:v_proj", &lndCouComm, &cplatm );
        CHECKIERR( ierr, "cannot send tag values T_proj, etc towards lnd coupler" )
    }
    if( couComm != MPI_COMM_NULL )
    {
        // receive on the lnd on coupler pes, which was redistributed according to coverage
        ierr = iMOAB_ReceiveElementTag( cplLndPID, "T_proj:u_proj:v_proj", &lndCouComm, &cplatm );
        CHECKIERR( ierr, "cannot receive tag values on land" )
    }

    // we can now free the sender buffers
    if( lndComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( cmpLndPID, &cplatm );  // the context is for the atm
        CHECKIERR( ierr, "cannot free buffers used to send lnd tag towards the coverage mesh for atm" )
    }
    //#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL )
    {

        char outputFileRecvd[] = "recvLndCpl.h5m";
        ierr = iMOAB_WriteMesh( cplLndPID, outputFileRecvd, fileWriteOptions );
        CHECKIERR( ierr, "could not write recvLndCpl.h5m to disk" )
    }
    //#endif

    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Apply Scalar projection weights for lnd - atm coupling" )
        const char* concat_fieldname  = "T_proj:u_proj:v_proj";  // this is now the source tag, on land cpl
        const char* concat_fieldnameT = "T3_ph:u3_ph:v3_ph";     // the projected tag
        // make sure the new tags exist on the atm coupler mesh
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFields3, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tags T3_ph, u3_ph, v3_ph" );

        ierr = iMOAB_ApplyScalarProjectionWeights( cplLndAtmPID, weights_identifiers[0], concat_fieldname,
                                                   concat_fieldnameT );
        CHECKIERR( ierr, "failed to compute projection weight application from lnd to atm " );
        POP_TIMER( couComm, rankInCouComm )

        char outputFileTgt[] = "fAtm3OnCpl.h5m";  // this is for T3_ph, etc
        ierr = iMOAB_WriteMesh( cplAtmPID, outputFileTgt, fileWriteOptions );
        CHECKIERR( ierr, "could not write fAtm3OnCpl.h5m to disk" )
    }

    // send the projected tags back to the atm pes, with the send/receive partial par graph computed
    // from the intx lnd/atm towards the atm physics mesh !
    if( atmComm != MPI_COMM_NULL )
    {
        int tagIndexIn2;
        ierr = iMOAB_DefineTagStorage( cmpPhAtmPID, bottomPhLndProjectedFields, &tagTypes[1], &atmCompNDoFs,
                                       &tagIndexIn2 );
        CHECKIERR( ierr, "failed to define the field tags for receiving back the tag "
                         "TphL_proj, on atm pes" );
    }
    // send the tags to the atm pes, from the atm pg2 mesh on coupler pes;
    // from couComm, using the common joint comm atm_coupler and the computed graph, phys-grid -> atm FV on cpl,
    // in reverse; as always, use nonblocking sends
    // the graph context comes from the commgraph ?

    //  ierr = iMOAB_ComputeCommGraph( cmpPhAtmPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup, &typeA,
    //  &typeB,
    //   &cmpatm, &cplatm );

    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmpPhysAtm;
        ierr = iMOAB_SendElementTag( cplAtmPID, "T3_ph:u3_ph:v3_ph", &atmCouComm, &context_id );
        CHECKIERR( ierr, "cannot send tag values back to atm pes" )
    }

    // receive on the component atm phys mesh
    if( atmComm != MPI_COMM_NULL )
    {
        context_id = cplatm;
        ierr = iMOAB_ReceiveElementTag( cmpPhAtmPID, "TphL_proj:uphL_proj:vphL_proj", &atmCouComm, &context_id );
        CHECKIERR( ierr, "cannot receive tag values from the atm pg2 mesh on coupler pes" )
    }

    MPI_Barrier( MPI_COMM_WORLD );

    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmpPhysAtm;  // same context as the send above
        ierr = iMOAB_FreeSenderBuffers( cplAtmPID, &context_id );
        CHECKIERR( ierr, "cannot free buffers used for sending back atm tags " )
    }
    if( atmComm != MPI_COMM_NULL )  // write only for the n==1 case
    {
        char outputFileAtmPh[] = "AtmPhysProj3.h5m";
        ierr = iMOAB_WriteMesh( cmpPhAtmPID, outputFileAtmPh, fileWriteOptions );
        CHECKIERR( ierr, "could not write AtmPhysProj3.h5m to disk" )
    }
#endif
    // we could deregister cplLndAtmPID
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplLndAtmPID );
        CHECKIERR( ierr, "cannot deregister app intx LA" )
    }

    // we could deregister cplAtmLndPID
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmLndPID );
        CHECKIERR( ierr, "cannot deregister app intx AL" )
    }
#endif  // ENABLE_ATMLND_COUPLING

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplOcnAtmPID );
        CHECKIERR( ierr, "cannot deregister app intx OA" )
    }
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
        CHECKIERR( ierr, "cannot deregister app intx AO" )
    }
#endif

#ifdef ENABLE_ATMLND_COUPLING
    if( lndComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpLndPID );
        CHECKIERR( ierr, "cannot deregister app LND1" )
    }
#endif  // ENABLE_ATMLND_COUPLING
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpPhAtmPID );
        CHECKIERR( ierr, "cannot deregister app PhysAtm " )
    }
#ifdef ENABLE_ATMOCN_COUPLING
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCN1" )
    }
#endif  // ENABLE_ATMOCN_COUPLING

    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATM1" )
    }

#ifdef ENABLE_ATMLND_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplLndPID );
        CHECKIERR( ierr, "cannot deregister app LNDX" )
    }
#endif  // ENABLE_ATMLND_COUPLING

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCNX" )
    }
#endif  // ENABLE_ATMOCN_COUPLING

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATMX" )
    }

    ierr = iMOAB_Finalize();
    CHECKIERR( ierr, "did not finalize iMOAB" )

    // free the atm coupler group and comm
    if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
    MPI_Group_free( &joinAtmCouGroup );
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );

#ifdef ENABLE_ATMOCN_COUPLING
    if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
    // free the ocn - coupler group and comm
    if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
    MPI_Group_free( &joinOcnCouGroup );
#endif

#ifdef ENABLE_ATMLND_COUPLING
    if( MPI_COMM_NULL != lndComm ) MPI_Comm_free( &lndComm );
    // free the land - coupler group and comm
    if( MPI_COMM_NULL != lndCouComm ) MPI_Comm_free( &lndCouComm );
    MPI_Group_free( &joinLndCouGroup );
#endif

    if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );

    MPI_Group_free( &atmPEGroup );
#ifdef ENABLE_ATMOCN_COUPLING
    MPI_Group_free( &ocnPEGroup );
#endif
#ifdef ENABLE_ATMLND_COUPLING
    MPI_Group_free( &lndPEGroup );
#endif
    MPI_Group_free( &couPEGroup );
    MPI_Group_free( &jgroup );

    MPI_Finalize();

    return 0;
}