MOAB: Mesh Oriented datABase  (version 5.5.0)
imoab_coupler_twohop.cpp File Reference
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>
#include "imoab_coupler_utils.hpp"

Go to the source code of this file.
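
This example exercises a "two-hop" coupling path with iMOAB: the atmosphere mesh is migrated from its component PEs to the coupler, where it is intersected with the ocean mesh and projection weights are computed with TempestRemap; a second atmosphere instance on the coupler ("ATMX2") receives the full atm mesh and forwards the fields internally, over a coupler comm graph, to the atm2/ocn intersection before projection, and the projected fields are finally sent back to the ocean component.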

Macros

#define ENABLE_ATMOCN_COUPLING
 
#define ENABLE_ATMCPLOCN_COUPLING
 

Functions

int main (int argc, char *argv[])
 

Macro Definition Documentation

◆ ENABLE_ATMCPLOCN_COUPLING

#define ENABLE_ATMCPLOCN_COUPLING

Definition at line 42 of file imoab_coupler_twohop.cpp.
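
When defined, the test registers a second atmosphere application on the coupler PEs ("ATMX2", id 10), computes the atm2/ocn intersection (id 610), and routes the atm fields through this second hop before projecting them to the ocean.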

◆ ENABLE_ATMOCN_COUPLING

#define ENABLE_ATMOCN_COUPLING

Definition at line 41 of file imoab_coupler_twohop.cpp.
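
When defined, the test loads the ocean mesh, computes the atm/ocn intersection (id 618) on the coupler PEs, projects the atm fields to the ocean, and sends the result back to the ocean component.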

Function Documentation

◆ main()

int main ( int argc, char * argv[] )

We will use the element global id, which should uniquely identify each element.

Definition at line 48 of file imoab_coupler_twohop.cpp.
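
A plausible way to run this test, assuming the executable is built as imoab_coupler_twohop (the mesh file names and task ranges below are illustrative; the option letters come from the ProgOptions declarations in the listing):

    mpiexec -np 2 ./imoab_coupler_twohop -t wholeATM_T.h5m -m recMeshOcn.h5m -a 0 -b 1 -c 0 -d 1 -g 0 -j 1 -p 0 -n 1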

49 {
50  int ierr;
52  MPI_Group jgroup;
53  std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
54  std::string readoptsLnd( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" );
55 
56  // Timer data
57  moab::CpuTimer timer;
58  double timer_ops;
59  std::string opName;
60 
61  int repartitioner_scheme = 0;
62 #ifdef MOAB_HAVE_ZOLTAN
63  repartitioner_scheme = 2; // use the Zoltan partitioner in that case
64 #endif
65 
66  MPI_Init( &argc, &argv );
67  MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
68  MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );
69 
70  MPI_Comm_group( MPI_COMM_WORLD, &jgroup ); // all processes in jgroup
71 
72  std::string atmFilename = TestDir + "unittest/wholeATM_T.h5m";
73  // on a regular case, 5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
74  // intx atm/ocn is not in e3sm yet, so give it a number:
75  // 6 * 100 + 18 = 618 : atmocnid
76  // 10 CPLATM2 (ATMX2), the second atm instance on coupler pes
77  // 6 * 100 + 10 = 610 : atm2ocnid
78  // cmpatm is for atm on atm pes
79  // cmpocn is for ocean, on ocean pes
80  // cplatm is for atm on coupler pes
81  // cplocn is for ocean on coupler pes
82  // atmocnid is for intx atm / ocn on coupler pes
83  //
84  int rankInAtmComm = -1;
85  int cmpatm = 5,
86  cplatm = 6; // component ids are unique over all pes, and established in advance;
87 #ifdef ENABLE_ATMOCN_COUPLING
88  std::string ocnFilename = TestDir + "unittest/recMeshOcn.h5m";
89  std::string baseline = TestDir + "unittest/baseline1.txt";
90  int rankInOcnComm = -1;
91  int cmpocn = 17, cplocn = 18,
92  atmocnid = 618; // component ids are unique over all pes, and established in advance;
93 #endif
94 
95 #ifdef ENABLE_ATMCPLOCN_COUPLING
96  int cplatm2 = 10,
97  atm2ocnid = 610; // component ids are unique over all pes, and established in advance;
98 #endif
99 
100  int rankInCouComm = -1;
101 
102  int nghlay = 0; // number of ghost layers for loading the file
103  std::vector< int > groupTasks;
104  int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1,
105  endG2 = numProcesses - 1; // Support launch of imoab_coupler test on any combo of 2*x processes
106  int startG4 = startG1, endG4 = endG1; // these are for coupler layout
107  int context_id = -1; // used now for freeing buffers
108 
109  // default: load atm on 2 procs, ocean on 2; migrate both to 2 coupler procs, then compute intx
110  // later, we need to compute weight matrix with tempestremap
111 
112  ProgOptions opts;
113  opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
114 #ifdef ENABLE_ATMOCN_COUPLING
115  opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
116 #endif
117  opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
118  opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
119 #ifdef ENABLE_ATMOCN_COUPLING
120  opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
121  opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
122 #endif
123 
124  opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
125  opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );
126 
127  opts.addOpt< int >( "partitioning,p", "partitioning option for migration", &repartitioner_scheme );
128 
129  int n = 1; // number of send/receive / project / send back cycles
130  opts.addOpt< int >( "iterations,n", "number of iterations for coupler", &n );
131 
132  bool no_regression_test = false;
133  opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline 1", &no_regression_test );
134  opts.parseCommandLine( argc, argv );
135 
136  char fileWriteOptions[] = "PARALLEL=WRITE_PART";
137 
138  if( !rankInGlobalComm )
139  {
140  std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1 <<
141 #ifdef ENABLE_ATMOCN_COUPLING
142  "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2 <<
143 #endif
144  "\n partitioning (0 trivial, 1 graph, 2 geometry) " << repartitioner_scheme << "\n ";
145  }
146 
147  // load files on 3 different communicators, groups
148  // first group has task 0, second group tasks 0 and 1
149  // coupler will be on joint tasks, will be on a third group (0 and 1, again)
152  MPI_Group atmPEGroup;
153  MPI_Comm atmComm;
154  ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
155  CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )
156 
157 #ifdef ENABLE_ATMOCN_COUPLING
158  MPI_Group ocnPEGroup;
159  MPI_Comm ocnComm;
160  ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
161  CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
162 #endif
163 
164  // we will always have a coupler
165  MPI_Group couPEGroup;
166  MPI_Comm couComm;
167  ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
168  CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )
169 
170  // atm_coupler
171  MPI_Group joinAtmCouGroup;
172  MPI_Comm atmCouComm;
173  ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
174  CHECKIERR( ierr, "Cannot create joint atm cou communicator" )
175 
176 #ifdef ENABLE_ATMOCN_COUPLING
177  // ocn_coupler
178  MPI_Group joinOcnCouGroup;
179  MPI_Comm ocnCouComm;
180  ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
181  CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
182 #endif
183 
184  ierr = iMOAB_Initialize( argc, argv ); // nothing is really needed from argc, argv yet
185  CHECKIERR( ierr, "Cannot initialize iMOAB" )
186 
187  int cmpAtmAppID = -1;
188  iMOAB_AppID cmpAtmPID = &cmpAtmAppID; // atm
189  int cplAtmAppID = -1; // -1 means it is not initialized
190  iMOAB_AppID cplAtmPID = &cplAtmAppID; // atm on coupler PEs
191 #ifdef ENABLE_ATMOCN_COUPLING
192  int cmpOcnAppID = -1;
193  iMOAB_AppID cmpOcnPID = &cmpOcnAppID; // ocn
194  int cplOcnAppID = -1, cplAtmOcnAppID = -1; // -1 means it is not initialized
195  iMOAB_AppID cplOcnPID = &cplOcnAppID; // ocn on coupler PEs
196  iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID; // intx atm -ocn on coupler PEs
197 #endif
198 
199 #ifdef ENABLE_ATMCPLOCN_COUPLING
200  int cplAtm2AppID = -1; // -1 means it is not initialized
201  iMOAB_AppID cplAtm2PID = &cplAtm2AppID; // atm on second coupler PEs
202  int cplAtm2OcnAppID = -1; // -1 means it is not initialized
203  iMOAB_AppID cplAtm2OcnPID = &cplAtm2OcnAppID; // intx atm2 - ocn on coupler PEs
204 #endif
205 
206  if( couComm != MPI_COMM_NULL )
207  {
208  MPI_Comm_rank( couComm, &rankInCouComm );
209  // Register all the applications on the coupler PEs
210  ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
211  cplAtmPID ); // atm on coupler pes
212  CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
213 #ifdef ENABLE_ATMOCN_COUPLING
214  ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
215  cplOcnPID ); // ocn on coupler pes
216  CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
217 #endif
218 #ifdef ENABLE_ATMCPLOCN_COUPLING
219  ierr = iMOAB_RegisterApplication( "ATMX2", &couComm, &cplatm2,
220  cplAtm2PID ); // second atm on coupler pes
221  CHECKIERR( ierr, "Cannot register second ATM over coupler PEs" )
222 #endif
223  }
224 
225  if( atmComm != MPI_COMM_NULL )
226  {
227  MPI_Comm_rank( atmComm, &rankInAtmComm );
228  ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
229  CHECKIERR( ierr, "Cannot register ATM App" )
230  }
231 
232 #ifdef ENABLE_ATMOCN_COUPLING
233  if( ocnComm != MPI_COMM_NULL )
234  {
235  MPI_Comm_rank( ocnComm, &rankInOcnComm );
236  ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
237  CHECKIERR( ierr, "Cannot register OCN App" )
238  }
239 #endif
240 
241  // atm
242  ierr =
243  setup_component_coupler_meshes( cmpAtmPID, cmpatm, cplAtmPID, cplatm, &atmComm, &atmPEGroup, &couComm,
244  &couPEGroup, &atmCouComm, atmFilename, readopts, nghlay, repartitioner_scheme );
245  CHECKIERR( ierr, "Cannot load and migrate atm mesh" )
246 
247 #ifdef ENABLE_ATMOCN_COUPLING
248  // ocean
249  ierr =
250  setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
251  &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
252  CHECKIERR( ierr, "Cannot load and migrate ocn mesh" )
253 
254 #endif // #ifdef ENABLE_ATMOCN_COUPLING
255 
256 #ifdef ENABLE_ATMCPLOCN_COUPLING
257 
258  if( atmComm != MPI_COMM_NULL )
259  {
260  // then send the mesh to the second app on the coupler pes
261  ierr = iMOAB_SendMesh( cmpAtmPID, &atmCouComm, &couPEGroup, &cplatm2,
262  &repartitioner_scheme ); // send to coupler pes
263  CHECKIERR( ierr, "cannot send elements to coupler-2" )
264  }
265  // now receive the mesh on the coupler communicator
266  if( couComm != MPI_COMM_NULL )
267  {
268  ierr = iMOAB_ReceiveMesh( cplAtm2PID, &atmCouComm, &atmPEGroup,
269  &cmpatm ); // receive from component
270  CHECKIERR( ierr, "cannot receive elements on coupler app" )
271  }
272 
273  // we can now free the sender buffers
274  if( atmComm != MPI_COMM_NULL )
275  {
276  int context_id = cplatm;
277  ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &context_id );
278  CHECKIERR( ierr, "cannot free buffers used to send atm mesh" )
279  }
280 
281  if( couComm != MPI_COMM_NULL && 1 == n )
282  { // write only for n==1 case
283  char outputFileAtm2[] = "recvAtm2.h5m";
284  ierr = iMOAB_WriteMesh( cplAtm2PID, outputFileAtm2, fileWriteOptions );
285  CHECKIERR( ierr, "cannot write second atm mesh after receiving" )
286  }
287 
288 #endif // #ifdef ENABLE_ATMCPLOCN_COUPLING
289 
290  MPI_Barrier( MPI_COMM_WORLD );
291 
292 #ifdef ENABLE_ATMOCN_COUPLING
293  if( couComm != MPI_COMM_NULL )
294  {
295  // now compute intersection between OCNx and ATMx on coupler PEs
296  ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
297  CHECKIERR( ierr, "Cannot register atm/ocn intersection over coupler pes " )
298  }
299 #endif
300 #ifdef ENABLE_ATMCPLOCN_COUPLING
301  if( couComm != MPI_COMM_NULL )
302  {
303  // now compute intersection between LNDx and ATMx on coupler PEs
304  ierr = iMOAB_RegisterApplication( "ATM2OCN", &couComm, &atm2ocnid, cplAtm2OcnPID );
305  CHECKIERR( ierr, "Cannot register atm2/ocn intersection over coupler pes " )
306  }
307 #endif
308 
309  int disc_orders[3] = { 4, 1, 1 };
310  const std::string weights_identifiers[2] = { "scalar", "scalar-pc" };
311  const std::string disc_methods[3] = { "cgll", "fv", "pcloud" };
312  const std::string dof_tag_names[3] = { "GLOBAL_DOFS", "GLOBAL_ID", "GLOBAL_ID" };
313 #ifdef ENABLE_ATMOCN_COUPLING
314  if( couComm != MPI_COMM_NULL )
315  {
316  PUSH_TIMER( "Compute ATM-OCN mesh intersection" )
317  ierr = iMOAB_ComputeMeshIntersectionOnSphere( cplAtmPID, cplOcnPID, cplAtmOcnPID );
318  // coverage mesh was computed here, for cplAtmPID, atm on coupler pes
319  // basically, atm was redistributed according to target (ocean) partition, to "cover" the
320  // ocean partitions; check if intx is valid, write some h5m intx file
321  CHECKIERR( ierr, "cannot compute intersection for atm/ocn" )
322  POP_TIMER( couComm, rankInCouComm )
323 #ifdef VERBOSE
324  char prefix[] = "intx_atmocn";
325  ierr = iMOAB_WriteLocalMesh( cplAtmOcnPID, prefix, strlen( prefix ) );
326  CHECKIERR( ierr, "failed to write local intx mesh" );
327 #endif
328  }
329 
330  if( atmCouComm != MPI_COMM_NULL )
331  {
332  // the new graph will be for sending data from the atm comp to the coverage mesh;
333  // it involves the initial atm app (cmpAtmPID) and the atm mesh migrated on coupler pes (cplAtmPID);
334  // results are in cplAtmOcnPID, the intx mesh; the remapper also has some info about the coverage mesh
335  // after this, the sending of tags from atm pes to coupler pes will use the new par comm
336  // graph, which has more precise info about what to send for the ocean cover; every time, we
337  // will use the element global id, which should uniquely identify the element
339  PUSH_TIMER( "Compute OCN coverage graph for ATM mesh" )
340  ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmOcnPID, &cmpatm, &cplatm,
341  &cplocn ); // it happens over joint communicator
342  CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean" )
343  POP_TIMER( atmCouComm, rankInAtmComm ) // hijack this rank
344  }
345 #endif
346 
347 #ifdef ENABLE_ATMCPLOCN_COUPLING
348  if( couComm != MPI_COMM_NULL )
349  {
350  PUSH_TIMER( "Compute ATM2-OCN mesh intersection" )
351  ierr = iMOAB_ComputeMeshIntersectionOnSphere( cplAtm2PID, cplOcnPID, cplAtm2OcnPID );
352  // coverage mesh was computed here, for cplAtm2PID, the second atm on coupler pes
353  // basically, atm was redistributed according to target (ocean) partition, to "cover" the
354  // ocean partitions; check if intx is valid, write some h5m intx file
355  CHECKIERR( ierr, "cannot compute intersection for atm2/ocn" )
356  POP_TIMER( couComm, rankInCouComm )
357  // }
358  // if( atmCouComm != MPI_COMM_NULL )
359  // {
360  // such a graph would be for sending data from the atm comp to the coverage mesh;
361  // it would involve the initial atm app (cmpAtmPID) and the atm mesh migrated on coupler pes (cplAtm2PID);
362  // results would be in cplAtm2OcnPID, the intx mesh; the remapper also has some info about the coverage mesh;
363  // after this, the sending of tags from atm pes to coupler pes would use the new par comm
364  // graph, which has more precise info about what to send for the ocean cover; every time,
365  // we would use the element global id, which should uniquely identify the element
367  PUSH_TIMER( "Compute OCN coverage graph for ATM2 mesh" )
368  // Context: cplatm2 already holds the comm-graph for communicating between atm component and coupler2
369  // We just need to create a comm graph to internally transfer data from coupler atm to coupler ocean
370  // ierr = iMOAB_CoverageGraph( &couComm, cplAtm2PID, cplAtm2OcnPID, cplAtm2OcnPID, &cplatm2, &atm2ocnid,
371  // &cplocn ); // it happens over joint communicator
372  int type1 = 1;
373  int type2 = 1;
374  ierr = iMOAB_ComputeCommGraph( cplAtm2PID, cplAtm2OcnPID, &couComm, &couPEGroup, &couPEGroup, &type1, &type2,
375  &cplatm2, &atm2ocnid );
376  CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean from atm2" )
377  POP_TIMER( couComm, rankInCouComm ) // hijack this rank
378  }
379 #endif
380 
381  MPI_Barrier( MPI_COMM_WORLD );
382 
383  int fMonotoneTypeID = 0, fVolumetric = 0, fValidate = 0, fNoConserve = 0, fNoBubble = 1, fInverseDistanceMap = 0;
384 
385 #ifdef ENABLE_ATMOCN_COUPLING
386 #ifdef VERBOSE
387  if( couComm != MPI_COMM_NULL && 1 == n )
388  { // write only for n==1 case
389  char serialWriteOptions[] = ""; // for writing in serial
390  std::stringstream outf;
391  outf << "intxAtmOcn_" << rankInCouComm << ".h5m";
392  std::string intxfile = outf.str(); // write in serial the intx file, for debugging
393  ierr = iMOAB_WriteMesh( cplAtmOcnPID, intxfile.c_str(), serialWriteOptions );
394  CHECKIERR( ierr, "cannot write intx file result" )
395  }
396 #endif
397 
398  if( couComm != MPI_COMM_NULL )
399  {
400  PUSH_TIMER( "Compute the projection weights with TempestRemap" )
401  ierr = iMOAB_ComputeScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(),
402  disc_methods[0].c_str(), &disc_orders[0], disc_methods[1].c_str(),
403  &disc_orders[1], nullptr, &fNoBubble, &fMonotoneTypeID,
404  &fVolumetric, &fInverseDistanceMap, &fNoConserve, &fValidate,
405  dof_tag_names[0].c_str(), dof_tag_names[1].c_str() );
406  CHECKIERR( ierr, "cannot compute scalar projection weights" )
407  POP_TIMER( couComm, rankInCouComm )
408 
409  // Let us now write the map file to disk and then read it back to test the I/O API in iMOAB
410 #ifdef MOAB_HAVE_NETCDF
411  {
412  const std::string atmocn_map_file_name = "atm_ocn_map2.nc";
413  ierr = iMOAB_WriteMappingWeightsToFile( cplAtmOcnPID, weights_identifiers[0].c_str(),
414  atmocn_map_file_name.c_str() );
415  CHECKIERR( ierr, "failed to write map file to disk" );
416 
417  const std::string intx_from_file_identifier = "map-from-file";
418  int dummyCpl = -1;
419  int dummy_rowcol = -1;
420  int dummyType = 0;
421  ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, &dummyCpl, &dummy_rowcol, &dummyType,
422  intx_from_file_identifier.c_str(), atmocn_map_file_name.c_str() );
423  CHECKIERR( ierr, "failed to load map file from disk" );
424  }
425 #endif
426  }
427 
428 #endif
429 
430  MPI_Barrier( MPI_COMM_WORLD );
431 
432 #ifdef ENABLE_ATMCPLOCN_COUPLING
433  if( couComm != MPI_COMM_NULL )
434  {
435  PUSH_TIMER( "Compute the projection weights with TempestRemap for atm2/ocn" )
436  ierr = iMOAB_ComputeScalarProjectionWeights( cplAtm2OcnPID, weights_identifiers[0].c_str(),
437  disc_methods[0].c_str(), &disc_orders[0], disc_methods[1].c_str(),
438  &disc_orders[1], nullptr, &fNoBubble, &fMonotoneTypeID,
439  &fVolumetric, &fInverseDistanceMap, &fNoConserve, &fValidate,
440  dof_tag_names[0].c_str(), dof_tag_names[1].c_str() );
441  CHECKIERR( ierr, "cannot compute scalar projection weights for atm2/ocn" )
442  POP_TIMER( couComm, rankInCouComm )
443 
444  // Let us now write the map file to disk and then read it back to test the I/O API in iMOAB
445 #ifdef MOAB_HAVE_NETCDF
446  {
447  const std::string atmocn_map_file_name = "atm2_ocn_map.nc";
448  ierr = iMOAB_WriteMappingWeightsToFile( cplAtm2OcnPID, weights_identifiers[0].c_str(),
449  atmocn_map_file_name.c_str() );
450  CHECKIERR( ierr, "failed to write map file to disk" );
451 
452  const std::string intx_from_file_identifier = "map2-from-file";
453  int dummyCpl = -1;
454  int dummy_rowcol = -1;
455  int dummyType = 0;
456  ierr = iMOAB_LoadMappingWeightsFromFile( cplAtm2OcnPID, &dummyCpl, &dummy_rowcol, &dummyType,
457  intx_from_file_identifier.c_str(), atmocn_map_file_name.c_str() );
458  CHECKIERR( ierr, "failed to load map file from disk" );
459  }
460 #endif
461  }
462 #endif
463 
464  int tagIndex[2];
465  int tagTypes[2] = { DENSE_DOUBLE, DENSE_DOUBLE };
466  int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = 1 /*FV*/;
467 
468  const char* bottomFields = "a2oTbot:a2oUbot:a2oVbot";
469  const char* bottomProjectedFields = "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj";
470  const char* bottomSourceFields2 = "a2oT2bot_src:a2oU2bot_src:a2oV2bot_src";
471  const char* bottomProjectedFields3 = "a2oT2bot_proj:a2oU2bot_proj:a2oV2bot_proj";
472 
473  if( couComm != MPI_COMM_NULL )
474  {
475  ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFields, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
476  CHECKIERR( ierr, "failed to define the field tag a2oTbot" );
477  ierr = iMOAB_DefineTagStorage( cplAtm2PID, bottomFields, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
478  CHECKIERR( ierr, "failed to define the field tag a2oTbot" );
479 
480 #ifdef ENABLE_ATMOCN_COUPLING
481  ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
482  CHECKIERR( ierr, "failed to define the field tag a2oTbot_proj" );
483 #endif
484 #ifdef ENABLE_ATMCPLOCN_COUPLING
485  ierr = iMOAB_DefineTagStorage( cplAtm2PID, bottomSourceFields2, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
486  CHECKIERR( ierr, "failed to define the field tag a2oT2bot_src" );
487  ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomProjectedFields3, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
488  CHECKIERR( ierr, "failed to define the field tag a2oT2bot_proj" );
489 #endif
490  }
491 
492  // need to make sure that the coverage mesh (created during the intx method) received the tags that
493  // need to be projected to the target; so far, the coverage mesh has only the ids and global dofs;
494  // need to change the migrate method to accommodate any GLL tag
495  // now send a tag from original atmosphere (cmpAtmPID) towards migrated coverage mesh
496  // (cplAtmPID), using the new coverage graph communicator
497 
498  // make the tag 0, to check we are actually sending needed data
499  {
500  if( cplAtmAppID >= 0 )
501  {
502  int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
503  /*
504  * Each process in the communicator will have access to a local mesh instance, which
505  * will contain the original cells in the local partition and ghost entities. Number of
506  * vertices, primary cells, visible blocks, and numbers of sideset and nodeset boundary
507  * conditions will be returned in size-3 arrays, for local, ghost and total
508  * numbers.
509  */
510  ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
511  CHECKIERR( ierr, "failed to get num primary elems" );
512  int numAllElem = nelem[2];
513  std::vector< double > vals;
514  int storLeng = atmCompNDoFs * numAllElem * 3; // 3 tags
515  int eetype = 1;
516 
517  vals.resize( storLeng );
518  for( int k = 0; k < storLeng; k++ )
519  vals[k] = 0.;
520 
521  ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomFields, &storLeng, &eetype, &vals[0] );
522  CHECKIERR( ierr, "cannot make tag null" )
523  // set the tag to 0
524  }
525  if( cplAtm2AppID >= 0 )
526  {
527  int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
528  /*
529  * Each process in the communicator will have access to a local mesh instance, which
530  * will contain the original cells in the local partition and ghost entities. Number of
531  * vertices, primary cells, visible blocks, and numbers of sideset and nodeset boundary
532  * conditions will be returned in size-3 arrays, for local, ghost and total
533  * numbers.
534  */
535  ierr = iMOAB_GetMeshInfo( cplAtm2PID, nverts, nelem, nblocks, nsbc, ndbc );
536  CHECKIERR( ierr, "failed to get num primary elems" );
537  int numAllElem = nelem[2];
538  std::vector< double > vals;
539  int storLeng = atmCompNDoFs * numAllElem * 3; // 3 tags
540  int eetype = 1;
541 
542  vals.resize( storLeng );
543  for( int k = 0; k < storLeng; k++ )
544  vals[k] = 0.;
545 
546  ierr = iMOAB_SetDoubleTagStorage( cplAtm2PID, bottomSourceFields2, &storLeng, &eetype, &vals[0] );
547  CHECKIERR( ierr, "cannot make tag null" )
548  // set the tag to 0
549  }
550  }
551 
552  // start a virtual loop for number of iterations
553  for( int iters = 0; iters < n; iters++ )
554  {
555 #ifdef ENABLE_ATMOCN_COUPLING
556  PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
557  if( atmComm != MPI_COMM_NULL )
558  {
559  // as always, use nonblocking sends
560  // this is for projection to ocean:
561  ierr = iMOAB_SendElementTag( cmpAtmPID, bottomFields, &atmCouComm, &cplocn );
562  CHECKIERR( ierr, "cannot send tag values" )
563  }
564  if( couComm != MPI_COMM_NULL )
565  {
566  // receive on atm on coupler pes, that was redistributed according to coverage
567  ierr = iMOAB_ReceiveElementTag( cplAtmPID, bottomFields, &atmCouComm, &cplocn );
568  CHECKIERR( ierr, "cannot receive tag values" )
569  }
571 
572  // we can now free the sender buffers
573  if( atmComm != MPI_COMM_NULL )
574  {
575  ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn ); // context is for ocean
576  CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
577  }
578 #ifdef VERBOSE
579  if( couComm != MPI_COMM_NULL && 1 == n )
580  {
581  // write only for n==1 case
582  char outputFileRecvd[] = "recvAtmCoupOcn.h5m";
583  ierr = iMOAB_WriteMesh( cplAtmPID, outputFileRecvd, fileWriteOptions );
584  CHECKIERR( ierr, "could not write recvAtmCoupOcn.h5m to disk" )
585  }
586 #endif
587 
588  if( couComm != MPI_COMM_NULL )
589  {
590  /* We have the remapping weights now. Let us apply the weights onto the tag we defined
591  on the source mesh and get the projection on the target mesh */
592  PUSH_TIMER( "Apply Scalar projection weights" )
593  ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(), bottomFields,
594  bottomProjectedFields );
595  CHECKIERR( ierr, "failed to compute projection weight application" );
596  POP_TIMER( couComm, rankInCouComm )
597  if( 1 == n ) // write only for n==1 case
598  {
599  char outputFileTgt[] = "fOcnOnCpl1.h5m";
600  ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
601  CHECKIERR( ierr, "could not write fOcnOnCpl1.h5m to disk" )
602  }
603  }
604 
605  // send the projected tag back to ocean pes, with send/receive tag
606  if( ocnComm != MPI_COMM_NULL )
607  {
608  int tagIndexIn2;
609  ierr =
610  iMOAB_DefineTagStorage( cmpOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
611  CHECKIERR( ierr, "failed to define the field tag for receiving back the tags "
612  "a2oTbot_proj, a2oUbot_proj, a2oVbot_proj on ocn pes" );
613  }
614  // send the tag to ocean pes, from ocean mesh on coupler pes
615  // from couComm, using common joint comm ocn_coupler
616  // as always, use nonblocking sends
617  // original graph (context is -1)
618  if( couComm != MPI_COMM_NULL )
619  {
620  // need to use ocean comp id for context
621  context_id = cmpocn; // id for ocean on comp
622  ierr = iMOAB_SendElementTag( cplOcnPID, bottomProjectedFields, &ocnCouComm, &context_id );
623  CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
624  }
625 
626  // receive on component 2, ocean
627  if( ocnComm != MPI_COMM_NULL )
628  {
629  context_id = cplocn; // id for ocean on coupler
630  ierr = iMOAB_ReceiveElementTag( cmpOcnPID, bottomProjectedFields, &ocnCouComm, &context_id );
631  CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
632  }
633 
634  MPI_Barrier( MPI_COMM_WORLD );
635 
636  if( couComm != MPI_COMM_NULL )
637  {
638  context_id = cmpocn;
639  ierr = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
640  CHECKIERR( ierr, "cannot free send/receive buffers for OCN context" )
641  }
642  if( ocnComm != MPI_COMM_NULL && 1 == n ) // write only for n==1 case
643  {
644  char outputFileOcn[] = "OcnWithProj.h5m";
645  ierr = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
646  CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
647  // test results only for n == 1, for the projected temperature field (a2oTbot_proj)
648  if( !no_regression_test )
649  {
650  // the same as remap test
651  // get the temp field on the ocean (from the conservative map) and the global ids, and compare against the baseline file
652  // first get GlobalIds from ocn, and fields:
653  int nverts[3], nelem[3];
654  ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
655  CHECKIERR( ierr, "failed to get ocn mesh info" );
656  std::vector< int > gidElems;
657  gidElems.resize( nelem[2] );
658  std::vector< double > tempElems;
659  tempElems.resize( nelem[2] );
660  // get global id storage
661  const std::string GidStr = "GLOBAL_ID"; // hard coded too
662  int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
663  ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
664  CHECKIERR( ierr, "failed to define global id tag" );
665 
666  int ent_type = 1;
667  ierr = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
668  CHECKIERR( ierr, "failed to get global ids" );
669  ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, "a2oTbot_proj", &nelem[2], &ent_type, &tempElems[0] );
670  CHECKIERR( ierr, "failed to get temperature field" );
671  int err_code = 1;
672  check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
673  if( 0 == err_code )
674  std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
675  }
676  }
677 #endif
678 
679 #ifdef ENABLE_ATMCPLOCN_COUPLING
680  PUSH_TIMER( "Send/receive all data from atm component to the second coupler app" )
681  if( atmComm != MPI_COMM_NULL )
682  {
683  // as always, use nonblocking sends
684  // this is the first hop, towards the second atm app on the coupler:
685  ierr = iMOAB_SendElementTag( cmpAtmPID, bottomFields, &atmCouComm, &cplatm2 );
686  CHECKIERR( ierr, "cannot send tag values" )
687  }
688  if( couComm != MPI_COMM_NULL )
689  {
690  // receive on the second atm app on coupler pes (full mesh, not coverage)
691  ierr = iMOAB_ReceiveElementTag( cplAtm2PID, bottomFields, &atmCouComm, &cmpatm );
692  CHECKIERR( ierr, "cannot receive tag values" )
693  }
695 
696  // we can now free the sender buffers
697  if( atmComm != MPI_COMM_NULL )
698  {
699  ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplatm ); // context here is the atm app on the coupler
700  CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
701  }
702  // #ifdef VERBOSE
703  if( couComm != MPI_COMM_NULL && 1 == n )
704  { // write only for n==1 case
705  char outputFileRecvd[] = "recvAtm2CoupFull.h5m";
706  ierr = iMOAB_WriteMesh( cplAtm2PID, outputFileRecvd, fileWriteOptions );
707  CHECKIERR( ierr, "could not write recvAtm2CoupFull.h5m to disk" )
708  }
709  // #endif
710 
711  PUSH_TIMER( "Send/receive data from atm2 coupler to ocean coupler based on coverage data" )
712  if( couComm != MPI_COMM_NULL )
713  {
714  // as always, use nonblocking sends
715  // this is for projection to ocean:
716  ierr = iMOAB_SendElementTag( cplAtm2PID, bottomFields, &couComm, &atm2ocnid );
717  CHECKIERR( ierr, "cannot send tag values" )
718 
719  // receive on atm on coupler pes, that was redistributed according to coverage
720  // receive in the coverage mesh, basically
721  ierr = iMOAB_ReceiveElementTag( cplAtm2OcnPID, bottomSourceFields2, &couComm, &cplatm2 );
722  CHECKIERR( ierr, "cannot receive tag values" )
723  }
725 
726  // we can now free the sender buffers
727  if( couComm != MPI_COMM_NULL )
728  {
729  ierr = iMOAB_FreeSenderBuffers( cplAtm2PID, &atm2ocnid ); // context is intx external id
730  CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
731  }
732  // #ifdef VERBOSE
733  // we should not write this one; it should be the same as recvAtm2CoupFull above
734  /* if( couComm != MPI_COMM_NULL && 1 == n )
735  { // write only for n==1 case
736  char outputFileRecvd[] = "recvAtm2CoupOcnCtx.h5m";
737  ierr = iMOAB_WriteMesh( cplAtm2PID, outputFileRecvd, fileWriteOptions );
738  CHECKIERR( ierr, "could not write recvAtm2CoupOcnCtx.h5m to disk" )
739  }
740 // #endif*/
741 
742  if( couComm != MPI_COMM_NULL )
743  {
744  /* We have the remapping weights now. Let us apply the weights onto the tag we defined
745  on the source mesh and get the projection on the target mesh */
746  PUSH_TIMER( "Apply Scalar projection weights" )
747  ierr = iMOAB_ApplyScalarProjectionWeights( cplAtm2OcnPID, weights_identifiers[0].c_str(),
748  bottomSourceFields2, bottomProjectedFields3 );
749  CHECKIERR( ierr, "failed to compute projection weight application" );
750  POP_TIMER( couComm, rankInCouComm )
751  if( 1 == n ) // write only for n==1 case
752  {
753  char outputFileTgt[] = "fOcnOnCpl0.h5m";
754  ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
755  CHECKIERR( ierr, "could not write the second fOcnOnCpl0.h5m to disk" )
756  }
757  }
758 
759  std::cout << "applied scalar projection\n";
760  // send the projected tag back to ocean pes, with send/receive tag
761  if( ocnComm != MPI_COMM_NULL )
762  {
763  int tagIndexIn2;
764  ierr =
765  iMOAB_DefineTagStorage( cmpOcnPID, bottomProjectedFields3, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
766  CHECKIERR( ierr, "failed to define the field tag for receiving back the tags "
767  "a2oTbot_proj, a2oUbot_proj, a2oVbot_proj on ocn pes" );
768  }
769  std::cout << "defined tag agian on ocn\n";
770  // send the tag to ocean pes, from ocean mesh on coupler pes
771  // from couComm, using common joint comm ocn_coupler
772  // as always, use nonblocking sends
773  // original graph (context is -1)
774  if( couComm != MPI_COMM_NULL )
775  {
776  // need to use ocean comp id for context
777  context_id = cmpocn; // id for ocean on comp
778  ierr = iMOAB_SendElementTag( cplOcnPID, bottomProjectedFields3, &ocnCouComm, &context_id );
779  CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
780  }
781  std::cout << "sent ocn data from coupler to component\n";
782 
783  // receive on component 2, ocean
784  if( ocnComm != MPI_COMM_NULL )
785  {
786  context_id = cplocn; // id for ocean on coupler
787  ierr = iMOAB_ReceiveElementTag( cmpOcnPID, bottomProjectedFields3, &ocnCouComm, &context_id );
788  CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
789  }
790  std::cout << "received ocn data from coupler to component\n";
791 
792  MPI_Barrier( MPI_COMM_WORLD );
793 
794  if( couComm != MPI_COMM_NULL )
795  {
796  context_id = cmpocn;
797  ierr = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
798  CHECKIERR( ierr, "cannot free send/receive buffers for OCN context" )
799  }
800  std::cout << "freed send/recv ocn data from coupler to component\n";
801  if( ocnComm != MPI_COMM_NULL && 1 == n ) // write only for n==1 case
802  {
803  char outputFileOcn[] = "Ocn2WithProj.h5m";
804  ierr = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
805  CHECKIERR( ierr, "could not write Ocn2WithProj.h5m to disk" )
807  // test results only for n == 1, for the projected temperature field (a2oTbot_proj)
807  if( !no_regression_test )
808  {
809  // the same as remap test
810  // get the temp field on the ocean (from the conservative map) and the global ids, and compare against the baseline file
811  // first get GlobalIds from ocn, and fields:
812  int nverts[3], nelem[3];
813  ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
814  CHECKIERR( ierr, "failed to get ocn mesh info" );
815  std::vector< int > gidElems;
816  gidElems.resize( nelem[2] );
817  std::vector< double > tempElems;
818  tempElems.resize( nelem[2] );
819  // get global id storage
820  const std::string GidStr = "GLOBAL_ID"; // hard coded too
821  int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
822  ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
823  CHECKIERR( ierr, "failed to define global id tag" );
824 
825  int ent_type = 1;
826  ierr = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
827  CHECKIERR( ierr, "failed to get global ids" );
828  ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, "a2oTbot_proj", &nelem[2], &ent_type, &tempElems[0] );
829  CHECKIERR( ierr, "failed to get temperature field" );
830  int err_code = 1;
831  check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
832  if( 0 == err_code )
833  std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
834  }
835 
836  std::cout << "wrote ocn data on component to disk\n";
837  }
838 #endif // ENABLE_ATMCPLOCN_COUPLING
839 
840  } // end loop iterations n
841 #ifdef ENABLE_ATMCPLOCN_COUPLING
842  if( couComm != MPI_COMM_NULL )
843  {
844  ierr = iMOAB_DeregisterApplication( cplAtm2OcnPID );
845  CHECKIERR( ierr, "cannot deregister app intx A2O" )
846  }
847 #endif // ENABLE_ATMCPLOCN_COUPLING
848 
849 #ifdef ENABLE_ATMOCN_COUPLING
850  if( couComm != MPI_COMM_NULL )
851  {
852  ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
853  CHECKIERR( ierr, "cannot deregister app intx AO" )
854  }
855  if( ocnComm != MPI_COMM_NULL )
856  {
857  ierr = iMOAB_DeregisterApplication( cmpOcnPID );
858  CHECKIERR( ierr, "cannot deregister app OCN1" )
859  }
860 #endif // ENABLE_ATMOCN_COUPLING
861 
862  if( atmComm != MPI_COMM_NULL )
863  {
864  ierr = iMOAB_DeregisterApplication( cmpAtmPID );
865  CHECKIERR( ierr, "cannot deregister app ATM1" )
866  }
867 
868 #ifdef ENABLE_ATMOCN_COUPLING
869  if( couComm != MPI_COMM_NULL )
870  {
871  ierr = iMOAB_DeregisterApplication( cplOcnPID );
872  CHECKIERR( ierr, "cannot deregister app OCNX" )
873  }
874 #endif // ENABLE_ATMOCN_COUPLING
875 
876  if( couComm != MPI_COMM_NULL )
877  {
878  ierr = iMOAB_DeregisterApplication( cplAtmPID );
879  CHECKIERR( ierr, "cannot deregister app ATMX" )
880  }
881 
882  //#endif
883  ierr = iMOAB_Finalize();
884  CHECKIERR( ierr, "did not finalize iMOAB" )
885 
886  // free atm coupler group and comm
887  if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
888  MPI_Group_free( &joinAtmCouGroup );
889  if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );
890 
891 #ifdef ENABLE_ATMOCN_COUPLING
892  if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
893  // free ocn - coupler group and comm
894  if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
895  MPI_Group_free( &joinOcnCouGroup );
896 #endif
897 
898  if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );
899 
900  MPI_Group_free( &atmPEGroup );
901 #ifdef ENABLE_ATMOCN_COUPLING
902  MPI_Group_free( &ocnPEGroup );
903 #endif
904  MPI_Group_free( &couPEGroup );
905  MPI_Group_free( &jgroup );
906 
907  MPI_Finalize();
908 
909  return 0;
910 }
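
The heart of the second hop is a pair of tag transfers: one over the joint atm-coupler communicator into the second atm app, and one entirely on the coupler PEs, over the comm graph built with iMOAB_ComputeCommGraph, into the intersection app. A minimal sketch of that pattern, reusing the handles and ids set up in the listing above (error handling abbreviated):

    // hop 1: atm component -> second atm app on the coupler (joint communicator)
    if( atmComm != MPI_COMM_NULL )
        ierr = iMOAB_SendElementTag( cmpAtmPID, bottomFields, &atmCouComm, &cplatm2 );
    if( couComm != MPI_COMM_NULL )
        ierr = iMOAB_ReceiveElementTag( cplAtm2PID, bottomFields, &atmCouComm, &cmpatm );
    if( atmComm != MPI_COMM_NULL )
        ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplatm ); // free the hop-1 send buffers

    // hop 2: second atm app -> atm2/ocn intersection app (coupler PEs only)
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_SendElementTag( cplAtm2PID, bottomFields, &couComm, &atm2ocnid );
        ierr = iMOAB_ReceiveElementTag( cplAtm2OcnPID, bottomSourceFields2, &couComm, &cplatm2 );
        ierr = iMOAB_FreeSenderBuffers( cplAtm2PID, &atm2ocnid ); // context is the intx id
    }

    // apply the precomputed weights to map the received source fields onto the ocean mesh
    if( couComm != MPI_COMM_NULL )
        ierr = iMOAB_ApplyScalarProjectionWeights( cplAtm2OcnPID, weights_identifiers[0].c_str(),
                                                   bottomSourceFields2, bottomProjectedFields3 );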

References ProgOptions::addOpt(), atmFilename, CHECKIERR, cmpatm, create_group_and_comm(), create_joint_comm_group(), DENSE_DOUBLE, DENSE_INTEGER, endG1, endG2, fileWriteOptions(), groupTasks, ierr, iMOAB_AppID, iMOAB_DefineTagStorage(), iMOAB_DeregisterApplication(), iMOAB_Finalize(), iMOAB_GetDoubleTagStorage(), iMOAB_GetIntTagStorage(), iMOAB_GetMeshInfo(), iMOAB_Initialize(), iMOAB_RegisterApplication(), iMOAB_SetDoubleTagStorage(), iMOAB_WriteLocalMesh(), iMOAB_WriteMesh(), jgroup, MPI_COMM_WORLD, nghlay, numProcesses, ProgOptions::parseCommandLine(), POP_TIMER, PUSH_TIMER, rankInAtmComm, rankInGlobalComm, readopts(), setup_component_coupler_meshes(), startG1, and startG2.