MOAB: Mesh Oriented datABase  (version 5.5.0)
imoab_coupler_bilin.cpp File Reference
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>
#include "imoab_coupler_utils.hpp"

Go to the source code of this file.

Macros

#define ENABLE_ATMOCN_COUPLING
 

Functions

int main (int argc, char *argv[])
 

Macro Definition Documentation

◆ ENABLE_ATMOCN_COUPLING

#define ENABLE_ATMOCN_COUPLING

Definition at line 47 of file imoab_coupler_bilin.cpp.
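
When ENABLE_ATMOCN_COUPLING is defined (as it is in this example), the ocean component and the atm-ocn intersection and projection code paths are compiled in; without it, the example reduces to loading and migrating only the atmosphere mesh.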

Function Documentation

◆ main()

int main ( int  argc,
char *  argv[] 
)

Definition at line 49 of file imoab_coupler_bilin.cpp.
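
In outline, main() loads an atmosphere mesh and an ocean mesh on separate groups of MPI tasks, migrates both onto the coupler tasks, computes the mesh intersection on the sphere, builds a bilinear projection map with TempestRemap, projects the fields Sa_dens and Sa_pbot from atmosphere to ocean in two communication hops, and optionally checks the result against a baseline file.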

50 {
51  int ierr;
52  int rankInGlobalComm, numProcesses;
53  MPI_Group jgroup;
54  std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
55 
56  // Timer data
57  moab::CpuTimer timer;
58  double timer_ops;
59  std::string opName;
60 
61  int repartitioner_scheme = 0;
62 #ifdef MOAB_HAVE_ZOLTAN
63  repartitioner_scheme = 2; // use the Zoltan-based geometry partitioner in that case
64 #endif
65 
66  MPI_Init( &argc, &argv );
67  MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
68  MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );
69 
70  MPI_Comm_group( MPI_COMM_WORLD, &jgroup ); // all processes in jgroup
71 
72  std::string atmFilename = TestDir + "unittest/atm_c2x.h5m";
73  // on a regular case: 5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
74  // the intx atm/ocn id is not in E3SM yet, so we pick a number:
75  // 6 * 100 + 18 = 618 : atmocnid
76  // similarly, 9 LND, 10 CPLLND
77  // 6 * 100 + 10 = 610 : atmlndid
78  // cmpatm is for atm on atm pes
79  // cmpocn is for ocean, on ocean pes
80  // cplatm is for atm on coupler pes
81  // cplocn is for ocean on coupler pes
82  // atmocnid is for intx atm / ocn on coupler pes
83  //
84  int rankInAtmComm = -1;
85  int cmpatm = 5,
86  cplatm = 6; // component ids are unique over all pes, and established in advance;
87 #ifdef ENABLE_ATMOCN_COUPLING
88  std::string ocnFilename = TestDir + "unittest/wholeOcn.h5m";
89  int rankInOcnComm = -1;
90  int cmpocn = 17, cplocn = 18,
91  atmocnid = 618; // component ids are unique over all pes, and established in advance;
92 #endif
93 
94  int rankInCouComm = -1;
95 
96  int nghlay = 0; // number of ghost layers for loading the file
97  std::vector< int > groupTasks;
98  int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1;
99  // Support launch of imoab_coupler test on any combo of 2*x processes
100  int startG4 = startG1, endG4 = endG1; // these are for coupler layout
101  int context_id; // used now for freeing buffers
102 
103  // default: load atm on 2 procs, ocean on 2; migrate both to 2 coupler procs, then compute the intersection
104  // and the weight matrix with TempestRemap
105 
106  ProgOptions opts;
107  opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
108 #ifdef ENABLE_ATMOCN_COUPLING
109  opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
110  std::string baseline = TestDir + "unittest/baseline3.txt";
111 #endif
112  opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
113  opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
114 #ifdef ENABLE_ATMOCN_COUPLING
115  opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
116  opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
117 #endif
118 
119  opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
120  opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );
121 
122  opts.addOpt< int >( "partitioning,p", "partitioning option for migration", &repartitioner_scheme );
123 
124  bool no_regression_test = false;
125  opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline 3", &no_regression_test );
126 
127  opts.parseCommandLine( argc, argv );
128 
129  char fileWriteOptions[] = "PARALLEL=WRITE_PART";
130 
131  if( !rankInGlobalComm )
132  {
133  std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1 <<
134 #ifdef ENABLE_ATMOCN_COUPLING
135  "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2 <<
136 #endif
137  "\n partitioning (0 trivial, 1 graph, 2 geometry) " << repartitioner_scheme << "\n ";
138  }
139 
140  // load files on 3 different communicators/groups
141  // the first group has task 0, the second group tasks 0 and 1
142  // the coupler will be on joint tasks, on a third group (0 and 1, again)
145  MPI_Group atmPEGroup;
146  MPI_Comm atmComm;
147  ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
148  CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )
149 
150 #ifdef ENABLE_ATMOCN_COUPLING
151  MPI_Group ocnPEGroup;
152  MPI_Comm ocnComm;
153  ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
154  CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
155 #endif
156 
157  // we will always have a coupler
158  MPI_Group couPEGroup;
159  MPI_Comm couComm;
160  ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
161  CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )
162 
163  // atm_coupler
164  MPI_Group joinAtmCouGroup;
165  MPI_Comm atmCouComm;
166  ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
167  CHECKIERR( ierr, "Cannot create joint atm cou communicator" )
168 
169 #ifdef ENABLE_ATMOCN_COUPLING
170  // ocn_coupler
171  MPI_Group joinOcnCouGroup;
172  MPI_Comm ocnCouComm;
173  ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
174  CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
175 #endif
176 
177  ierr = iMOAB_Initialize( argc, argv ); // nothing is really needed from argc, argv yet
178  CHECKIERR( ierr, "Cannot initialize iMOAB" )
179 
180  int cmpAtmAppID = -1;
181  iMOAB_AppID cmpAtmPID = &cmpAtmAppID; // atm
182  int cplAtmAppID = -1; // -1 means it is not initialized
183  iMOAB_AppID cplAtmPID = &cplAtmAppID; // atm on coupler PEs
184 #ifdef ENABLE_ATMOCN_COUPLING
185  int cmpOcnAppID = -1;
186  iMOAB_AppID cmpOcnPID = &cmpOcnAppID; // ocn
187  int cplOcnAppID = -1, cplAtmOcnAppID = -1; // -1 means it is not initialized
188  iMOAB_AppID cplOcnPID = &cplOcnAppID; // ocn on coupler PEs
189  iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID; // intx atm -ocn on coupler PEs
190 #endif
191 
192  if( couComm != MPI_COMM_NULL )
193  {
194  MPI_Comm_rank( couComm, &rankInCouComm );
195  // Register all the applications on the coupler PEs
196  ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
197  cplAtmPID ); // atm on coupler pes
198  CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
199 #ifdef ENABLE_ATMOCN_COUPLING
200  ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
201  cplOcnPID ); // ocn on coupler pes
202  CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
203 #endif
204  }
205 
206  if( atmComm != MPI_COMM_NULL )
207  {
208  MPI_Comm_rank( atmComm, &rankInAtmComm );
209  ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
210  CHECKIERR( ierr, "Cannot register ATM App" )
211  }
212 
213 #ifdef ENABLE_ATMOCN_COUPLING
214  if( ocnComm != MPI_COMM_NULL )
215  {
216  MPI_Comm_rank( ocnComm, &rankInOcnComm );
217  ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
218  CHECKIERR( ierr, "Cannot register OCN App" )
219  }
220 #endif
221 
222  // atm
223  ierr =
224  setup_component_coupler_meshes( cmpAtmPID, cmpatm, cplAtmPID, cplatm, &atmComm, &atmPEGroup, &couComm,
225  &couPEGroup, &atmCouComm, atmFilename, readopts, nghlay, repartitioner_scheme );
226  CHECKIERR( ierr, "Cannot load and migrate atm mesh" )
227 
228  if( couComm != MPI_COMM_NULL )
229  { // write the migrated atm mesh, for verification
230  char outputFileTgt3[] = "recvAtmx.h5m";
231  ierr = iMOAB_WriteMesh( cplAtmPID, outputFileTgt3, fileWriteOptions );
232  CHECKIERR( ierr, "cannot write atm mesh after receiving" )
233  }
234  MPI_Barrier( MPI_COMM_WORLD );
235 
236 #ifdef ENABLE_ATMOCN_COUPLING
237  // ocean
238  ierr =
239  setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
240  &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
241  CHECKIERR( ierr, "Cannot load and migrate ocn mesh" )
242 
243  MPI_Barrier( MPI_COMM_WORLD );
244 
245 #endif // #ifdef ENABLE_ATMOCN_COUPLING
246 
247  MPI_Barrier( MPI_COMM_WORLD );
248 
249 #ifdef ENABLE_ATMOCN_COUPLING
250  if( couComm != MPI_COMM_NULL )
251  {
252  // now compute intersection between OCNx and ATMx on coupler PEs
253  ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
254  CHECKIERR( ierr, "Cannot register ocn_atm intx over coupler pes " )
255  }
256 #endif
257 
258  int disc_orders[1] = { 1 };
259  const std::string weights_identifiers[1] = { "bilinear" };
260  const std::string disc_methods[1] = { "fv" };
261  const std::string dof_tag_names[1] = { "GLOBAL_ID" };
262  const std::string method = "bilin";
263 #ifdef ENABLE_ATMOCN_COUPLING
264  if( couComm != MPI_COMM_NULL )
265  {
266  PUSH_TIMER( "Compute ATM-OCN mesh intersection" )
267  ierr = iMOAB_ComputeMeshIntersectionOnSphere( cplAtmPID, cplOcnPID, cplAtmOcnPID );
268  // the coverage mesh was computed here, for cplAtmPID, atm on coupler pes
269  // basically, atm was redistributed according to the target (ocean) partition, to "cover"
270  // the ocean partitions; one could check that the intx is valid and write an h5m intx file
271  CHECKIERR( ierr, "cannot compute intersection" )
272  POP_TIMER( couComm, rankInCouComm )
273  }
274 
275  if( couComm != MPI_COMM_NULL )
276  {
277 
278  // We just need to create a comm graph to internally transfer data from coupler atm to coupler ocean
279  // ierr = iMOAB_CoverageGraph( &couComm, cplAtm2PID, cplAtm2OcnPID, cplAtm2OcnPID, &cplatm2, &atm2ocnid,
280  // &cplocn ); // it happens over joint communicator
281  PUSH_TIMER( "Compute communication graph for second hop" )
282  int type1 = 3, type2 = 3;
283  ierr = iMOAB_ComputeCommGraph( cplAtmPID, cplAtmOcnPID, &couComm, &couPEGroup, &couPEGroup, &type1, &type2,
284  &cplatm, &atmocnid );
285  CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean from atm" )
286  POP_TIMER( couComm, rankInCouComm )
287  }
288 #endif
289 
290  MPI_Barrier( MPI_COMM_WORLD );
291 
292  int fMonotoneTypeID = 0, fVolumetric = 0, fValidate = 0, fNoConserve = 0, fNoBubble = 1, fInverseDistanceMap = 0;
293 
294 #ifdef ENABLE_ATMOCN_COUPLING
295 
296  if( couComm != MPI_COMM_NULL )
297  {
298  PUSH_TIMER( "Compute the projection weights with TempestRemap" )
299  ierr = iMOAB_ComputeScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(),
300  disc_methods[0].c_str(), &disc_orders[0], disc_methods[0].c_str(),
301  &disc_orders[0], method.c_str(), &fNoBubble, &fMonotoneTypeID,
302  &fVolumetric, &fInverseDistanceMap, &fNoConserve, &fValidate,
303  dof_tag_names[0].c_str(), dof_tag_names[0].c_str() );
304  CHECKIERR( ierr, "cannot compute scalar projection weights" )
305  POP_TIMER( couComm, rankInCouComm )
306 
307  // Let us now write the map file to disk, in parallel, for debugging and verification
308 #ifdef MOAB_HAVE_PNETCDF
309  {
310  std::stringstream outf;
311  outf << "atm_ocn_bilin_map_p" << endG4 - startG4 + 1 << ".nc"; // number of tasks on coupler
312  std::string mapfile = outf.str(); // write the map file in parallel, for debugging
313  ierr = iMOAB_WriteMappingWeightsToFile( cplAtmOcnPID, weights_identifiers[0].c_str(), mapfile.c_str() );
314  CHECKIERR( ierr, "failed to write map file to disk" );
315  }
316 #endif
317  }
318 
319 #endif
320 
321  MPI_Barrier( MPI_COMM_WORLD );
322 
323  int tagIndex[2];
324  int tagTypes[2] = { DENSE_DOUBLE, DENSE_DOUBLE };
325  int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = 1 /*FV*/;
326 
327  const char* bottomFields = "Sa_dens:Sa_pbot";
328  const char* bottomProjectedFields = "Sa_dens:Sa_pbot";
329 
330  if( ocnComm != MPI_COMM_NULL )
331  {
332  context_id = cplocn; // id for ocean on coupler
333  ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
334  CHECKIERR( ierr, "failed to define the field tag Sa_dens:Sa_pbot" );
335  }
336 
337  if( couComm != MPI_COMM_NULL )
338  {
339  ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFields, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
340  CHECKIERR( ierr, "failed to define the field tag Sa_dens:Sa_pbot" );
341 #ifdef ENABLE_ATMOCN_COUPLING
342  ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
343  CHECKIERR( ierr, "failed to define the field tag Sa_dens:Sa_pbot" );
344 #endif
345  }
346 
347  // need to make sure that the coverage mesh (created during the intx method) receives the tags
348  // that need to be projected to the target; so far, the coverage mesh has only the ids and global dofs;
349  // the migrate method may need changes to accommodate any GLL tag
350  // now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage mesh
351  // (cplAtmPID), using the new coverage graph communicator
352 
353  // zero out the tag, to check that we are actually sending the needed data
354  {
355  if( cplAtmAppID >= 0 )
356  {
357  int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
358  /*
359  * Each process in the communicator will have access to a local mesh instance, which
360  * will contain the original cells in the local partition and ghost entities. The numbers of
361  * vertices, primary cells, visible blocks, and sideset and nodeset boundary
362  * conditions are returned in arrays of size 3, for the local, ghost and total
363  * counts.
364  */
365  ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
366  CHECKIERR( ierr, "failed to get num primary elems" );
367  int numAllElem = nelem[2];
368  std::vector< double > vals;
369  int storLeng = atmCompNDoFs * numAllElem * 2; // 2 tags (Sa_dens, Sa_pbot)
370  int eetype = 1;
371 
372  vals.resize( storLeng );
373  for( int k = 0; k < storLeng; k++ )
374  vals[k] = 0.;
375 
376  ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomFields, &storLeng, &eetype, &vals[0] );
377  CHECKIERR( ierr, "cannot zero out the tag" )
378  // set the tag to 0
379  }
380  }
381 
382  const char* concat_fieldname = "Sa_dens:Sa_pbot";
383  const char* concat_fieldnameT = "Sa_dens:Sa_pbot";
384 
385 #ifdef ENABLE_ATMOCN_COUPLING
386  // first hop
387  PUSH_TIMER( "Send/receive data from atm component to coupler in atm context" )
388  if( atmComm != MPI_COMM_NULL )
389  {
390  // as always, use nonblocking sends
391  // this is for projection to ocean:
392  ierr = iMOAB_SendElementTag( cmpAtmPID, bottomFields, &atmCouComm, &cplatm );
393  CHECKIERR( ierr, "cannot send tag values" )
394  }
395  if( couComm != MPI_COMM_NULL )
396  {
397  // receive on atm on coupler pes
398  ierr = iMOAB_ReceiveElementTag( cplAtmPID, bottomFields, &atmCouComm, &cmpatm );
399  CHECKIERR( ierr, "cannot receive tag values" )
400  }
402 
403  // we can now free the sender buffers
404  if( atmComm != MPI_COMM_NULL )
405  {
406  ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplatm ); // context is atm on coupler
407  CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
408  }
409 
410  // the second hop is from atm towards ocean, on the coupler;
411  // it should send from each part on the coupler towards the coverage set that forms the
412  // rings around the target (ocean) cells
413  // basically, we send more cells than strictly needed for the intersection
414  //TODO
415  if( couComm != MPI_COMM_NULL )
416  {
417  // send using the par comm graph computed by iMOAB_ComputeCommGraph
418  ierr = iMOAB_SendElementTag( cplAtmPID, bottomFields, &couComm, &atmocnid );
419  CHECKIERR( ierr, "cannot send tag values towards coverage mesh for bilinear map" )
420 
421  ierr = iMOAB_ReceiveElementTag( cplAtmOcnPID, bottomFields, &couComm, &cplatm );
422  CHECKIERR( ierr, "cannot receive tag values for bilinear map" )
423 
424  ierr = iMOAB_FreeSenderBuffers( cplAtmPID, &atmocnid );
425  CHECKIERR( ierr, "cannot free buffers" )
426  }
427 
428  if( couComm != MPI_COMM_NULL )
429  {
430  /* We have the remapping weights now. Let us apply the weights onto the tag we defined
431  on the source mesh and get the projection on the target mesh */
432  PUSH_TIMER( "Apply Scalar projection weights" )
433  ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(), concat_fieldname,
434  concat_fieldnameT );
435  CHECKIERR( ierr, "failed to apply projection weights" );
436  POP_TIMER( couComm, rankInCouComm )
437  {
438  char outputFileTgt[] = "fOcnBilinOnCpl.h5m";
439  ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
440  CHECKIERR( ierr, "could not write fOcnBilinOnCpl.h5m to disk" )
441  }
442  }
443 
444  if( couComm != MPI_COMM_NULL )
445  {
446  // need to use ocean comp id for context
447  context_id = cmpocn; // id for ocean on comp
448  ierr = iMOAB_SendElementTag( cplOcnPID, "Sa_dens:Sa_pbot", &ocnCouComm, &context_id );
449  CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
450  }
451 
452  // receive on the ocean component
453  if( ocnComm != MPI_COMM_NULL )
454  {
455  context_id = cplocn; // id for ocean on coupler
456  ierr = iMOAB_ReceiveElementTag( cmpOcnPID, "Sa_dens:Sa_pbot", &ocnCouComm, &context_id );
457  CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
458  }
459 
460  MPI_Barrier( MPI_COMM_WORLD );
461 
462  if( couComm != MPI_COMM_NULL )
463  {
464  context_id = cmpocn;
465  ierr = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
466  CHECKIERR( ierr, "cannot free send/receive buffers for OCN context" )
467  }
468  if( ocnComm != MPI_COMM_NULL )
469  {
470  char outputFileOcn[] = "OcnWithProjBilin.h5m";
471  ierr = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
472  CHECKIERR( ierr, "could not write OcnWithProjBilin.h5m to disk" )
473  }
474  // do a check against a baseline file
475  if( !no_regression_test && ( ocnComm != MPI_COMM_NULL ) )
476  {
477  // the same as in the remap test:
478  // get the projected field on the ocean, together with the global ids, and check against the baseline file
479  // first get GlobalIds from ocn, and the fields:
480  int nverts[3], nelem[3];
481  ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
482  CHECKIERR( ierr, "failed to get ocn mesh info" );
483  std::vector< int > gidElems;
484  gidElems.resize( nelem[2] );
485  std::vector< double > tempElems;
486  tempElems.resize( nelem[2] );
487  // get global id storage
488  const std::string GidStr = "GLOBAL_ID"; // hard coded too
489  int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
490  ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
491  CHECKIERR( ierr, "failed to define global id tag" );
492 
493  int ent_type = 1;
494  ierr = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
495  CHECKIERR( ierr, "failed to get global ids" );
496  ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, "Sa_pbot", &nelem[2], &ent_type, &tempElems[0] );
497  CHECKIERR( ierr, "failed to get temperature field" );
498  // {
499  // // write baseline file
500  // std::fstream fs;
501  // fs.open( "baseline3.txt", std::fstream::out );
502  // fs << std::setprecision( 15 ); // maximum precision for doubles
503  // for( size_t i = 0; i < tempElems.size(); i++ )
504  // fs << gidElems[i] << " " << tempElems[i] << "\n";
505  // fs.close();
506  // }
507  int err_code = 1;
508  check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
509  if( 0 == err_code ) std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
510  }
511 
512 #endif
513 
514 #ifdef ENABLE_ATMOCN_COUPLING
515  if( couComm != MPI_COMM_NULL )
516  {
517  ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
518  CHECKIERR( ierr, "cannot deregister app intx AO" )
519  }
520  if( ocnComm != MPI_COMM_NULL )
521  {
522  ierr = iMOAB_DeregisterApplication( cmpOcnPID );
523  CHECKIERR( ierr, "cannot deregister app OCN1" )
524  }
525 #endif // ENABLE_ATMOCN_COUPLING
526 
527  if( atmComm != MPI_COMM_NULL )
528  {
529  ierr = iMOAB_DeregisterApplication( cmpAtmPID );
530  CHECKIERR( ierr, "cannot deregister app ATM1" )
531  }
532 
533 #ifdef ENABLE_ATMOCN_COUPLING
534  if( couComm != MPI_COMM_NULL )
535  {
536  ierr = iMOAB_DeregisterApplication( cplOcnPID );
537  CHECKIERR( ierr, "cannot deregister app OCNX" )
538  }
539 #endif // ENABLE_ATMOCN_COUPLING
540 
541  if( couComm != MPI_COMM_NULL )
542  {
543  ierr = iMOAB_DeregisterApplication( cplAtmPID );
544  CHECKIERR( ierr, "cannot deregister app ATMX" )
545  }
546 
547
548  ierr = iMOAB_Finalize();
549  CHECKIERR( ierr, "did not finalize iMOAB" )
550 
551  // free atm coupler group and comm
552  if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
553  MPI_Group_free( &joinAtmCouGroup );
554  if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );
555 
556 #ifdef ENABLE_ATMOCN_COUPLING
557  if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
558  // free ocn - coupler group and comm
559  if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
560  MPI_Group_free( &joinOcnCouGroup );
561 #endif
562 
563  if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );
564 
565  MPI_Group_free( &atmPEGroup );
566 #ifdef ENABLE_ATMOCN_COUPLING
567  MPI_Group_free( &ocnPEGroup );
568 #endif
569 
570  MPI_Group_free( &couPEGroup );
571  MPI_Group_free( &jgroup );
572 
573  MPI_Finalize();
574 
575  return 0;
576 }
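
As a usage sketch (the binary name and working directory are assumptions that depend on the local build; the option letters come from the ProgOptions registrations above), the test could be launched on 4 MPI tasks, with the atmosphere on tasks 0-1, the ocean on tasks 2-3, and the coupler spanning all four:

    mpiexec -n 4 ./imoab_coupler_bilin -a 0 -b 1 -c 2 -d 3 -g 0 -j 3 -p 2

With these settings the run would write recvAtmx.h5m, fOcnBilinOnCpl.h5m and OcnWithProjBilin.h5m, plus, when MOAB is built with PNetCDF, the map file atm_ocn_bilin_map_p4.nc.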

References ProgOptions::addOpt(), atmFilename, CHECKIERR, cmpatm, create_group_and_comm(), create_joint_comm_group(), DENSE_DOUBLE, DENSE_INTEGER, endG1, endG2, fileWriteOptions(), groupTasks, ierr, iMOAB_AppID, iMOAB_DefineTagStorage(), iMOAB_DeregisterApplication(), iMOAB_Finalize(), iMOAB_GetDoubleTagStorage(), iMOAB_GetIntTagStorage(), iMOAB_GetMeshInfo(), iMOAB_Initialize(), iMOAB_RegisterApplication(), iMOAB_SetDoubleTagStorage(), iMOAB_WriteMesh(), jgroup, MPI_COMM_WORLD, nghlay, numProcesses, ProgOptions::parseCommandLine(), POP_TIMER, PUSH_TIMER, rankInAtmComm, rankInGlobalComm, readopts(), setup_component_coupler_meshes(), startG1, and startG2.