MOAB: Mesh Oriented datABase  (version 5.5.0)
imoab_map2.cpp File Reference
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>
#include "imoab_coupler_utils.hpp"


Functions

int main (int argc, char *argv[])
 

Function Documentation

◆ main()

int main( int argc, char* argv[] )

Definition at line 36 of file imoab_map2.cpp.
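This example reads a source (atmosphere) mesh, a target (ocean) mesh, and a netcdf map file with precomputed remapping weights; it distributes them over (possibly distinct) MPI task ranges, migrates the meshes onto the coupler tasks, applies the weights to project a source field onto the target, sends the projected field back to the ocean tasks, and optionally compares the result against a baseline file. A typical invocation (the executable name produced by the MOAB test build is assumed here) looks like:

    mpiexec -np 4 ./imoab_map2 -t src.h5m -m tgt.h5m -w map.nc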

{
    int ierr;
    int rankInGlobalComm, numProcesses;
    MPI_Group jgroup;
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

    // Timer data
    moab::CpuTimer timer;
    double timer_ops;
    std::string opName;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );

    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes in jgroup

    std::string atmFilename = TestDir + "unittest/srcWithSolnTag.h5m";
    // in a regular case: 5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
    // intx atm/ocn is not in e3sm yet, so pick a number:
    // 6 * 100 + 18 = 618 : atmocnid
    // 9 LND, 10 CPLLND
    // 6 * 100 + 10 = 610 : atmlndid
    // cmpatm is for atm on atm pes
    // cmpocn is for ocean, on ocean pes
    // cplatm is for atm on coupler pes
    // cplocn is for ocean on coupler pes
    // atmocnid is for intx atm/ocn on coupler pes
    //
    int rankInAtmComm = -1;
    int cmpatm = 5,
        cplatm = 6;  // component ids are unique over all pes, and established in advance;

    std::string ocnFilename = TestDir + "unittest/outTri15_8.h5m";
    std::string mapFilename = TestDir + "unittest/mapNE20_FV15.nc";  // this is a netcdf file!

    std::string baseline = TestDir + "unittest/baseline2.txt";
    int rankInOcnComm    = -1;
    int cmpocn = 17, cplocn = 18,
        atmocnid = 618;  // component ids are unique over all pes, and established in advance;

    int rankInCouComm = -1;

    int nghlay = 0;  // number of ghost layers for loading the file
    std::vector< int > groupTasks;
    int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1;

    int startG4 = startG1, endG4 = endG1;  // these are for the coupler layout
    int context_id = -1;                   // used now for freeing buffers

    // default: load atm / source on 2 procs, ocean / target on 2, and the map on 2 as well,
    // in parallel, distributed by rows (which is actually very bad for the ocean mesh,
    // because probably all source cells will be involved in the coverage mesh on both tasks)

    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
    opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
    opts.addOpt< std::string >( "map_file,w", "map file from source to target", &mapFilename );

    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );

    opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );

    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );

    int types[2]       = { 3, 3 };  // type of source and target; 1 = SE, 2 = PC, 3 = FV
    int disc_orders[2] = { 1, 1 };  // 1 is for FV and PC; 4 could be for SE
    opts.addOpt< int >( "typeSource,x", "source type", &types[0] );
    opts.addOpt< int >( "typeTarget,y", "target type", &types[1] );
    opts.addOpt< int >( "orderSource,u", "source order", &disc_orders[0] );
    opts.addOpt< int >( "orderTarget,v", "target order", &disc_orders[1] );
    bool analytic_field = false;
    opts.addOpt< void >( "analytic,q", "analytic field", &analytic_field );

    bool no_regression_test = false;
    opts.addOpt< void >( "no_regression,r", "do not run regression test against baseline", &no_regression_test );
    opts.parseCommandLine( argc, argv );

    char fileWriteOptions[] = "PARALLEL=WRITE_PART";

    if( !rankInGlobalComm )
    {
        std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1
                  << "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2
                  << "\n map file: " << mapFilename << "\n on tasks : " << startG4 << ":" << endG4 << "\n";
        if( !no_regression_test )
        {
            std::cout << " check projection against baseline: " << baseline << "\n";
        }
    }

    // load files on 3 different communicators / groups:
    // the first group has task 0, the second group tasks 0 and 1;
    // the coupler will be on joint tasks, on a third group (0 and 1, again)
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )

    MPI_Group ocnPEGroup;
    MPI_Comm ocnComm;
    ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
    CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )

    // we will always have a coupler
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )

    // atm - coupler
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )

    // ocn - coupler
    MPI_Group joinOcnCouGroup;
    MPI_Comm ocnCouComm;
    ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
    CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
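    // Note: create_group_and_comm and create_joint_comm_group are helpers from
    // imoab_coupler_utils.hpp; the joint variant builds a communicator spanning
    // the union of the two groups, so a component and the coupler can exchange
    // mesh and tag data even when their task ranges differ.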

    ierr = iMOAB_Initialize( argc, argv );  // nothing from argc/argv is actually needed yet
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpAtmAppID       = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm
    int cplAtmAppID       = -1;            // -1 means it is not initialized
    iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm on coupler PEs

    int cmpOcnAppID       = -1;
    iMOAB_AppID cmpOcnPID = &cmpOcnAppID;        // ocn
    int cplOcnAppID = -1, cplAtmOcnAppID = -1;   // -1 means it is not initialized
    iMOAB_AppID cplOcnPID    = &cplOcnAppID;     // ocn on coupler PEs
    iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // intx atm-ocn on coupler PEs

    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // Register all the applications on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
                                          cplAtmPID );  // atm on coupler pes
        CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )

        ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
                                          cplOcnPID );  // ocn on coupler pes
        CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
    }

    if( atmComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( atmComm, &rankInAtmComm );
        ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register ATM App" )
        ierr = iMOAB_LoadMesh( cmpAtmPID, atmFilename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load atm mesh" )
    }

    MPI_Barrier( MPI_COMM_WORLD );
    if( ocnComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( ocnComm, &rankInOcnComm );
        ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
        CHECKIERR( ierr, "Cannot register OCN App" )
        ierr = iMOAB_LoadMesh( cmpOcnPID, ocnFilename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load ocn mesh" )
    }

    MPI_Barrier( MPI_COMM_WORLD );

    if( couComm != MPI_COMM_NULL )
    {
        // now load the map between OCNx and ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
        CHECKIERR( ierr, "Cannot register ocn_atm map instance over coupler pes " )
    }

    const std::string intx_from_file_identifier = "map-from-file";

    if( couComm != MPI_COMM_NULL )
    {
        int dummyCpl     = -1;
        int dummy_rowcol = -1;
        int dummyType    = 0;
        ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, &dummyCpl, &dummy_rowcol, &dummyType,
                                                 intx_from_file_identifier.c_str(), mapFilename.c_str() );
        CHECKIERR( ierr, "failed to load map file from disk" );
    }
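    // Note: the map weights are read in parallel from the netcdf file and
    // distributed by rows among the coupler tasks (see the layout comment near
    // the top); judging by their names and values, the dummyCpl / dummy_rowcol /
    // dummyType arguments act as unused placeholders in this load-from-file mode.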

    if( atmCouComm != MPI_COMM_NULL )
    {
        int type      = types[0];  // FV
        int direction = 1;         // from source to coupler; will create a mesh on cplAtmPID
        // because it is a coverage-like mesh, the context will be cplocn
        ierr = iMOAB_MigrateMapMesh( cmpAtmPID, cplAtmOcnPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup,
                                     &type, &cmpatm, &cplocn, &direction );
        CHECKIERR( ierr, "failed to migrate mesh for atm on coupler" );
#ifdef VERBOSE
        if( *cplAtmPID >= 0 )
        {
            char prefix[] = "atmcov";
            ierr          = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
            CHECKIERR( ierr, "failed to write local mesh" );
        }
#endif
    }
    MPI_Barrier( MPI_COMM_WORLD );

    if( ocnCouComm != MPI_COMM_NULL )
    {
        int type      = types[1];  // cells with GLOBAL_ID in the ocean / target set
        int direction = 2;         // from coupler to target; will create a mesh on cplOcnPID
        // it will be like the initial migrate cmpocn <-> cplocn
        ierr = iMOAB_MigrateMapMesh( cmpOcnPID, cplAtmOcnPID, cplOcnPID, &ocnCouComm, &ocnPEGroup, &couPEGroup,
                                     &type, &cmpocn, &cplocn, &direction );
        CHECKIERR( ierr, "failed to migrate mesh for ocn on coupler" );

#ifdef VERBOSE
        if( *cplOcnPID >= 0 )
        {
            char prefix[] = "ocntgt";
            ierr          = iMOAB_WriteLocalMesh( cplOcnPID, prefix );
            CHECKIERR( ierr, "failed to write local ocean mesh" );
            char outputFileRec[] = "CoupOcn.h5m";
            ierr                 = iMOAB_WriteMesh( cplOcnPID, outputFileRec, fileWriteOptions );
            CHECKIERR( ierr, "failed to write ocean global mesh file" );
        }
#endif
    }
    MPI_Barrier( MPI_COMM_WORLD );
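    // Note: direction 1 pushed the source (atm) mesh to the coupler as a
    // coverage mesh in the ocean context, while direction 2 pulled the target
    // (ocn) mesh onto the coupler like an ordinary cmpocn <-> cplocn migration;
    // the same calls also establish the communication graphs that the tag
    // send/receive operations below will reuse.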

    int tagIndex[2];
    int tagTypes[2]  = { DENSE_DOUBLE, DENSE_DOUBLE };
    int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = disc_orders[1] * disc_orders[1] /*FV*/;

    const char* bottomTempField          = "AnalyticalSolnSrcExact";
    const char* bottomTempProjectedField = "Target_proj";

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );

        ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tag Target_proj" );
    }

    if( analytic_field && ( atmComm != MPI_COMM_NULL ) )  // we are on source / atm pes
    {
        // cmpOcnPID, "T_proj;u_proj;v_proj;"
        ierr = iMOAB_DefineTagStorage( cmpAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );

        int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
        /*
         * Each process in the communicator will have access to a local mesh instance, which will contain
         * the original cells in the local partition and ghost entities. The number of vertices, primary
         * cells, visible blocks, and sideset and nodeset boundary conditions are returned in size 3
         * arrays, for local, ghost and total numbers.
         */
        ierr = iMOAB_GetMeshInfo( cmpAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
        CHECKIERR( ierr, "failed to get num primary elems" );
        int numAllElem = nelem[2];
        int eetype     = 1;

        if( types[0] == 2 )  // point cloud
        {
            numAllElem = nverts[2];
            eetype     = 0;
        }
        std::vector< double > vals;
        int storLeng = atmCompNDoFs * numAllElem;
        vals.resize( storLeng );
        for( int k = 0; k < storLeng; k++ )
            vals[k] = k;

        ierr = iMOAB_SetDoubleTagStorage( cmpAtmPID, bottomTempField, &storLeng, &eetype, &vals[0] );
        CHECKIERR( ierr, "cannot make analytical tag" )
    }
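    // Note: vals[k] = k fills the source tag with a per-task linear ramp over the
    // local storage, so this "analytic" field (and hence the projected values)
    // depends on how the source mesh happens to be partitioned.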

    // We need to make sure that the coverage mesh (created during the intx method) received the tag
    // that needs to be projected to the target. So far, the coverage mesh has only the ids and global
    // dofs; the migrate method would need changes to accommodate any GLL tag.
    // Now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage mesh
    // (cplAtmPID), using the new coverage graph communicator.

    // zero out the tag first, to check that we are actually sending the needed data
    {
        if( cplAtmAppID >= 0 )
        {
            int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
            /*
             * Each process in the communicator will have access to a local mesh instance, which
             * will contain the original cells in the local partition and ghost entities. The number
             * of vertices, primary cells, visible blocks, and sideset and nodeset boundary
             * conditions are returned in size 3 arrays, for local, ghost and total numbers.
             */
            ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
            CHECKIERR( ierr, "failed to get num primary elems" );
            int numAllElem = nelem[2];
            int eetype     = 1;
            if( types[0] == 2 )  // point cloud
            {
                eetype     = 0;  // vertices
                numAllElem = nverts[2];
            }
            std::vector< double > vals;
            int storLeng = atmCompNDoFs * numAllElem;

            vals.resize( storLeng );
            for( int k = 0; k < storLeng; k++ )
                vals[k] = 0.;

            ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomTempField, &storLeng, &eetype, &vals[0] );
            CHECKIERR( ierr, "cannot zero out the tag" )
        }
    }

    const char* concat_fieldname  = "AnalyticalSolnSrcExact";
    const char* concat_fieldnameT = "Target_proj";

    {

        PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
        if( atmComm != MPI_COMM_NULL )
        {
            // as always, use nonblocking sends
            // this is for projection to ocean:
            ierr = iMOAB_SendElementTag( cmpAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cplocn );
            CHECKIERR( ierr, "cannot send tag values" )
        }
        if( couComm != MPI_COMM_NULL )
        {
            // receive on atm on coupler pes, which was redistributed according to coverage
            ierr = iMOAB_ReceiveElementTag( cplAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cmpatm );
            CHECKIERR( ierr, "cannot receive tag values" )
        }

        // we can now free the sender buffers
        if( atmComm != MPI_COMM_NULL )
        {
            ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn );  // context is for ocean
            CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
        }
#ifdef VERBOSE
        if( *cplAtmPID >= 0 )
        {
            char prefix[] = "atmcov_withdata";
            ierr          = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
            CHECKIERR( ierr, "failed to write local atm cov mesh with data" );
        }
#endif

        if( couComm != MPI_COMM_NULL )
        {
            /* We have the remapping weights now. Let us apply the weights onto the tag we defined
               on the source mesh and get the projection on the target mesh */
            PUSH_TIMER( "Apply Scalar projection weights" )
            ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, intx_from_file_identifier.c_str(),
                                                       concat_fieldname, concat_fieldnameT );
            CHECKIERR( ierr, "failed to compute projection weight application" );
            POP_TIMER( couComm, rankInCouComm )

            {
                char outputFileTgt[] = "fOcnOnCpl6.h5m";
                ierr                 = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
                CHECKIERR( ierr, "could not write fOcnOnCpl6.h5m to disk" )
            }
        }

        // send the projected tag back to the ocean pes, with send/receive tag
        if( ocnComm != MPI_COMM_NULL )
        {
            int tagIndexIn2;
            ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs,
                                           &tagIndexIn2 );
            CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
                             "Target_proj on ocn pes" );
        }
        // send the tag to the ocean pes, from the ocean mesh on coupler pes,
        // from couComm, using the common joint comm ocn_coupler;
        // as always, use nonblocking sends
        // original graph (context is -1)
        if( couComm != MPI_COMM_NULL )
        {
            // need to use the ocean comp id for context
            context_id = cmpocn;  // id for ocean on comp
            ierr       = iMOAB_SendElementTag( cplOcnPID, "Target_proj", &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
        }

        // receive on component 2, ocean
        if( ocnComm != MPI_COMM_NULL )
        {
            context_id = cplocn;  // id for ocean on coupler
            ierr       = iMOAB_ReceiveElementTag( cmpOcnPID, "Target_proj", &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
        }

        if( couComm != MPI_COMM_NULL )
        {
            context_id = cmpocn;
            ierr       = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
        }
        MPI_Barrier( MPI_COMM_WORLD );
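        // Note: each iMOAB_FreeSenderBuffers call uses the same context id as
        // the matching iMOAB_SendElementTag, and is issued only after the
        // corresponding receive has completed; the barriers make that ordering
        // explicit.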

        if( ocnComm != MPI_COMM_NULL )
        {
#ifdef VERBOSE
            char outputFileOcn[] = "OcnWithProj.h5m";
            ierr                 = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
            CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
#endif
            // test results only for n == 1, for bottomTempProjectedField
            if( !no_regression_test )
            {
                // the same as the remap test:
                // get the temperature field on the ocean, with the global ids, and compare
                // against the baseline file; first get GlobalIds from ocn, and the fields:
                int nverts[3], nelem[3];
                ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
                CHECKIERR( ierr, "failed to get ocn mesh info" );
                std::vector< int > gidElems;
                gidElems.resize( nelem[2] );
                std::vector< double > tempElems;
                tempElems.resize( nelem[2] );
                // get global id storage
                const std::string GidStr = "GLOBAL_ID";  // hard coded too
                int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
                ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
                CHECKIERR( ierr, "failed to define global id tag" );

                int ent_type = 1;
                ierr = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
                CHECKIERR( ierr, "failed to get global ids" );
                ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, bottomTempProjectedField, &nelem[2], &ent_type,
                                                  &tempElems[0] );
                CHECKIERR( ierr, "failed to get temperature field" );
                int err_code = 1;
                check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
                if( 0 == err_code )
                    std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
            }
        }

    }  // end of the single projection pass (n == 1)

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
        CHECKIERR( ierr, "cannot deregister app intx AO" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCN1" )
    }

    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATM1" )
    }

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCNX" )
    }

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATMX" )
    }

    ierr = iMOAB_Finalize();
    CHECKIERR( ierr, "did not finalize iMOAB" )

    // free the atm - coupler group and comm
    if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
    MPI_Group_free( &joinAtmCouGroup );
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );

    if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
    // free the ocn - coupler group and comm
    if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
    MPI_Group_free( &joinOcnCouGroup );

    if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );

    MPI_Group_free( &atmPEGroup );

    MPI_Group_free( &ocnPEGroup );

    MPI_Group_free( &couPEGroup );
    MPI_Group_free( &jgroup );

    MPI_Finalize();

    return 0;
}

References ProgOptions::addOpt(), atmFilename, CHECKIERR, cmpatm, create_group_and_comm(), create_joint_comm_group(), DENSE_DOUBLE, DENSE_INTEGER, endG1, endG2, fileWriteOptions(), groupTasks, ierr, iMOAB_AppID, iMOAB_DefineTagStorage(), iMOAB_DeregisterApplication(), iMOAB_Finalize(), iMOAB_GetDoubleTagStorage(), iMOAB_GetIntTagStorage(), iMOAB_GetMeshInfo(), iMOAB_Initialize(), iMOAB_LoadMesh(), iMOAB_RegisterApplication(), iMOAB_SetDoubleTagStorage(), iMOAB_WriteLocalMesh(), iMOAB_WriteMesh(), jgroup, MPI_COMM_WORLD, nghlay, numProcesses, ProgOptions::parseCommandLine(), POP_TIMER, PUSH_TIMER, rankInAtmComm, rankInGlobalComm, readopts(), startG1, and startG2.
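For orientation, the call sequence above can be condensed into a minimal sketch. This is not a drop-in replacement for the example: it assumes a single shared communicator for the source, target, and map applications, elides the mesh migration and the tag send/receive traffic (which the real example needs because components and coupler live on different task ranges; whether projection works without the migrate step on a shared communicator is an assumption here), and uses placeholder file names.

// Minimal sketch (assumptions: one shared communicator; migration and tag
// send/receive elided; placeholder file names; error checks reduced).
#include "moab/iMOAB.h"
#include "moab_mpi.h"

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm comm = MPI_COMM_WORLD;
    int ierr      = iMOAB_Initialize( argc, argv );

    // register the source, target, and map "applications" (ids must be unique)
    int srcApp = -1, tgtApp = -1, mapApp = -1;
    int srcId = 5, tgtId = 17, mapId = 618;
    ierr = iMOAB_RegisterApplication( "SRC", &comm, &srcId, &srcApp );
    ierr = iMOAB_RegisterApplication( "TGT", &comm, &tgtId, &tgtApp );
    ierr = iMOAB_RegisterApplication( "MAP", &comm, &mapId, &mapApp );

    // load the two meshes in parallel
    int nghlay = 0;  // no ghost layers
    const char* readopts = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
    ierr = iMOAB_LoadMesh( &srcApp, "source.h5m", readopts, &nghlay );
    ierr = iMOAB_LoadMesh( &tgtApp, "target.h5m", readopts, &nghlay );

    // read the mapping weights from a netcdf map file, under an identifier
    int dummyCpl = -1, dummyRC = -1, dummyType = 0;
    ierr = iMOAB_LoadMappingWeightsFromFile( &mapApp, &dummyCpl, &dummyRC, &dummyType,
                                             "map-from-file", "map.nc" );

    // define the source and target tags (1 DoF per cell for FV)
    int tagType = DENSE_DOUBLE, ndofs = 1, tagIndex = -1;
    ierr = iMOAB_DefineTagStorage( &srcApp, "AnalyticalSolnSrcExact", &tagType, &ndofs, &tagIndex );
    ierr = iMOAB_DefineTagStorage( &tgtApp, "Target_proj", &tagType, &ndofs, &tagIndex );

    // apply the weights: project the source tag onto the target tag
    ierr = iMOAB_ApplyScalarProjectionWeights( &mapApp, "map-from-file",
                                               "AnalyticalSolnSrcExact", "Target_proj" );

    // write the projected target mesh and clean up
    ierr = iMOAB_WriteMesh( &tgtApp, "target_with_proj.h5m", "PARALLEL=WRITE_PART" );
    ierr = iMOAB_DeregisterApplication( &mapApp );
    ierr = iMOAB_DeregisterApplication( &tgtApp );
    ierr = iMOAB_DeregisterApplication( &srcApp );
    ierr = iMOAB_Finalize();
    MPI_Finalize();
    return ierr;
}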