MOAB: Mesh Oriented datABase  (version 5.5.0)
imoab_read_map.cpp
/*
 * This imoab_read_map test simulates coupling between 2 components.
 * Two meshes are loaded from 2 files (src, tgt), along with one map file.
 * After the map is read, in parallel, on coupler PEs, with distributed rows, the
 * coupler meshes for source and target are generated in a migration step,
 * in which we migrate from the target PEs, according to row ids, to the coupler target mesh,
 * and from the source to a coverage mesh on the coupler. During this migration, par comm
 * graphs are established between source and coupler and between target and coupler, which
 * assist in the field transfer from source to target, through the coupler.
 *
 */

#include "moab/Core.hpp"
#ifndef MOAB_HAVE_MPI
#error mbtempest tool requires MPI configuration
#endif

// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>

#include "imoab_coupler_utils.hpp"

#ifndef MOAB_HAVE_TEMPESTREMAP
#error The climate coupler test example requires MOAB configuration with TempestRemap
#endif
int main( int argc, char* argv[] )
{
    int ierr;
    int rankInGlobalComm, numProcesses;
    MPI_Group jgroup;
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
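    // The read options above, briefly:
    //   PARALLEL=READ_PART           - each task reads only its own part of the file
    //   PARTITION=PARALLEL_PARTITION - parts are identified by the PARALLEL_PARTITION tag
    //   PARALLEL_RESOLVE_SHARED_ENTS - resolve entities shared between adjacent parts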

    // Timer data
    moab::CpuTimer timer;
    double timer_ops;
    std::string opName;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );

    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes are in jgroup

    std::string atmFilename = TestDir + "unittest/wholeATM_T.h5m";
    // on a regular case: 5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
    // intx atm/ocn is not in e3sm yet, so pick a number:
    // 6 * 100 + 18 = 618 : atmocnid
    // 9 LND, 10 CPLLND
    // 6 * 100 + 10 = 610 : atmlndid
    // cmpatm is for atm on atm pes
    // cmpocn is for ocean, on ocean pes
    // cplatm is for atm on coupler pes
    // cplocn is for ocean on coupler pes
    // atmocnid is for intx atm / ocn on coupler pes
    //
    int rankInAtmComm = -1;
    int cmpatm = 5,
        cplatm = 6;  // component ids are unique over all pes, and established in advance;

    std::string ocnFilename = TestDir + "unittest/recMeshOcn.h5m";
    std::string mapFilename = TestDir + "unittest/atm_ocn_map.nc";  // this is a netcdf file!

    std::string baseline = TestDir + "unittest/baseline1.txt";
    int rankInOcnComm = -1;
    int cmpocn = 17, cplocn = 18,
        atmocnid = 618;  // component ids are unique over all pes, and established in advance;

    int rankInCouComm = -1;

    int nghlay = 0;  // number of ghost layers for loading the file
    std::vector< int > groupTasks;
    int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1;

    int startG4 = startG1, endG4 = endG1;  // these are for the coupler layout
    int context_id = -1;                   // used now for freeing buffers

    // default: load atm on 2 procs, ocean on 2, and the map on 2 as well, in parallel,
    // distributed by rows (which is actually a poor distribution for the ocean mesh, because
    // most likely all source cells will end up in the coverage mesh on both tasks)

    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
    opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
    opts.addOpt< std::string >( "map_file,w", "map file from source to target", &mapFilename );

    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );

    opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );

    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );

    int n = 1;  // number of send/receive / project / send back cycles
    opts.addOpt< int >( "iterations,n", "number of iterations for coupler", &n );

    bool no_regression_test = false;
    opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline 1", &no_regression_test );
    opts.parseCommandLine( argc, argv );

    char fileWriteOptions[] = "PARALLEL=WRITE_PART";

    if( !rankInGlobalComm )
    {
        std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1
                  << "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2
                  << "\n map file: " << mapFilename << "\n on tasks : " << startG4 << ":" << endG4 << "\n";
        if( !no_regression_test )
        {
            std::cout << " check projection against baseline: " << baseline << "\n";
        }
    }

    // load files on 3 different communicators / groups:
    // the first group has task 0, the second group has tasks 0 and 1, and the coupler
    // will be on joint tasks, on a third group (0 and 1, again)
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )

    MPI_Group ocnPEGroup;
    MPI_Comm ocnComm;
    ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
    CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )

    // we will always have a coupler
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )

    // atm_coupler
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )

    // ocn_coupler
    MPI_Group joinOcnCouGroup;
    MPI_Comm ocnCouComm;
    ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
    CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
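
    // each joint communicator spans the union of a component group and the coupler group;
    // all component <-> coupler traffic below (mesh migration, tag send/receive) travels
    // over these joint comms (atmCouComm, ocnCouComm)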

    ierr = iMOAB_Initialize( argc, argv );  // nothing is actually needed from argc, argv yet
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpAtmAppID = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm
    int cplAtmAppID = -1;                  // -1 means it is not initialized
    iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm on coupler PEs

    int cmpOcnAppID = -1;
    iMOAB_AppID cmpOcnPID = &cmpOcnAppID;        // ocn
    int cplOcnAppID = -1, cplAtmOcnAppID = -1;   // -1 means it is not initialized
    iMOAB_AppID cplOcnPID = &cplOcnAppID;        // ocn on coupler PEs
    iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // intx atm-ocn on coupler PEs
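
    // an iMOAB_AppID is just a pointer to the int application id; iMOAB_RegisterApplication
    // fills it in, and every later iMOAB call identifies its mesh instance through it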

    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // Register all the applications on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
                                          cplAtmPID );  // atm on coupler pes
        CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )

        ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
                                          cplOcnPID );  // ocn on coupler pes
        CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
    }

    if( atmComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( atmComm, &rankInAtmComm );
        ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register ATM App" )
        ierr = iMOAB_LoadMesh( cmpAtmPID, atmFilename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load atm mesh" )
    }

    MPI_Barrier( MPI_COMM_WORLD );
    if( ocnComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( ocnComm, &rankInOcnComm );
        ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
        CHECKIERR( ierr, "Cannot register OCN App" )
        ierr = iMOAB_LoadMesh( cmpOcnPID, ocnFilename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load ocn mesh" )
    }

    MPI_Barrier( MPI_COMM_WORLD );

    if( couComm != MPI_COMM_NULL )
    {
        // now load the map between OCNx and ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
        CHECKIERR( ierr, "Cannot register ocn_atm map instance over coupler pes " )
    }

    int disc_orders[3] = { 4, 1, 1 };

    const std::string intx_from_file_identifier = "map-from-file";

    if( couComm != MPI_COMM_NULL )
    {
        int dummyCpl     = -1;
        int dummy_rowcol = -1;
        int dummyType    = 0;
        ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, &dummyCpl, &dummy_rowcol, &dummyType,
                                                 intx_from_file_identifier.c_str(), mapFilename.c_str() );
        CHECKIERR( ierr, "failed to load map file from disk" );
    }
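
    // after this call each coupler task holds a slice of the sparse weight matrix from the
    // netcdf map file, distributed by rows (target dofs); the row/col ids of that slice
    // drive the two mesh migrations below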

    if( atmCouComm != MPI_COMM_NULL )
    {
        int type      = 1;  // quads in source set
        int direction = 1;  // from source to coupler; will create a mesh on cplAtmPID
        // because it is like "coverage", the context will be cplocn
        ierr = iMOAB_MigrateMapMesh( cmpAtmPID, cplAtmOcnPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup,
                                     &type, &cmpatm, &cplocn, &direction );
        CHECKIERR( ierr, "failed to migrate mesh for atm on coupler" );
#ifdef VERBOSE
        if( *cplAtmPID >= 0 )
        {
            char prefix[] = "atmcov";
            ierr = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
            CHECKIERR( ierr, "failed to write local mesh" );
        }
#endif
    }
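
    // in effect, the mesh created on cplAtmPID is a "coverage" mesh: on each coupler task it
    // holds the source (atm) cells referenced as columns in the locally owned map rows,
    // i.e. just the cells needed to apply the local part of the projection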
    MPI_Barrier( MPI_COMM_WORLD );

    if( ocnCouComm != MPI_COMM_NULL )
    {
        int type      = 3;  // cells with GLOBAL_ID in ocean / target set
        int direction = 2;  // from coupler to target; will create a mesh on cplOcnPID
        // it will be like the initial migrate cmpocn <-> cplocn
        ierr = iMOAB_MigrateMapMesh( cmpOcnPID, cplAtmOcnPID, cplOcnPID, &ocnCouComm, &ocnPEGroup, &couPEGroup,
                                     &type, &cmpocn, &cplocn, &direction );
        CHECKIERR( ierr, "failed to migrate mesh for ocn on coupler" );
#ifdef VERBOSE
        if( *cplOcnPID >= 0 )
        {
            char prefix[] = "ocntgt";
            ierr = iMOAB_WriteLocalMesh( cplOcnPID, prefix );
            CHECKIERR( ierr, "failed to write local ocean mesh" );
            char outputFileRec[] = "CoupOcn.h5m";
            ierr = iMOAB_WriteMesh( cplOcnPID, outputFileRec, fileWriteOptions );
            CHECKIERR( ierr, "failed to write ocean global mesh file" );
        }
#endif
    }
    MPI_Barrier( MPI_COMM_WORLD );

    int tagIndex[2];
    int tagTypes[2]  = { DENSE_DOUBLE, DENSE_DOUBLE };
    int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = 1 /*FV*/;

    const char* bottomFields          = "a2oTbot:a2oUbot:a2oVbot";
    const char* bottomProjectedFields = "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj";
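
    // tag names are passed as one ":"-separated list, so each define/send/receive below
    // handles all three fields (bottom T, U, V) at once; the spectral atm side carries
    // 4 x 4 = 16 GLL dofs per cell, while the FV ocean carries 1 dof per cell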

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFields, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tags a2oTbot:a2oUbot:a2oVbot" );

        ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tags a2oTbot_proj:a2oUbot_proj:a2oVbot_proj" );
    }

    // we need to make sure that the coverage mesh (created during the intx method) receives
    // the tags that need to be projected to the target; so far, the coverage mesh has only
    // the ids and global dofs, and the migrate method may need changes to accommodate any
    // GLL tag. Now send a tag from the original atmosphere (cmpAtmPID) towards the migrated
    // coverage mesh (cplAtmPID), using the new coverage graph communicator

    // make the tag 0, to check that we are actually sending needed data
    {
        if( cplAtmAppID >= 0 )
        {
            int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
            /*
             * Each process in the communicator will have access to a local mesh instance,
             * which will contain the original cells in the local partition and ghost
             * entities. The numbers of vertices, primary cells, visible blocks, sideset and
             * nodeset boundary conditions are returned in arrays of size 3, for local, ghost
             * and total numbers.
             */
            ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
            CHECKIERR( ierr, "failed to get num primary elems" );
            int numAllElem = nelem[2];
            std::vector< double > vals;
            int storLeng = atmCompNDoFs * numAllElem * 3;  // 3 tags
            int eetype   = 1;

            vals.resize( storLeng );
            for( int k = 0; k < storLeng; k++ )
                vals[k] = 0.;

            ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomFields, &storLeng, &eetype, &vals[0] );
            CHECKIERR( ierr, "cannot zero out the tags" )
        }
    }
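
    // note: storLeng above sizes one flat array covering the whole ":"-separated tag list
    // (16 dofs per cell x numAllElem cells x 3 tags), which is what the set call expects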

    const char* concat_fieldname  = "a2oTbot:a2oUbot:a2oVbot";
    const char* concat_fieldnameT = "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj";

    // start a virtual loop for the number of iterations
    for( int iters = 0; iters < n; iters++ )
    {

        PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
        if( atmComm != MPI_COMM_NULL )
        {
            // as always, use nonblocking sends
            // this is for the projection to ocean:
            ierr = iMOAB_SendElementTag( cmpAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cplocn );
            CHECKIERR( ierr, "cannot send tag values" )
#ifdef GRAPH_INFO
            int is_sender = 1;
            int context   = cplocn;
            iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmCovOcnS" );
#endif
        }
        if( couComm != MPI_COMM_NULL )
        {
            // receive on atm on the coupler pes, redistributed according to coverage
            ierr = iMOAB_ReceiveElementTag( cplAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cmpatm );
            CHECKIERR( ierr, "cannot receive tag values" )
#ifdef GRAPH_INFO
            int is_sender = 0;
            int context   = atmocnid;  // the same context
            iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmCovOcnR" );
#endif
        }

        // we can now free the sender buffers
        if( atmComm != MPI_COMM_NULL )
        {
            ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn );  // context is for ocean
            CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
        }
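
        // this send / receive / free-buffers triple is the basic iMOAB transfer pattern: the
        // sender posts nonblocking sends keyed by a context id (here cplocn), the receiver
        // unpacks via the matching par comm graph, and the send buffers are released only
        // afterwards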

#ifdef VERBOSE
        if( *cplAtmPID >= 0 && n == 1 )
        {
            char prefix[] = "atmcov_withdata";
            ierr = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
            CHECKIERR( ierr, "failed to write local atm cov mesh with data" );
        }

        if( couComm != MPI_COMM_NULL && 1 == n )
        {
            // write only for the n == 1 case
            char outputFileRecvd[] = "recvAtmCoupOcn.h5m";
            ierr = iMOAB_WriteMesh( cplAtmPID, outputFileRecvd, fileWriteOptions );
            CHECKIERR( ierr, "could not write recvAtmCoupOcn.h5m to disk" )
        }
#endif

        if( couComm != MPI_COMM_NULL )
        {
            /* We have the remapping weights now. Let us apply the weights onto the tag we
               defined on the source mesh, and get the projection on the target mesh */
            PUSH_TIMER( "Apply Scalar projection weights" )
            ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, intx_from_file_identifier.c_str(),
                                                       concat_fieldname, concat_fieldnameT );
            CHECKIERR( ierr, "failed to compute projection weight application" );
            POP_TIMER( couComm, rankInCouComm )
            if( 1 == n )  // write only for the n == 1 case
            {
                char outputFileTgt[] = "fOcnOnCpl8.h5m";
                ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
                CHECKIERR( ierr, "could not write fOcnOnCpl8.h5m to disk" )
            }
        }
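
        // in effect, the weight application computes, for each target (ocn) dof t,
        //     tgt[t] = sum_s W(t,s) * src[s]
        // where W is the sparse matrix read from the map file and s ranges over the source
        // (atm) dofs present in the local coverage mesh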

        // send the projected tag back to the ocean pes, with send/receive tag
        if( ocnComm != MPI_COMM_NULL )
        {
            int tagIndexIn2;
            ierr =
                iMOAB_DefineTagStorage( cmpOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
            CHECKIERR( ierr, "failed to define the field tag for receiving back the tags "
                             "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj on ocn pes" );
        }
        // send the tag to the ocean pes, from the ocean mesh on the coupler pes,
        // over the common joint comm ocn_coupler; as always, use nonblocking sends
        // (this uses the original graph; context is -1)
        if( couComm != MPI_COMM_NULL )
        {
            // we need to use the ocean comp id for the context
            context_id = cmpocn;  // id for ocean on the component pes
            ierr =
                iMOAB_SendElementTag( cplOcnPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
        }

        // receive on component 2, ocean
        if( ocnComm != MPI_COMM_NULL )
        {
            context_id = cplocn;  // id for ocean on the coupler pes
            ierr       = iMOAB_ReceiveElementTag( cmpOcnPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &ocnCouComm,
                                                  &context_id );
            CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
        }

        if( couComm != MPI_COMM_NULL )
        {
            context_id = cmpocn;
            ierr       = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
        }
        MPI_Barrier( MPI_COMM_WORLD );
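
        // note the mirrored contexts relative to the atm -> coupler transfer above: the
        // coupler side sends with the component id (cmpocn) as context, while the ocean
        // component receives with the coupler id (cplocn)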

        if( ocnComm != MPI_COMM_NULL && 1 == n )  // write and test only for the n == 1 case
        {
#ifdef VERBOSE
            char outputFileOcn[] = "OcnWithProj.h5m";
            ierr = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
            CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
#endif
            // test results only for n == 1, for the projected temperature field "a2oTbot_proj"
            if( !no_regression_test )
            {
                // the same check as in the remap test: get the temperature field and the
                // global ids on the ocean component, and compare against the baseline file;
                // first get GlobalIds from ocn, and the fields:
                int nverts[3], nelem[3];
                ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
                CHECKIERR( ierr, "failed to get ocn mesh info" );
                std::vector< int > gidElems;
                gidElems.resize( nelem[2] );
                std::vector< double > tempElems;
                tempElems.resize( nelem[2] );
                // get global id storage
                const std::string GidStr = "GLOBAL_ID";  // hard coded too
                int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
                ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
                CHECKIERR( ierr, "failed to define global id tag" );

                int ent_type = 1;
                ierr = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
                CHECKIERR( ierr, "failed to get global ids" );
                ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, "a2oTbot_proj", &nelem[2], &ent_type, &tempElems[0] );
                CHECKIERR( ierr, "failed to get temperature field" );
                int err_code = 1;
                check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
                if( 0 == err_code )
                    std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
            }
        }

    }  // end loop iterations n

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
        CHECKIERR( ierr, "cannot deregister app intx AO" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCN1" )
    }

    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATM1" )
    }

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCNX" )
    }

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATMX" )
    }

    ierr = iMOAB_Finalize();
    CHECKIERR( ierr, "did not finalize iMOAB" )

    // free the atm - coupler group and comm
    if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
    MPI_Group_free( &joinAtmCouGroup );
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );

    if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
    // free the ocn - coupler group and comm
    if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
    MPI_Group_free( &joinOcnCouGroup );

    if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );

    MPI_Group_free( &atmPEGroup );

    MPI_Group_free( &ocnPEGroup );

    MPI_Group_free( &couPEGroup );
    MPI_Group_free( &jgroup );

    MPI_Finalize();

    return 0;
}