MOAB: Mesh Oriented datABase  (version 5.5.0)
imoab_map_target.cpp
1 /*
2  * This imoab_map_target test simulates coupling between 2 components.
3  * Two meshes are loaded from 2 files (src, tgt), together with one map file.
4  * The target mesh is migrated to the coupler with a partitioning method.
5  * After the map is read, in parallel, on coupler pes, with row ownership taken
6  * from the target mesh, the
7  * coupler mesh for the source is generated, in a migration step,
8  * from the source mesh to a coverage mesh on the coupler. During this migration, a par comm graph
9  * is established between source and coupler, which assists
10  * in field transfer from source to coupler; the original migration graph
11  * is used to send the projected field from the coupler back to the target component
12  *
13  */
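/*
 * A minimal sketch of how this test might be launched (binary name, paths and task ranges are
 * illustrative only; the option letters come from the ProgOptions section below):
 *
 *   mpiexec -n 4 ./imoab_map_target -t srcWithSolnTag.h5m -m outTri15_8.h5m \
 *           -w mapNE20_FV15.nc -a 0 -b 1 -c 2 -d 3 -g 0 -j 3 -q
 *
 * -t/-m/-w select the source mesh, target mesh and map file; -a/-b, -c/-d and -g/-j give the
 * start/end tasks for the atmosphere, ocean and coupler layouts; -q fills an analytic field on
 * the source instead of relying on a tag already present in the source file.
 */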
14 
15 #include "moab/Core.hpp"
16 #ifndef MOAB_HAVE_MPI
17 #error mbtempest tool requires MPI configuration
18 #endif
19 
20 // MPI includes
21 #include "moab_mpi.h"
22 #include "moab/ParallelComm.hpp"
23 #include "MBParallelConventions.h"
24 
25 #include "moab/iMOAB.h"
26 #include "TestUtil.hpp"
27 #include "moab/CpuTimer.hpp"
28 #include "moab/ProgOptions.hpp"
29 #include <iostream>
30 #include <sstream>
31 
32 #include "imoab_coupler_utils.hpp"
33 
34 #ifndef MOAB_HAVE_TEMPESTREMAP
35 #error The climate coupler test example requires MOAB configuration with TempestRemap
36 #endif
37 
38 int main( int argc, char* argv[] )
39 {
40  int ierr;
41  int rankInGlobalComm = -1, numProcesses = -1;
42  MPI_Group jgroup;
43  std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
44 
45  // Timer data
46  moab::CpuTimer timer;
47  double timer_ops;
48  std::string opName;
49 
50  MPI_Init( &argc, &argv );
51  MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
52  MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );
53 
54  MPI_Comm_group( MPI_COMM_WORLD, &jgroup ); // all processes in jgroup
55 
56  std::string atmFilename = TestDir + "unittest/srcWithSolnTag.h5m";
57  // on a regular case, 5 ATM, 6 CPLATM (ATMX), 17 OCN , 18 CPLOCN (OCNX) ;
58  // intx atm/ocn is not in e3sm yet, give a number
59  // 6 * 100+ 18 = 618 : atmocnid
60  // 9 LND, 10 CPLLND
61  // 6 * 100 + 10 = 610 atmlndid:
62  // cmpatm is for atm on atm pes
63  // cmpocn is for ocean, on ocean pe
64  // cplatm is for atm on coupler pes
65  // cplocn is for ocean on coupler pes
66  // atmocnid is for intx atm / ocn on coupler pes
67  //
68  int rankInAtmComm = -1;
69  int cmpatm = 5,
70  cplatm = 6; // component ids are unique over all pes, and established in advance;
71 
72  std::string ocnFilename = TestDir + "unittest/outTri15_8.h5m";
73  std::string mapFilename = TestDir + "unittest/mapNE20_FV15.nc"; // this is a netcdf file!
74 
75  std::string baseline = TestDir + "unittest/baseline2.txt";
76  int rankInOcnComm = -1;
77  int cmpocn = 17, cplocn = 18,
78  atmocnid = 618; // component ids are unique over all pes, and established in advance;
79 
80  int rankInCouComm = -1;
81 
82  int nghlay = 0; // number of ghost layers for loading the file
83  std::vector< int > groupTasks;
84  int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1;
85 
86  int startG4 = startG1, endG4 = endG1; // these are for coupler layout
87  int context_id; // used now for freeing buffers
88 
89  int repartitioner_scheme = 0;
90 #ifdef MOAB_HAVE_ZOLTAN
91  repartitioner_scheme = 2; // use the graph partitioner in that case
92 #endif
93 
94  // default: load atm / source on 2 procs, ocean / target on 2,
95  // load map on 2 also, in parallel, distributed by rows (which is actually very bad for the ocean mesh,
96  // because probably all source cells will be involved in the coverage mesh on both tasks)
97 
98  ProgOptions opts;
99  opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
100  opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
101  opts.addOpt< std::string >( "map_file,w", "map file from source to target", &mapFilename );
102 
103  opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
104  opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
105 
106  opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
107  opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
108 
109  opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
110  opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );
111 
112  int types[2] = { 3, 3 }; // type of source and target; 1 = SE, 2 = PC, 3 = FV
113  int disc_orders[2] = { 1, 1 }; // 1 is for FV and PC; 4 could be for SE
114  opts.addOpt< int >( "typeSource,x", "source type", &types[0] );
115  opts.addOpt< int >( "typeTarget,y", "target type", &types[1] );
116  opts.addOpt< int >( "orderSource,u", "source order", &disc_orders[0] );
117  opts.addOpt< int >( "orderTarget,v", "target order", &disc_orders[1] );
118  bool analytic_field = false;
119  opts.addOpt< void >( "analytic,q", "analytic field", &analytic_field );
120 
121  bool no_regression_test = false;
122  opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline", &no_regression_test );
123  opts.parseCommandLine( argc, argv );
124 
125  char fileWriteOptions[] = "PARALLEL=WRITE_PART";
126 
127  if( !rankInGlobalComm )
128  {
129  std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1
130  << "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2
131  << "\n map file:" << mapFilename << "\n on tasks : " << startG4 << ":" << endG4 << "\n";
132  if( !no_regression_test )
133  {
134  std::cout << " check projection against baseline: " << baseline << "\n";
135  }
136  }
137 
138  // load files on 3 different communicators / groups
139  // first group has task 0, second group tasks 0 and 1
140  // coupler will be on the joint tasks, in a third group (0 and 1, again)
143  MPI_Group atmPEGroup;
144  MPI_Comm atmComm;
145  ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
146  CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )
147 
148  MPI_Group ocnPEGroup;
149  MPI_Comm ocnComm;
150  ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
151  CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
152 
153  // we will always have a coupler
154  MPI_Group couPEGroup;
155  MPI_Comm couComm;
156  ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
157  CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )
158 
159  // atm_coupler
160  MPI_Group joinAtmCouGroup;
161  MPI_Comm atmCouComm;
162  ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
163  CHECKIERR( ierr, "Cannot create joint atm cou communicator" )
164 
165  // ocn_coupler
166  MPI_Group joinOcnCouGroup;
167  MPI_Comm ocnCouComm;
168  ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
169  CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
170 
171  ierr = iMOAB_Initialize( argc, argv ); // nothing is really needed from argc, argv yet; maybe it will be later
172  CHECKIERR( ierr, "Cannot initialize iMOAB" )
173 
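 // iMOAB identifies every registered application by an integer handle; an iMOAB_AppID is a
 // pointer to that handle (each PID below is the address of the corresponding AppID), so the
 // same variable is passed to all later iMOAB calls for that application. A value of -1 marks
 // an application that is not registered on this process.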
174  int cmpAtmAppID = -1;
175  iMOAB_AppID cmpAtmPID = &cmpAtmAppID; // atm
176  int cplAtmAppID = -1; // -1 means it is not initialized
177  iMOAB_AppID cplAtmPID = &cplAtmAppID; // atm on coupler PEs
178 
179  int cmpOcnAppID = -1;
180  iMOAB_AppID cmpOcnPID = &cmpOcnAppID; // ocn
181  int cplOcnAppID = -1, cplAtmOcnAppID = -1; // -1 means it is not initialized
182  iMOAB_AppID cplOcnPID = &cplOcnAppID; // ocn on coupler PEs
183  iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID; // intx atm -ocn on coupler PEs
184 
185  if( couComm != MPI_COMM_NULL )
186  {
187  MPI_Comm_rank( couComm, &rankInCouComm );
188  // Register all the applications on the coupler PEs
189  ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
190  cplAtmPID ); // atm on coupler pes
191  CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
192 
193  ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
194  cplOcnPID ); // ocn on coupler pes
195  CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
196  }
197 
198  if( atmComm != MPI_COMM_NULL )
199  {
200  MPI_Comm_rank( atmComm, &rankInAtmComm );
201  ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
202  CHECKIERR( ierr, "Cannot register ATM App" )
203  ierr = iMOAB_LoadMesh( cmpAtmPID, atmFilename.c_str(), readopts.c_str(), &nghlay );
204  CHECKIERR( ierr, "Cannot load atm mesh" )
205  }
206 
207  if( ocnComm != MPI_COMM_NULL )
208  {
209  MPI_Comm_rank( ocnComm, &rankInOcnComm );
210  ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
211  CHECKIERR( ierr, "Cannot register OCN App" )
212  }
213  MPI_Barrier( MPI_COMM_WORLD );
214 
215  ierr =
216  setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
217  &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
218  CHECKIERR( ierr, "Cannot set-up target meshes" )
219 #ifdef VERBOSE
220  if( couComm != MPI_COMM_NULL )
221  {
222  char outputFileTgt3[] = "recvTgt.h5m";
223  ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt3, fileWriteOptions );
224  CHECKIERR( ierr, "cannot write target mesh after receiving on coupler" )
225  }
226 #endif
227  CHECKIERR( ierr, "Cannot load and distribute target mesh" )
228  MPI_Barrier( MPI_COMM_WORLD );
229 
230  if( couComm != MPI_COMM_NULL )
231  {
232  // now load map between OCNx and ATMx on coupler PEs
233  ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
234  CHECKIERR( ierr, "Cannot register ocn_atm map instance over coupler pes " )
235  }
236 
237  const std::string intx_from_file_identifier = "map-from-file";
238 
239  if( couComm != MPI_COMM_NULL )
240  {
241  int col_or_row = 0; // row based partition
242  int type = 3; // target is FV cell with global ID as DOFs
243  ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, cplOcnPID, &col_or_row, &type,
244  intx_from_file_identifier.c_str(), mapFilename.c_str() );
245  CHECKIERR( ierr, "failed to load map file from disk" );
246  }
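 // Note: col_or_row = 0 requests a row-based partition of the map; each row of the weight
 // matrix corresponds to a target DOF, so ownership on the coupler follows the target (ocean)
 // distribution, as described in the header comment of this file.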
247 
248  if( atmCouComm != MPI_COMM_NULL )
249  {
250  int type = types[0]; // FV
251  int direction = 1; // from source to coupler; will create a mesh on cplAtmPID
252  // because it is like "coverage", context will be cplocn
253  ierr = iMOAB_MigrateMapMesh( cmpAtmPID, cplAtmOcnPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup, &type,
254  &cmpatm, &cplocn, &direction );
255  CHECKIERR( ierr, "failed to migrate mesh for atm on coupler" );
256 #ifdef VERBOSE
257  if( *cplAtmPID >= 0 )
258  {
259  char prefix[] = "atmcov";
260  ierr = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
261  CHECKIERR( ierr, "failed to write local mesh" );
262  }
263 #endif
264  }
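 // The migration above creates the coverage mesh for the source on the coupler PEs and records a
 // parallel comm graph between the atm component (cmpatm) and the coupler, in the cplocn context;
 // the iMOAB_SendElementTag / iMOAB_ReceiveElementTag calls further below reuse that graph to move
 // the field data onto the coverage mesh.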
265  MPI_Barrier( MPI_COMM_WORLD );
266 
267  int tagIndex[2];
268  int tagTypes[2] = { DENSE_DOUBLE, DENSE_DOUBLE };
269  int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = disc_orders[1] * disc_orders[1] /*FV*/;
270 
271  const char* bottomTempField = "AnalyticalSolnSrcExact";
272  const char* bottomTempProjectedField = "Target_proj";
273 
274  if( couComm != MPI_COMM_NULL )
275  {
276  ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
277  CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );
278 
279  ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
280  CHECKIERR( ierr, "failed to define the field tag Target_proj" );
281  }
282 
283  if( analytic_field && ( atmComm != MPI_COMM_NULL ) ) // we are on source /atm pes
284  {
285  // cmpOcnPID, "T_proj;u_proj;v_proj;"
286  ierr = iMOAB_DefineTagStorage( cmpAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
287  CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );
288 
289  int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
290  /*
291  * Each process in the communicator will have access to a local mesh instance, which will contain the
292  * original cells in the local partition and ghost entities. Number of vertices, primary cells, visible
293  * blocks, and numbers of sideset and nodeset boundary conditions are returned in size-3 arrays,
294  * for local, ghost and total numbers.
295  */
296  ierr = iMOAB_GetMeshInfo( cmpAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
297  CHECKIERR( ierr, "failed to get num primary elems" );
298  int numAllElem = nelem[2];
299  int eetype = 1;
300 
301  if( types[0] == 2 ) // point cloud
302  {
303  numAllElem = nverts[2];
304  eetype = 0;
305  }
306  std::vector< double > vals;
307  int storLeng = atmCompNDoFs * numAllElem;
308  vals.resize( storLeng );
309  for( int k = 0; k < storLeng; k++ )
310  vals[k] = k;
311 
312  ierr = iMOAB_SetDoubleTagStorage( cmpAtmPID, bottomTempField, &storLeng, &eetype, &vals[0] );
313  CHECKIERR( ierr, "cannot make analytical tag" )
314  }
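 // The block above simply fills the source tag with an increasing sequence (vals[k] = k), so
 // there is a nonzero, easily recognizable field to project when the analytic option is used.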
315 
316  // we need to make sure that the coverage mesh (created during the migration step) receives the tag
317  // that will be projected to the target; so far, the coverage mesh has only the ids and global dofs;
318  // the migrate method would need to change to accommodate any GLL tag
319  // now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage mesh
320  // (cplAtmPID), using the new coverage graph communicator
321 
322  // make the tag 0, to check we are actually sending needed data
323  {
324  if( cplAtmAppID >= 0 )
325  {
326  int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
327  /*
328  * Each process in the communicator will have access to a local mesh instance, which
329  * will contain the original cells in the local partition and ghost entities. Number of
330  * vertices, primary cells, visible blocks, number of sidesets and nodesets boundary
331  * conditions are returned in size-3 arrays, for local, ghost and total
332  * numbers.
333  */
334  ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
335  CHECKIERR( ierr, "failed to get num primary elems" );
336  int numAllElem = nelem[2];
337  int eetype = 1;
338  if( types[0] == 2 ) // Point cloud
339  {
340  eetype = 0; // vertices
341  numAllElem = nverts[2];
342  }
343  std::vector< double > vals;
344  int storLeng = atmCompNDoFs * numAllElem;
345 
346  vals.resize( storLeng );
347  for( int k = 0; k < storLeng; k++ )
348  vals[k] = 0.;
349 
350  ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomTempField, &storLeng, &eetype, &vals[0] );
351  CHECKIERR( ierr, "cannot make tag null" )
352 
353  // set the tag to 0
354  }
355  }
356 
357  const char* concat_fieldname = "AnalyticalSolnSrcExact";
358  const char* concat_fieldnameT = "Target_proj";
359 
360  {
361 
362  PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
363  if( atmComm != MPI_COMM_NULL )
364  {
365  // as always, use nonblocking sends
366  // this is for projection to ocean:
367  ierr = iMOAB_SendElementTag( cmpAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cplocn );
368  CHECKIERR( ierr, "cannot send tag values" )
369  }
370  if( couComm != MPI_COMM_NULL )
371  {
372  // receive on the atm instance on coupler pes, which was redistributed according to coverage
373  ierr = iMOAB_ReceiveElementTag( cplAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cmpatm );
374  CHECKIERR( ierr, "cannot receive tag values" )
375  }
376 
377  // we can now free the sender buffers
378  if( atmComm != MPI_COMM_NULL )
379  {
380  ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn ); // context is for ocean
381  CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
382  }
384 #ifdef VERBOSE
385  if( *cplAtmPID >= 0 )
386  {
387  char prefix[] = "atmcov_withdata";
388  ierr = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
389  CHECKIERR( ierr, "failed to write local atm cov mesh with data" );
390  }
391 #endif
392 
393  if( couComm != MPI_COMM_NULL )
394  {
395  /* We have the remapping weights now. Let us apply the weights onto the tag we defined
396  on the source mesh and get the projection on the target mesh */
397  PUSH_TIMER( "Apply Scalar projection weights" )
398  ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, intx_from_file_identifier.c_str(),
399  concat_fieldname, concat_fieldnameT );
400  CHECKIERR( ierr, "failed to compute projection weight application" );
401  POP_TIMER( couComm, rankInCouComm )
402 
403  {
404  char outputFileTgt[] = "fOcnOnCpl5.h5m";
405  ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
406  CHECKIERR( ierr, "could not write fOcnOnCpl.h5m to disk" )
407  }
408  }
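 // iMOAB_ApplyScalarProjectionWeights applies the weights loaded earlier under the
 // "map-from-file" identifier: it reads the source tag from the coverage (atm) mesh on the
 // coupler and writes the projected values into the target tag on the ocean mesh on the coupler.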
409 
410  // send the projected tag back to ocean pes, with send/receive tag
411  if( ocnComm != MPI_COMM_NULL )
412  {
413  int tagIndexIn2;
414  ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs,
415  &tagIndexIn2 );
416  CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
417  "Target_proj on ocn pes" );
418  }
419  // send the tag to ocean pes, from ocean mesh on coupler pes
420  // from couComm, using common joint comm ocn_coupler
421  // as always, use nonblocking sends
422  // original graph (context is -1)
423  if( couComm != MPI_COMM_NULL )
424  {
425  // need to use ocean comp id for context
426  context_id = cmpocn; // id for ocean on comp
427  ierr = iMOAB_SendElementTag( cplOcnPID, "Target_proj", &ocnCouComm, &context_id );
428  CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
429  }
430 
431  // receive on component 2, ocean
432  if( ocnComm != MPI_COMM_NULL )
433  {
434  context_id = cplocn; // id for ocean on coupler
435  ierr = iMOAB_ReceiveElementTag( cmpOcnPID, "Target_proj", &ocnCouComm, &context_id );
436  CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
437  }
438 
439  if( couComm != MPI_COMM_NULL )
440  {
441  context_id = cmpocn;
442  ierr = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
443  CHECKIERR( ierr, "cannot free buffers for Target_proj tag migration " )
444  }
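 // Pattern for moving the projected field back to the ocean component: the coupler-side send uses
 // the ocean component id (cmpocn) as context, the component-side receive uses the coupler-side id
 // (cplocn), and, because the sends are nonblocking, the sender buffers must be freed explicitly
 // once the exchange completes.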
445  MPI_Barrier( MPI_COMM_WORLD );
446 
447  if( ocnComm != MPI_COMM_NULL )
448  {
449 #ifdef VERBOSE
450  char outputFileOcn[] = "OcnWithProj.h5m";
451  ierr = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
452  CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
453 #endif
454  // test results only for n == 1, for bottomTempProjectedField
455  if( !no_regression_test )
456  {
457  // the same as remap test
458  // get temp field on ocean, from conservative, the global ids, and dump to the baseline file
459  // first get GlobalIds from ocn, and fields:
460  int nverts[3], nelem[3];
461  ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
462  CHECKIERR( ierr, "failed to get ocn mesh info" );
463  std::vector< int > gidElems;
464  gidElems.resize( nelem[2] );
465  std::vector< double > tempElems;
466  tempElems.resize( nelem[2] );
467  // get global id storage
468  const std::string GidStr = "GLOBAL_ID"; // hard coded too
469  int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
470  ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
471  CHECKIERR( ierr, "failed to define global id tag" );
472 
473  int ent_type = 1;
474  ierr = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
475  CHECKIERR( ierr, "failed to get global ids" );
476  ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, bottomTempProjectedField, &nelem[2], &ent_type,
477  &tempElems[0] );
478  CHECKIERR( ierr, "failed to get temperature field" );
479  int err_code = 1;
480  check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
481  if( 0 == err_code )
482  std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
483  }
484  }
485 
486  } // end projection / field-transfer block
487 
488  if( couComm != MPI_COMM_NULL )
489  {
490  ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
491  CHECKIERR( ierr, "cannot deregister app intx AO" )
492  }
493  if( ocnComm != MPI_COMM_NULL )
494  {
495  ierr = iMOAB_DeregisterApplication( cmpOcnPID );
496  CHECKIERR( ierr, "cannot deregister app OCN1" )
497  }
498 
499  if( atmComm != MPI_COMM_NULL )
500  {
501  ierr = iMOAB_DeregisterApplication( cmpAtmPID );
502  CHECKIERR( ierr, "cannot deregister app ATM1" )
503  }
504 
505  if( couComm != MPI_COMM_NULL )
506  {
507  ierr = iMOAB_DeregisterApplication( cplOcnPID );
508  CHECKIERR( ierr, "cannot deregister app OCNX" )
509  }
510 
511  if( couComm != MPI_COMM_NULL )
512  {
513  ierr = iMOAB_DeregisterApplication( cplAtmPID );
514  CHECKIERR( ierr, "cannot deregister app ATMX" )
515  }
516 
517  //#endif
518  ierr = iMOAB_Finalize();
519  CHECKIERR( ierr, "did not finalize iMOAB" )
520 
521  // free atm coupler group and comm
522  if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
523  MPI_Group_free( &joinAtmCouGroup );
524  if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );
525 
526  if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
527  // free ocn - coupler group and comm
528  if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
529  MPI_Group_free( &joinOcnCouGroup );
530 
531  if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );
532 
533  MPI_Group_free( &atmPEGroup );
534 
535  MPI_Group_free( &ocnPEGroup );
536 
537  MPI_Group_free( &couPEGroup );
538  MPI_Group_free( &jgroup );
539 
540  MPI_Finalize();
541 
542  return 0;
543 }