#ifndef MOAB_HAVE_MPI
#error mbtempest tool requires MPI configuration
#endif
#ifndef MOAB_HAVE_TEMPESTREMAP
#error The climate coupler test example requires MOAB configuration with TempestRemap
#endif
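
// This example reads an atmosphere ( source ) and an ocean ( target ) mesh on
// separate MPI task layouts, loads a precomputed source-to-target map on the
// coupler tasks, migrates the source mesh and a solution tag to the coupler,
// applies the projection weights there, sends the projected field back to the
// ocean tasks, and optionally checks the result against a stored baseline.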
int main( int argc, char* argv[] )
{
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
    int ierr;

    MPI_Init( &argc, &argv );
    MPI_Group jgroup;
    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // the joint group of all MPI tasks
    std::string atmFilename = TestDir + "unittest/srcWithSolnTag.h5m";
    std::string ocnFilename = TestDir + "unittest/outTri15_8.h5m";
    std::string mapFilename = TestDir + "unittest/mapNE20_FV15.nc";
    std::string baseline    = TestDir + "unittest/baseline2.txt";
    int rankInOcnComm = -1;
    int cmpocn = 17, cplocn = 18 /* , ... further component ids elided */;  // ids are unique over all pes
    int rankInCouComm = -1;
    int repartitioner_scheme = 0;
#ifdef MOAB_HAVE_ZOLTAN
    repartitioner_scheme = 2;  // use the Zoltan-based partitioner when it is available
#endif
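    // the scheme matters when the component meshes are migrated to the coupler
    // tasks: 0 presumably keeps a trivial partition, while the Zoltan option
    // rebalances elements across the receiving tasks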
    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
    opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
    opts.addOpt< std::string >( "map_file,w", "map file from source to target", &mapFilename );
    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
    opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );
    int types[2]       = { 3, 3 };  // discretization types of source and target
    int disc_orders[2] = { 1, 1 };  // discretization orders of source and target
    opts.addOpt< int >( "typeSource,x", "source type", &types[0] );
    opts.addOpt< int >( "typeTarget,y", "target type", &types[1] );
    opts.addOpt< int >( "orderSource,u", "source order", &disc_orders[0] );
    opts.addOpt< int >( "orderTarget,v", "target order", &disc_orders[1] );
    bool analytic_field = false;
    opts.addOpt< void >( "analytic,q", "analytic field", &analytic_field );

    bool no_regression_test = false;
    opts.addOpt< void >( "no_regression,r", "do not run the regression test against the baseline", &no_regression_test );

    opts.parseCommandLine( argc, argv );
    std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1
              << "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2
              << "\n map file: " << mapFilename << "\n on tasks : " << startG4 << ":" << endG4 << "\n";
    if( !no_regression_test )
    {
        std::cout << " check projection against baseline: " << baseline << "\n";
    }
    // create the MPI group and ( possibly null ) communicator for each task layout
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    // ... ( group and communicator creation elided )
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator" )
    MPI_Group ocnPEGroup;
    MPI_Comm ocnComm;
    // ... ( creation elided )
    CHECKIERR( ierr, "Cannot create ocn MPI group and communicator" )
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    // ... ( creation elided )
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator" )
    // joint groups/communicators span one component layout plus the coupler layout
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    // ... ( creation elided )
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )
    MPI_Group joinOcnCouGroup;
    MPI_Comm ocnCouComm;
    // ... ( creation elided )
    CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
    int cmpAtmAppID       = -1;  // -1 means the application is not initialized yet
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm component
    int cplAtmAppID       = -1;
    iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm copy on the coupler pes
    int cmpOcnAppID       = -1;
    iMOAB_AppID cmpOcnPID = &cmpOcnAppID;  // ocn component
    int cplOcnAppID = -1, cplAtmOcnAppID = -1;
    iMOAB_AppID cplOcnPID    = &cplOcnAppID;     // ocn copy on the coupler pes
    iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // atm-ocn map instance on the coupler pes
    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // ... ( register the coupler-side applications, elided )
    }
    if( atmComm != MPI_COMM_NULL )
    {
        // ... ( register the atm component application, elided )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( ocnComm, &rankInOcnComm );
        // ... ( register the ocn component application, elided )
    }

    int nghlay = 0;  // number of ghost layers when loading the mesh
    // ... read the ocn mesh on its component tasks and migrate it to the coupler
    //     tasks; only the tail of the elided call is preserved here:
    //         ..., &couPEGroup, &ocnCouComm, ocnFilename, readopts,
    //         nghlay, repartitioner_scheme );
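
    // From here on the pattern repeats: define matching tags on the component and
    // coupler copies of a mesh, move tag data component -> coupler with paired
    // send/receive calls, apply the map on the coupler, and move the projected tag
    // coupler -> component the same way.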
    if( couComm != MPI_COMM_NULL )
    {
        char outputFileTgt3[] = "recvTgt.h5m";
        // ... ( write the received target mesh, elided )
        CHECKIERR( ierr, "cannot write target mesh after receiving on coupler" )
    }
    if( couComm != MPI_COMM_NULL )
    {
        // ... ( register the atm-ocn map application on the coupler pes, elided )
        CHECKIERR( ierr, "Cannot register ocn_atm map instance over coupler pes" )
    }

    const std::string intx_from_file_identifier = "map-from-file";
    if( couComm != MPI_COMM_NULL )
    {
        // ... ( col_or_row and type are set in elided code )
        ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, cplOcnPID, &col_or_row, &type,
                                                 intx_from_file_identifier.c_str(), mapFilename.c_str() );
        // ...
    }
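
    // The .nc map file holds remapping weights that were computed offline
    // ( presumably with mbtempest / TempestRemap ); loading them on the coupler pes
    // avoids computing the mesh intersection and the weights at runtime, and the
    // string identifier lets this weight set be referenced in later calls.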
    if( atmCouComm != MPI_COMM_NULL )
    {
        // ... ( type and direction are set in elided code )
        ierr = iMOAB_MigrateMapMesh( cmpAtmPID, cplAtmOcnPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup,
                                     &type, &cmpatm, &cplocn, &direction );
        CHECKIERR( ierr, "failed to migrate mesh for atm on coupler" );
    }
    if( *cplAtmPID >= 0 )
    {
        char prefix[] = "atmcov";
        // ... ( write the local atm coverage mesh, elided )
    }
    int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = disc_orders[1] * disc_orders[1];

    const char* bottomTempField          = "AnalyticalSolnSrcExact";
    const char* bottomTempProjectedField = "Target_proj";
    if( couComm != MPI_COMM_NULL )
    {
        // ... ( define the source tag on the coupler copy of the atm mesh, elided )
        CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );
        // ... ( define the projected tag on the coupler copy of the ocn mesh, elided )
        CHECKIERR( ierr, "failed to define the field tag Target_proj" );
    }
    if( analytic_field && ( atmComm != MPI_COMM_NULL ) )  // on the atm component tasks
    {
        // ... ( define the tag on the atm component mesh, elided )
        CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );

        int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
        // ... ( query local/ghost/total entity counts, elided )
        int numAllElem = nelem[2];
        // ... ( for a point-cloud source the vertex count is used instead:
        //       numAllElem = nverts[2]; )

        std::vector< double > vals;
        int storLeng = atmCompNDoFs * numAllElem;
        vals.resize( storLeng );
        for( int k = 0; k < storLeng; k++ )
        {
            // ... ( vals[k] is assigned the analytic field value, elided )
        }
        // ... ( the values are stored in the AnalyticalSolnSrcExact tag, elided )
    }
    if( cplAtmAppID >= 0 )  // on the coupler copy of the atm mesh
    {
        int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
        // ... ( query entity counts on the coupler copy, elided )
        int numAllElem = nelem[2];
        // ... ( for a point-cloud source the vertex count is used instead:
        //       numAllElem = nverts[2]; )

        std::vector< double > vals;
        int storLeng = atmCompNDoFs * numAllElem;
        vals.resize( storLeng );
        for( int k = 0; k < storLeng; k++ )
        {
            // ... ( elided )
        }
    }
    const char* concat_fieldname  = "AnalyticalSolnSrcExact";
    const char* concat_fieldnameT = "Target_proj";
    PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_SendElementTag( cmpAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cplocn );
        // ...
    }
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveElementTag( cplAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cmpatm );
        // ...
    }
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn );
        CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
    }
    if( *cplAtmPID >= 0 )
    {
        char prefix[] = "atmcov_withdata";
        // ... ( write the local atm coverage mesh with the received data, elided )
        CHECKIERR( ierr, "failed to write local atm cov mesh with data" );
    }
    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Apply Scalar projection weights" )
        ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, intx_from_file_identifier.c_str(),
                                                   concat_fieldname, concat_fieldnameT );
        CHECKIERR( ierr, "failed to compute projection weight application" );
        // ... ( timer pop and related bookkeeping elided )

        char outputFileTgt[] = "fOcnOnCpl5.h5m";
        // ... ( write the projected field on the coupler copy of the ocn mesh, elided )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        // ... ( tag sizes and types are set in elided code )
        CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
                         "Target_proj on ocn pes" );
    }
    // send the projected tag from the coupler back to the ocn component tasks
    if( couComm != MPI_COMM_NULL )
    {
        // ... ( context_id is set in elided code )
        ierr = iMOAB_SendElementTag( cplOcnPID, "Target_proj", &ocnCouComm, &context_id );
        CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveElementTag( cmpOcnPID, "Target_proj", &ocnCouComm, &context_id );
        CHECKIERR( ierr, "cannot receive tag values from the ocean mesh copy on the coupler pes" )
    }
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
        CHECKIERR( ierr, "cannot free buffers for Target_proj tag migration" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        char outputFileOcn[] = "OcnWithProj.h5m";
        // ... ( write the ocn mesh with the projected field, elided )
    }
    if( !no_regression_test )
    {
        // the check runs on the ocn component tasks
        int nverts[3], nelem[3];
        // ... ( query entity counts on the ocn component mesh, elided )
        std::vector< int > gidElems;
        gidElems.resize( nelem[2] );
        std::vector< double > tempElems;
        tempElems.resize( nelem[2] );

        const std::string GidStr = "GLOBAL_ID";
        // ... ( read the GLOBAL_ID and Target_proj tag values into the arrays, elided )
        int err_code = 0;
        check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
        if( 0 == err_code )
            std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
    }
    // deregister the iMOAB applications ( bodies elided )
    if( couComm != MPI_COMM_NULL ) { /* ... */ }
    if( ocnComm != MPI_COMM_NULL ) { /* ... */ }
    if( atmComm != MPI_COMM_NULL ) { /* ... */ }
    if( couComm != MPI_COMM_NULL ) { /* ... */ }
    if( couComm != MPI_COMM_NULL ) { /* ... */ }
    // free the joint and per-layout communicators, then the groups
    if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
    MPI_Group_free( &joinAtmCouGroup );
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );

    if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );

    if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
    MPI_Group_free( &joinOcnCouGroup );

    if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );

    MPI_Group_free( &atmPEGroup );
    MPI_Group_free( &ocnPEGroup );
    MPI_Group_free( &couPEGroup );
    MPI_Group_free( &jgroup );

    MPI_Finalize();
    return 0;
}