// Aggregate the locally measured wall-clock time across MPI ranks:
// MPI_MAX yields the slowest rank's time, MPI_SUM is accumulated on root
// (presumably divided by nprocs elsewhere to form the average — the
// surrounding #if/#else lines are not visible in this view; confirm).
122 MPI_Reduce( &locElapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, pcomm->
comm() );
123 MPI_Reduce( &locElapsed, &avgElapsed, 1, MPI_DOUBLE, MPI_SUM, 0, pcomm->
comm() );
// Serial fallback branch: the local elapsed time is both max and average.
125 maxElapsed = locElapsed;
126 avgElapsed = locElapsed;
// Emit the timing summary for the operation named by opName.
131 std::cout <<
"[LOG] Time taken to " <<
opName.c_str() <<
": max = " << maxElapsed
132 <<
", avg = " << avgElapsed <<
"\n";
// Default values for the command-line options parsed below
// (output filename, discretization method, FV sub-method, DoF tag, order).
143 std::string expectedFName =
"output.exo";
144 std::string expectedMethod =
"fv";
145 std::string expectedFVMethod =
"none";
146 std::string expectedDofTagName =
"GLOBAL_ID";
147 int expectedOrder = 1;
// Echo the full command line for provenance/debugging.
151 std::cout <<
"Command line options provided to mbtempest:\n ";
152 for(
int iarg = 0; iarg < argc; ++iarg )
153 std::cout << argv[iarg] <<
" ";
154 std::cout << std::endl << std::endl;
157 opts.
addOpt<
int >(
"type,t",
158 "Type of mesh (default=CS; Choose from [CS=0, RLL=1, ICO=2, OVERLAP_FILES=3, "
159 "OVERLAP_MEMORY=4, OVERLAP_MOAB=5])",
162 opts.
addOpt<
int >(
"res,r",
"Resolution of the mesh (default=5)", &
blockSize );
164 opts.
addOpt<
void >(
"dual,d",
"Output the dual of the mesh (relevant only for ICO mesh type)", &
computeDual );
166 opts.
addOpt< std::string >(
"file,f",
"Output computed mesh or remapping weights to specified filename",
168 opts.
addOpt< std::string >(
169 "load,l",
"Input mesh filenames for source and target meshes. (relevant only when computing weights)",
172 opts.
addOpt<
void >(
"advfront,a",
173 "Use the advancing front intersection instead of the Kd-tree based algorithm "
174 "to compute mesh intersections. (relevant only when computing weights)" );
176 opts.
addOpt< std::string >(
"intx,i",
"Output TempestRemap intersection mesh filename", &
intxFilename );
178 opts.
addOpt<
void >(
"weights,w",
179 "Compute and output the weights using the overlap mesh (generally "
180 "relevant only for OVERLAP mesh)",
183 opts.
addOpt< std::string >(
"method,m",
"Discretization method for the source and target solution fields",
186 opts.
addOpt<
int >(
"order,o",
"Discretization orders for the source and target solution fields",
189 opts.
addOpt< std::string >(
"global_id,g",
190 "Tag name that contains the global DoF IDs for source and target solution fields",
191 &expectedDofTagName );
193 opts.
addOpt<
void >(
"noconserve",
194 "Do not apply conservation to the resultant weights (relevant only "
195 "when computing weights)",
198 opts.
addOpt<
void >(
"volumetric",
199 "Apply a volumetric projection to compute the weights (relevant only "
200 "when computing weights)",
203 opts.
addOpt<
int >(
"monotonicity",
"Ensure monotonicity in the weight generation. Options=[0,1,2,3]",
206 opts.
addOpt<
void >(
"gnomonic",
"Use Gnomonic plane projections to compute coverage mesh.",
209 opts.
addOpt< std::string >(
"fvmethod",
210 "Sub-type method for FV-FV projections (invdist, delaunay, bilin, "
211 "intbilin, intbilingb, none. Default: none)",
214 opts.
addOpt<
void >(
"enforce_convexity",
"check convexity of input meshes to compute mesh intersections",
217 opts.
addOpt<
void >(
"nobubble",
"do not use bubble on interior of spectral element nodes",
222 "Use sparse solver for constraints when we have high-valence (typical with high-res RLL mesh)",
225 opts.
addOpt<
void >(
"rrmgrids",
226 "At least one of the meshes is a regionally refined grid (relevant to "
227 "accelerate intersection computation)",
230 opts.
addOpt<
void >(
"checkmap",
"Check the generated map for conservation and consistency", &
fCheck );
232 opts.
addOpt<
void >(
"verify",
233 "Verify the accuracy of the maps by projecting analytical functions "
234 "from source to target "
235 "grid by applying the maps",
309 if( expectedFVMethod !=
"none" )
311 mapOptions.strMethod += expectedFVMethod +
";";
339 expectedFName.clear();
// Build the MOAB parallel read-options string for a mesh file, keyed off the
// file extension and (for NetCDF) the dimension names found inside the file.
// NOTE(review): the enclosing function signature is not visible in this view.
361 std::string opts =
"";
// Extract the file extension (text after the last '.').
364 size_t lastindex = filename.find_last_of(
"." );
365 std::string extension = filename.substr( lastindex + 1, filename.size() );
// MOAB-native h5m files: read in parallel using a precomputed partition tag.
366 if( extension ==
"h5m" )
367 return "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS;";
368 else if( extension ==
"nc" )
371 opts =
"PARALLEL=READ_PART;PARTITION_METHOD=RCBZOLTAN;";
// Open with the legacy netCDF C++ API (read-only) just to inspect dimensions.
376 NcFile
ncFile( filename.c_str(), NcFile::ReadOnly );
378 _EXCEPTION1(
"Unable to open grid file \"%s\" for reading", filename.c_str() );
// Count dimension-name signatures to classify the grid format.
381 int iSCRIPFormat = 0, iMPASFormat = 0, iESMFFormat = 0;
// Scan every dimension in the file and tally matches per format.
382 for(
int i = 0; i <
ncFile.num_dims(); i++ )
384 std::string strDimName =
ncFile.get_dim( i )->name();
385 if( strDimName ==
"grid_size" || strDimName ==
"grid_corners" || strDimName ==
"grid_rank" )
387 if( strDimName ==
"nodeCount" || strDimName ==
"elementCount" ||
388 strDimName ==
"maxNodePElement" )
390 if( strDimName ==
"nCells" || strDimName ==
"nEdges" || strDimName ==
"nVertices" ||
391 strDimName ==
"vertexDegree" )
// A format is recognized when all of its signature dimensions were seen
// (3 for ESMF/SCRIP, 4 for MPAS); pick the matching read-option string.
396 if( iESMFFormat == 3 )
398 opts =
"PARALLEL=READ_PART;PARTITION_METHOD=RCBZOLTAN;PARALLEL_RESOLVE_SHARED_ENTS;VARIABLE=;"
400 if( iSCRIPFormat == 3 )
402 opts =
"PARALLEL=READ_PART;PARTITION_METHOD=RCBZOLTAN;";
404 if( iMPASFormat == 4 )
406 opts =
"PARALLEL=READ_PART;PARTITION_METHOD=RCBZOLTAN;"
407 "PARALLEL_RESOLVE_SHARED_ENTS;NO_EDGES;NO_MIXED_ELEMENTS;VARIABLE=;";
// The root rank inspected the file; broadcast its chosen options string
// (length first, then the character data) so all ranks agree.
413 int line_size = opts.size();
414 MPI_Bcast( &line_size, 1, MPI_INT, 0, MPI_COMM_WORLD );
415 if( ctx.
proc_id != 0 ) opts.resize( line_size );
416 MPI_Bcast(
const_cast< char*
>( opts.data() ), line_size, MPI_CHAR, 0, MPI_COMM_WORLD );
// Fallback for unrecognized extensions: root reads, then broadcast/delete.
420 return "PARALLEL=BCAST_DELETE;PARTITION=TRIVIAL;PARALLEL_RESOLVE_SHARED_ENTS;";
425 int main(
int argc,
char* argv[] )
428 NcError
error( NcError::verbose_nonfatal );
429 std::stringstream sstr;
430 std::string historyStr;
432 int proc_id = 0, nprocs = 1;
434 MPI_Init( &argc, &argv );
435 MPI_Comm_rank( MPI_COMM_WORLD, &proc_id );
436 MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
447 for(
int ia = 0; ia < argc; ++ia )
448 historyStr += std::string( argv[ia] ) +
" ";
455 const char* writeOptions = ( nprocs > 1 ?
"PARALLEL=WRITE_PART" :
"" );
458 const char* writeOptions =
"";
462 const double radius_src = 1.0 ;
463 const double radius_dest = 1.0 ;
477 #ifdef MOAB_HAVE_TEMPESTREMAP
483 Mesh* tempest_mesh =
new Mesh();
488 const double epsrel = ReferenceTolerance;
495 const double boxeps = 1e-1;
501 assert( runCtx->
meshes.size() == 3 );
518 outputFormatter.
printf( 0,
"The source set contains %lu vertices and %lu elements \n", rintxverts.
size(),
524 outputFormatter.
printf( 0,
"The target set contains %lu vertices and %lu elements \n", bintxverts.
size(),
533 runCtx->
timer_push(
"setup the intersector" );
541 mbintx->set_parallel_comm( pcomm );
547 rval = mbintx->build_processor_euler_boxes( runCtx->
meshsets[1], local_verts );
MB_CHK_ERR( rval );
553 rval = mbintx->construct_covering_set( runCtx->
meshsets[0], covering_set );
MB_CHK_ERR( rval );
560 runCtx->
timer_push(
"compute intersections with MOAB" );
573 outputFormatter.
printf( 0,
"The intersection set contains %lu elements and %lu vertices \n",
574 intxelems.
size(), intxverts.
size() );
576 double initial_sarea =
579 double initial_tarea =
582 double intx_area = areaAdaptor.
area_on_sphere( mbCore, intxset, radius_src );
584 outputFormatter.
printf( 0,
"mesh areas: source = %12.10f, target = %12.10f, intersection = %12.10f \n",
585 initial_sarea, initial_tarea, intx_area );
586 outputFormatter.
printf( 0,
"relative error w.r.t source = %12.10e, target = %12.10e \n",
587 fabs( intx_area - initial_sarea ) / initial_sarea,
588 fabs( intx_area - initial_tarea ) / initial_tarea );
596 runCtx->
timer_push(
"compute weights with the Tempest meshes" );
598 OfflineMap weightMap;
599 int err = GenerateOfflineMapWithMeshes( *runCtx->
meshes[0], *runCtx->
meshes[1], *runCtx->
meshes[2],
605 std::map< std::string, std::string > mapAttributes;
608 rval = moab::MB_FAILURE;
612 weightMap.Write(
"outWeights.nc", mapAttributes );
634 outputFormatter.
printf( 0,
"The source set contains %lu vertices and %lu elements \n", srcverts.
size(),
647 outputFormatter.
printf( 0,
"The target set contains %lu vertices and %lu elements \n", tgtverts.
size(),
655 runCtx->
timer_push(
"construct covering set for intersection" );
661 runCtx->
timer_push(
"setup and compute mesh intersections" );
668 double local_areas[3],
676 MPI_Allreduce( &local_areas[0], &global_areas[0], 3, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
678 global_areas[0] = local_areas[0];
679 global_areas[1] = local_areas[1];
680 global_areas[2] = local_areas[2];
684 outputFormatter.
printf( 0,
685 "initial area: source mesh = %12.14f, target mesh = "
686 "%12.14f, overlap mesh = %12.14f\n",
687 global_areas[0], global_areas[1], global_areas[2] );
692 outputFormatter.
printf( 0,
"relative error w.r.t source = %12.14e, and target = %12.14e\n",
693 fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
694 fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
719 size_t lastindex = runCtx->
intxFilename.find_last_of(
"." );
721 sstr << runCtx->
intxFilename.substr( 0, lastindex ) <<
".h5m";
723 std::cout <<
"Writing out the MOAB intersection mesh file to " << sstr.str() << std::endl;
726 rval = mbCore->
write_file( sstr.str().c_str(), NULL, writeOptions, &writableOverlapSet, 1 );
MB_CHK_ERR( rval );
732 if( !runCtx->
proc_id ) std::cout << std::endl;
734 runCtx->
timer_push(
"setup computation of weights" );
739 runCtx->
timer_push(
"compute weights with TempestRemap" );
753 const double dNormalTolerance = 1.0E-8;
754 const double dStrictTolerance = 1.0E-12;
756 dNormalTolerance, dStrictTolerance );
// Provenance attributes embedded into the generated offline-map (weights)
// file: tool title, normalization scheme, remap options, mesh flags, and
// the exact command line (historyStr) that produced the map.
761 std::map< std::string, std::string > attrMap;
763 attrMap[
"Title"] =
"MOAB-TempestRemap (mbtempest) Offline Regridding Weight Generator";
764 attrMap[
"normalization"] =
"ovarea";
765 attrMap[
"remap_options"] = runCtx->
mapOptions.strMethod;
772 attrMap[
"concave_a"] = runCtx->
mapOptions.fSourceConcave ?
"true" :
"false";
775 attrMap[
"concave_b"] = runCtx->
mapOptions.fTargetConcave ?
"true" :
"false";
// "bubble" is the logical negation of the fNoBubble flag, hence the
// deliberately inverted "false"/"true" literals below.
776 attrMap[
"bubble"] = runCtx->
mapOptions.fNoBubble ?
"false" :
"true";
// Record the full invocation command line for reproducibility.
777 attrMap[
"history"] = historyStr;
789 runCtx->
timer_push(
"describe a solution on source grid" );
797 runCtx->
timer_push(
"describe a solution on target grid" );
802 &tgtProjectedFunction,
"ProjectedSolnTgt" );
MB_CHK_ERR( rval );
807 runCtx->
timer_push(
"compute solution projection on target grid" );
817 std::vector< int > globIds;
818 globIds.resize( tgtCells.
size() );
819 std::vector< double > vals;
820 vals.resize( tgtCells.
size() );
827 fs.open( runCtx->
baselineFile.c_str(), std::fstream::out );
828 fs << std::setprecision( 15 );
829 for(
size_t i = 0; i < tgtCells.
size(); i++ )
830 fs << globIds[i] <<
" " << vals[i] <<
"\n";
836 runCtx->
timer_push(
"compute error metrics against analytical solution on target grid" );
837 std::map< std::string, double > errMetrics;
839 tgtProjectedFunction, errMetrics,
true );
MB_CHK_ERR( rval );
866 outputFormatter.
printf( 0,
"Creating TempestRemap Mesh object ...\n" );
877 rval = moab::MB_FAILURE;
881 ctx.
meshes.push_back( tempest_mesh );
903 err = GenerateOverlapWithMeshes( *ctx.
meshes[0], *ctx.
meshes[1], *tempest_mesh,
"" ,
904 "NetCDF4",
"exact",
false );
908 rval = moab::MB_FAILURE;
925 const double radius_src = 1.0 ;
926 const double radius_dest = 1.0 ;
930 std::vector< int > smetadata, tmetadata;
934 if( smetadata.size() )
946 addititional_read_opts_tgt.c_str() );
MB_CHK_ERR( rval );
947 if( tmetadata.size() )
964 rval = moab::MB_FAILURE;
968 ctx.
meshes.push_back( tempest_mesh );
973 err = GenerateRLLMesh( *tempest_mesh,
987 rval = moab::MB_FAILURE;
991 ctx.
meshes.push_back( tempest_mesh );
1000 rval = moab::MB_FAILURE;
1004 ctx.
meshes.push_back( tempest_mesh );
1010 std::cout <<
"Tempest Mesh is not a complete object; Quitting...";
// Body of a smooth spherical-harmonic-like test field: 2 + cos^2(lat)*cos(2*lon).
// NOTE(review): the function signatures are above this visible fragment.
1022 return ( 2.0 + cos( dLat ) * cos( dLat ) * cos( 2.0 * dLon ) );
// Body of a high-frequency test field: 2 + sin^16(2*lat)*cos(16*lon).
1027 return ( 2.0 + pow( sin( 2.0 * dLat ), 16.0 ) * cos( 16.0 * dLon ) );
// Parameters of the (presumed) stationary-vortex test field: vortex center
// (dLon0, dLat0), radial scale dR0, smoothing width dD, and pseudo-time dT.
1038 const double dLon0 = 0.0;
1039 const double dLat0 = 0.6;
1040 const double dR0 = 3.0;
1041 const double dD = 5.0;
1042 const double dT = 6.0;
// Rotate the evaluation point into vortex-centered coordinates.
1047 double dSinC = sin( dLat0 );
1048 double dCosC = cos( dLat0 );
1049 double dCosT = cos( dLat );
1050 double dSinT = sin( dLat );
1052 double dTrm = dCosT * cos( dLon - dLon0 );
1053 double dX = dSinC * dTrm - dCosC * dSinT;
1054 double dY = dCosT * sin( dLon - dLon0 );
1055 double dZ = dSinC * dSinT + dCosC * dTrm;
// Recover the rotated longitude; the rotated latitude update (from dZ) is at
// hidden lines just after this one — TODO confirm.
1057 dLon = atan2( dY, dX );
// Radial distance from the vortex center and the tangential velocity profile.
1065 double dRho = dR0 * cos( dLat );
1066 double dVt = 3.0 * sqrt( 3.0 ) / 2.0 / cosh( dRho ) / cosh( dRho ) * tanh( dRho );
// Angular velocity; a guard for dRho == 0 is presumably at the hidden lines
// above (division by zero otherwise) — verify.
1075 dOmega = dVt / dRho;
// Tracer value of the vortex field at the rotated coordinates.
1078 return ( 1.0 - tanh( dRho / dD * sin( dLon - dOmega * dT ) ) );