Example program that shows how to exchange tag data between parallel processors in order to synchronize values on shared entities. It also shows a reduction operation on tag data, where the user can apply any action supported by an MPI_Op to data residing on shared entities.
This example:
 1. Initializes MPI and instantiates MOAB
 2. Reads the user options: input mesh file name, tag name (default: USERTAG), and tag value (default: 1.0)
 3. Reads the mesh file in parallel, resolving shared entities and creating a layer of ghost elements
 4. Creates two dense tags: <tag_name>_EXC (integer, for exchange) and <tag_name>_RED (double, for reduction)
 5. Sets rank- and dimension-dependent data on the exchange tag and synchronizes shared entities via ParallelComm::exchange_tags
 6. Sets rank-dependent data on the reduction tag for partition entities and combines shared values via ParallelComm::reduce_tags with MPI_SUM
 7. Writes the mesh in parallel and finalizes MPI
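To run (assuming the example builds to an executable named ReduceExchangeTags; all three arguments are optional and fall back to the defaults above):
  mpiexec -n 2 ./ReduceExchangeTags <file_name> <tag_name> <tag_value>
  mpiexec -n 2 ./ReduceExchangeTags 64bricks_1khex.h5m USERTAG 100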
#include "moab/Core.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelComm.hpp"
#endif
#include <iostream>
#include <string>
#include <sstream>
#include <vector>
#include <cstdlib>

using namespace moab;
using namespace std;
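
// Error-checking helpers: MPICHKERR reports MPI failures, dbgprint prints only on rank 0,
// and dbgprintall prefixes every message with the calling rank.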
#define MPICHKERR( CODE, MSG )           \
    do                                   \
    {                                    \
        if( 0 != ( CODE ) )              \
        {                                \
            cerr << ( MSG ) << endl;     \
            MPI_Finalize();              \
        }                                \
    } while( false )

#define dbgprint( MSG )                  \
    do                                   \
    {                                    \
        if( !rank ) cerr << MSG << endl; \
    } while( false )

#define dbgprintall( MSG )                           \
    do                                               \
    {                                                \
        cerr << "[" << rank << "]: " << MSG << endl; \
    } while( false )
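
// Parse the command-line options: mesh file name, tag name, and tag value (with defaults for each)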
ErrorCode get_file_options( int argc, char** argv, string& filename, string& tagName, double& tagValues )
{
    // Mesh file name: default is 64bricks_1khex.h5m from the MOAB mesh directory (MESH_DIR)
    if( argc > 1 )
        filename = string( argv[1] );
    else
        filename = string( MESH_DIR ) + string( "/64bricks_1khex.h5m" );

    // Tag name: default is USERTAG
    if( argc > 2 )
        tagName = string( argv[2] );
    else
        tagName = "USERTAG";

    // Tag value: default is 1.0
    if( argc > 3 )
        tagValues = atof( argv[3] );
    else
        tagValues = 1.0;

    return MB_SUCCESS;
}
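
//
// Start of the main program
//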
int main( int argc, char** argv )
{
#ifdef MOAB_HAVE_MPI
    ErrorCode err;
    int ierr, rank;
    string filename, tagName;
    double tagValue;
    MPI_Comm comm = MPI_COMM_WORLD;
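    // Parallel read options:
    //   PARALLEL=READ_PART           : read and distribute the mesh parts across processors
    //   PARTITION=PARALLEL_PARTITION : use the PARALLEL_PARTITION tag to identify the parts
    //   PARALLEL_RESOLVE_SHARED_ENTS : resolve which entities are shared between processors
    //   PARALLEL_GHOSTS=3.0.1        : ghost 3-dimensional elements, bridged through vertices (0), one layer (1)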
    string read_options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_"
                          "ENTS;PARTITION_DISTRIBUTE;PARALLEL_GHOSTS=3.0.1;PARALLEL_COMM=0";
    if( argc < 1 )
    {
        cerr << "Usage: ";
        cerr << argv[0] << " <file_name> <tag_name> <tag_value>" << endl;
        cerr << "file_name : mesh file name" << endl;
        cerr << "tag_name  : name of the tag to add to the mesh" << endl;
        cerr << "tag_value : a double value to set on the highest-dimensional entities in the mesh for the named tag" << endl;

        ierr = MPI_Finalize();
        MPICHKERR( ierr, "MPI_Finalize failed; Aborting" );
        return 1;
    }
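
    // Initialize MPI and determine the rank of this process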
    ierr = MPI_Init( &argc, &argv );
    MPICHKERR( ierr, "MPI_Init failed" );
    ierr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPICHKERR( ierr, "MPI_Comm_rank failed" );
    dbgprint( "********** reduce_exchange_tags **********\n" );
    Interface* mbi = new( std::nothrow ) Core;
    if( NULL == mbi ) return 1;
    // Get the user options: mesh file name, tag name, and tag value
    err = get_file_options( argc, argv, filename, tagName, tagValue );
    MB_CHK_SET_ERR( err, "get_file_options failed" );
    dbgprint( " Tag: Name=" << tagName << " Value=" << tagValue << endl );

    // Create the root and partition sets and the parallel communicator attached to the partition set
    EntityHandle rootset, partnset;
    err = mbi->create_meshset( MESHSET_SET, rootset );MB_CHK_SET_ERR( err, "Creating root set failed" );
    err = mbi->create_meshset( MESHSET_SET, partnset );MB_CHK_SET_ERR( err, "Creating partition set failed" );
    ParallelComm* parallel_communicator = ParallelComm::get_pcomm( mbi, partnset, &comm );

    // Load the file in parallel with the read options defined above
    err = mbi->load_file( filename.c_str(), &rootset, read_options.c_str() );
    MB_CHK_SET_ERR( err, "MOAB::load_file failed" );
    dbgprint( "-Creating tag handle " << tagName << "..." );
    Tag tagReduce, tagExchange;
    {
        stringstream sstr;
        const int defaultInt    = -1;   // default values for the newly created tags
        const double defaultVal = -1.0;
        // Exchange tag (integer): name = <tag_name>_EXC
        sstr << tagName << "_EXC";
        err = mbi->tag_get_handle( sstr.str().c_str(), 1, MB_TYPE_INTEGER, tagExchange, MB_TAG_CREAT | MB_TAG_DENSE, &defaultInt );MB_CHK_SET_ERR( err, "Creating the exchange tag handle failed" );
        // Reduction tag (double): name = <tag_name>_RED
        sstr.str( "" );
        sstr << tagName << "_RED";
        err = mbi->tag_get_handle( sstr.str().c_str(), 1, MB_TYPE_DOUBLE, tagReduce, MB_TAG_CREAT | MB_TAG_DENSE, &defaultVal );MB_CHK_SET_ERR( err, "Creating the reduction tag handle failed" );
    }
    dbgprint( "-Exchanging tags between processors " );
    {
        Range partEnts, dimEnts;
        // Loop over all entity dimensions (vertices, edges, faces, regions)
        for( int dim = 0; dim <= 3; dim++ )
        {
            // Get all entities of dimension dim in the root set
            err = mbi->get_entities_by_dimension( rootset, dim, dimEnts, false );
            MB_CHK_ERR( err );
            // Rank- and dimension-dependent values for the exchange tag
            vector< int > tagValues( dimEnts.size(), static_cast< int >( tagValue ) * ( rank + 1 ) * ( dim + 1 ) );
            err = mbi->tag_set_data( tagExchange, dimEnts, &tagValues[0] );
            MB_CHK_SET_ERR( err, "Setting local tag data failed during exchange phase" );
            // Collect these entities into the set whose tags will be exchanged
            partEnts.merge( dimEnts );
        }
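        // Exchange tag data so that shared entities hold consistent values on every processor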
        err = parallel_communicator->exchange_tags( tagExchange, partEnts );
        MB_CHK_SET_ERR( err, "Exchanging tags between processors failed" );
    }
    dbgprint( "-Reducing tags between processors " );
    {
        Range partEnts;
        // Get all entities owned by the current partition
        err = parallel_communicator->get_part_entities( partEnts );
        MB_CHK_SET_ERR( err, "ParallelComm::get_part_entities failed" );
        dbgprintall( "Number of Partitioned entities: " << partEnts.size() );
        MPI_Barrier( comm );
        // Rank-dependent values for the reduction tag
        vector< double > tagValues( partEnts.size(), tagValue * ( rank + 1 ) );
        err = mbi->tag_set_data( tagReduce, partEnts, &tagValues[0] );
        MB_CHK_SET_ERR( err, "Setting local tag data failed during reduce phase" );
        Range dummy;
        err = parallel_communicator->reduce_tags( tagReduce, MPI_SUM, dummy );
        MB_CHK_SET_ERR( err, "Reducing tags between processors failed" );
    }
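    // Write the mesh, including the exchanged and reduced tags, in parallel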
    err = mbi->write_file( "test.h5m", "H5M", "PARALLEL=WRITE_PART" );
    MB_CHK_ERR( err );
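
    // Done: release the MOAB instance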
    delete mbi;

    dbgprint( "\n********** reduce_exchange_tags DONE! **********" );
    MPI_Finalize();
#else
    std::cout << " Compile MOAB with MPI and HDF5 support for this example to work.\n";
#endif
    return 0;
}