Mesh Oriented datABase (version 5.5.0)
An array-based unstructured mesh library
ReduceExchangeTags.cpp
/** @example ReduceExchangeTags.cpp
 * \brief Example program that demonstrates how to exchange tag data between parallel
 * processors in order to synchronize data on shared entities. It also shows how to reduce
 * tag data on shared entities, where the user can apply any of the operations supported
 * by MPI_Op. \n
 *
 * <b>This example</b>:
 * -# Initialize MPI and instantiate MOAB
 * -# Get user options: input mesh file name, tag name (default: USERTAG), tag value
 * (default: 1.0)
 * -# Create the root and partition sets
 * -# Instantiate ParallelComm and read the mesh file in parallel using appropriate options
 * -# Create two tags: USERTAG_EXC (exchange) and USERTAG_RED (reduction)
 * -# Set tag data and exchange shared entity information between processors
 * -# Get entities in all dimensions and set local (current rank, dimension) dependent data for
 * the exchange tag (USERTAG_EXC)
 * -# Perform the exchange of tag data so that data on shared entities are synced via
 * ParallelComm
 * -# Set tag data and reduce shared entity information between processors using MPI_SUM
 * -# Get higher-dimensional entities in the current partition and set local (current rank)
 * dependent data for the reduce tag (USERTAG_RED)
 * -# Perform the reduction operation (MPI_SUM) on shared entities via ParallelComm
 * -# Destroy the MOAB instance and finalize MPI
 *
 * <b>To run:</b> \n mpiexec -n 2 ./ReduceExchangeTags <mesh_file> <tag_name> <tag_value> \n
 * <b>Example:</b> \n mpiexec -n 2 ./ReduceExchangeTags ../MeshFiles/unittest/64bricks_1khex.h5m
 * USERTAG 100 \n
 *
 */
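//
// What to expect in the output: the program writes the mesh to "test.h5m" with both tags
// attached. On shared entities, USERTAG_EXC should hold the owning rank's
// tagValue * ( rank + 1 ) * ( dim + 1 ) value after the exchange, and USERTAG_RED should
// hold the MPI_SUM of the per-rank tagValue * ( rank + 1 ) contributions after the
// reduction. (This is a sketch of the intended behavior, not output captured from a run.)
//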

#include "moab/Core.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelComm.hpp"
#endif
#include "MBParallelConventions.h"
#include <iostream>
#include <string>
#include <sstream>
#include <vector>
#include <cstdlib>

using namespace moab;
using namespace std;

// Error routines for use with MPI API
#define MPICHKERR( CODE, MSG )                               \
    do                                                       \
    {                                                        \
        if( 0 != ( CODE ) )                                  \
        {                                                    \
            cerr << ( MSG ) << endl;                         \
            MPI_Finalize();                                  \
        }                                                    \
    } while( false )

#define dbgprint( MSG )                                      \
    do                                                       \
    {                                                        \
        if( !rank ) cerr << MSG << endl;                     \
    } while( false )

#define dbgprintall( MSG )                                   \
    do                                                       \
    {                                                        \
        cerr << "[" << rank << "]: " << MSG << endl;         \
    } while( false )

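// Note on the helper macros above: MPICHKERR reports an MPI error and finalizes MPI but
// does not terminate the calling function; dbgprint prints a message only on rank 0,
// while dbgprintall prefixes every message with the calling rank.
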
// Function to parse input parameters
ErrorCode get_file_options( int argc, char** argv, string& filename, string& tagName, double& tagValue )
{
    // Get mesh filename
    if( argc > 1 )
        filename = string( argv[1] );
    else
        filename = string( MESH_DIR ) + string( "/64bricks_1khex.h5m" );

    // Get tag selection options
    if( argc > 2 )
        tagName = string( argv[2] );
    else
        tagName = "USERTAG";

    if( argc > 3 )
        tagValue = atof( argv[3] );
    else
        tagValue = 1.0;

    return MB_SUCCESS;
}

//
// Start of main test program
//
int main( int argc, char** argv )
{
#ifdef MOAB_HAVE_MPI
    ErrorCode err;
    int ierr, rank;
    string filename, tagName;
    double tagValue;
    MPI_Comm comm = MPI_COMM_WORLD;
    /// Parallel read options:
    ///   PARALLEL = READ_PART           : each processor reads only the data for its own parts
    ///   PARTITION = PARALLEL_PARTITION : partition as you read, using the PARALLEL_PARTITION
    ///                                    sets stored in the file
    ///   PARALLEL_RESOLVE_SHARED_ENTS   : communicate so that entities shared between
    ///                                    processors are resolved consistently in parallel
    ///   PARALLEL_GHOSTS = a.b.c        : a = 3 - dimension of the entities to ghost,
    ///                                    b = 0 - dimension of the bridge entities (vertices),
    ///                                    c = 1 - number of ghost layers
    ///   PARALLEL_COMM = index          : index (id) of the parallel communicator to use
    string read_options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_"
                          "ENTS;PARTITION_DISTRIBUTE;PARALLEL_GHOSTS=3.0.1;PARALLEL_COMM=0";
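
    // PARALLEL_COMM=0 above selects the ParallelComm instance with id 0; in this example
    // that should be the instance created by the ParallelComm::get_pcomm() call below,
    // which is made before the file is read.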

    // Print usage if not enough arguments
    if( argc < 1 )
    {
        cerr << "Usage: ";
        cerr << argv[0] << " <file_name> <tag_name> <tag_value>" << endl;
        cerr << "file_name : mesh file name" << endl;
        cerr << "tag_name  : name of the tag to add to the mesh" << endl;
        cerr << "tag_value : a double value to set on the highest-dimensional entities of "
                "the mesh for the named tag"
             << endl;

        ierr = MPI_Finalize();
        MPICHKERR( ierr, "MPI_Finalize failed; Aborting" );

        return 1;
    }

    // Initialize MPI first
    ierr = MPI_Init( &argc, &argv );
    MPICHKERR( ierr, "MPI_Init failed" );

    ierr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPICHKERR( ierr, "MPI_Comm_rank failed" );

    dbgprint( "********** reduce_exchange_tags **********\n" );

    // Create the MOAB instance
    Interface* mbi = new( std::nothrow ) Core;
    if( NULL == mbi ) return 1;

    // Get the input options
    err = get_file_options( argc, argv, filename, tagName, tagValue );MB_CHK_SET_ERR( err, "get_file_options failed" );

    // Print out the input parameters
    dbgprint( " Input Parameters - " );
    dbgprint( "   Filename: " << filename );
    dbgprint( "   Tag: Name=" << tagName << " Value=" << tagValue << endl );

    // Create the root and partition sets, then pass them to
    // load_file below so that they get populated.
    EntityHandle rootset, partnset;
    err = mbi->create_meshset( MESHSET_SET, rootset );MB_CHK_SET_ERR( err, "Creating root set failed" );
    err = mbi->create_meshset( MESHSET_SET, partnset );MB_CHK_SET_ERR( err, "Creating partition set failed" );

    // Create the parallel communicator object, associated with MOAB and the partition set handle
    ParallelComm* parallel_communicator = ParallelComm::get_pcomm( mbi, partnset, &comm );

    // Load the file from disk with the given options
    err = mbi->load_file( filename.c_str(), &rootset, read_options.c_str() );MB_CHK_SET_ERR( err, "MOAB::load_file failed" );
    // Create two tag handles, for the exchange and reduction operations
    dbgprint( "-Creating tag handle " << tagName << "..." );
    Tag tagReduce, tagExchange;
    {
        stringstream sstr;
        // Create the exchange tag: default name = USERTAG_EXC
        // (the default value for an integer tag must itself be an integer)
        int tagValueInt = static_cast< int >( tagValue );
        sstr << tagName << "_EXC";
        err = mbi->tag_get_handle( sstr.str().c_str(), 1, MB_TYPE_INTEGER, tagExchange, MB_TAG_CREAT | MB_TAG_DENSE,
                                   &tagValueInt );MB_CHK_SET_ERR( err, "Retrieving tag handles failed" );

        // Create the reduction tag: default name = USERTAG_RED
        sstr.str( "" );
        sstr << tagName << "_RED";
        err = mbi->tag_get_handle( sstr.str().c_str(), 1, MB_TYPE_DOUBLE, tagReduce, MB_TAG_CREAT | MB_TAG_DENSE,
                                   &tagValue );MB_CHK_SET_ERR( err, "Retrieving tag handles failed" );
    }
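
    // Both tags are created dense (MB_TAG_DENSE), so querying an entity whose data was never
    // set explicitly returns the default value supplied at creation time.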

    // Perform the tag data exchange
    dbgprint( "-Exchanging tags between processors " );
    {
        Range partEnts, dimEnts;
        for( int dim = 0; dim <= 3; dim++ )
        {
            // Get all entities of dimension = dim
            dimEnts.clear();  // get_entities_by_dimension appends, so reset the range each pass
            err = mbi->get_entities_by_dimension( rootset, dim, dimEnts, false );MB_CHK_ERR( err );

            vector< int > tagValues( dimEnts.size(), static_cast< int >( tagValue ) * ( rank + 1 ) * ( dim + 1 ) );
            // Set local tag data for exchange
            err = mbi->tag_set_data( tagExchange, dimEnts, tagValues.data() );MB_CHK_SET_ERR( err, "Setting local tag data failed during exchange phase" );
            // Merge entities into parent set
            partEnts.merge( dimEnts );
        }

        // Exchange tags between processors
        err = parallel_communicator->exchange_tags( tagExchange, partEnts );MB_CHK_SET_ERR( err, "Exchanging tags between processors failed" );
    }
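
    // After exchange_tags returns, every processor that shares an entity sees the value the
    // owning processor assigned to it, so the (rank, dim)-dependent values written above
    // become consistent across partition interfaces.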

    // Perform the reduction of tag data
    dbgprint( "-Reducing tags between processors " );
    {
        Range partEnts;
        // Get all higher-dimensional entities belonging to the current partition
        err = parallel_communicator->get_part_entities( partEnts );MB_CHK_SET_ERR( err, "ParallelComm::get_part_entities failed" );

        // Output how many entities are in the current partition set
        dbgprintall( "Number of Partitioned entities: " << partEnts.size() );
        MPI_Barrier( comm );

        // Set local tag data for the reduction
        vector< double > tagValues( partEnts.size(), tagValue * ( rank + 1 ) );
        err = mbi->tag_set_data( tagReduce, partEnts, tagValues.data() );MB_CHK_SET_ERR( err, "Setting local tag data failed during reduce phase" );

        Range dummy;
        // Reduce tag data using MPI_SUM on the interface between partitions
        err = parallel_communicator->reduce_tags( tagReduce, MPI_SUM, dummy /*partEnts*/ );MB_CHK_SET_ERR( err, "Reducing tags between processors failed" );
    }
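
    // A quick sanity check of the reduction (assuming 2 ranks and the default tag value
    // of 1.0): rank 0 writes 1.0 and rank 1 writes 2.0, so an entity shared by both ranks
    // should end up with 1.0 + 2.0 = 3.0 in USERTAG_RED, while entities owned by a single
    // rank keep their local value.
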
    // Write an output file in order to visualize the exchanged/reduced tag data
    err = mbi->write_file( "test.h5m", "H5M", "PARALLEL=WRITE_PART" );MB_CHK_ERR( err );

    // Done, cleanup
    delete mbi;

    dbgprint( "\n********** reduce_exchange_tags DONE! **********" );

    MPI_Finalize();
#else
    std::cout << " Compile MOAB with MPI and HDF5 support for this example to work\n";
#endif
    return 0;
}