MOAB: Mesh Oriented datABase  (version 5.5.0)
imoab_coupler_utils.hpp
/*
 * imoab_coupler_utils.hpp
 *
 * Created on: Aug. 22, 2020
 * \brief utility methods shared by the imoab*coupler tests, to avoid repetitive tasks;
 *        even the migrate tests can use some of these utilities, e.g.
 * 1) create_group_and_comm( int start, int end, MPI_Group worldGroup, MPI_Group* group, MPI_Comm* comm )
 *
 */

#ifndef TEST_PARALLEL_IMOAB_COUPLER_UTILS_HPP_
#define TEST_PARALLEL_IMOAB_COUPLER_UTILS_HPP_

// headers used by the utilities below (the including test typically provides most of these already)
#include <mpi.h>

#include <cstdio>
#include <iostream>
#include <string>
#include <vector>

#include "moab/iMOAB.h"

#define CHECKIERR( rc, message )                           \
    if( 0 != ( rc ) )                                      \
    {                                                      \
        printf( "%s. ErrorCode = %d\n", message, rc );     \
        return 1;                                          \
    }

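/*
 * Usage sketch (illustrative, not part of the utilities themselves): CHECKIERR is meant to
 * follow any MPI or iMOAB call that returns an integer error code; on failure it prints the
 * message and returns 1 from the enclosing test function.
 *
 *   int rank = -1;
 *   int ierr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
 *   CHECKIERR( ierr, "Cannot get rank" )
 */
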
// Simple timing helpers: PUSH_TIMER records the current time and the name of an operation;
// POP_TIMER reduces the elapsed time over 'localcomm' and has rank 0 print the max and the
// midpoint of the min/max (reported as "avg", not a true mean across ranks). Both macros
// assume the including test defines 'timer' (a moab::CpuTimer), 'timer_ops' (double) and
// 'opName' (std::string).
#define PUSH_TIMER( operation )                  \
    {                                            \
        timer_ops = timer.time_since_birth();    \
        opName    = operation;                   \
    }
#define POP_TIMER( localcomm, localrank )                                                             \
    {                                                                                                 \
        double locElapsed = timer.time_since_birth() - timer_ops, minElapsed = 0, maxElapsed = 0;     \
        MPI_Reduce( &locElapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, localcomm );                 \
        MPI_Reduce( &locElapsed, &minElapsed, 1, MPI_DOUBLE, MPI_MIN, 0, localcomm );                 \
        if( !( localrank ) )                                                                          \
            std::cout << "[LOG] Time taken to " << opName.c_str() << ": max = " << maxElapsed         \
                      << ", avg = " << ( maxElapsed + minElapsed ) / 2 << "\n";                       \
        opName.clear();                                                                               \
    }

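/*
 * Usage sketch (illustrative): the including test is expected to provide the globals used by
 * the timing macros above, e.g.
 *
 *   #include "moab/CpuTimer.hpp"
 *   moab::CpuTimer timer;
 *   double timer_ops;
 *   std::string opName;
 *
 *   PUSH_TIMER( "load mesh" )
 *   // ... the operation being timed ...
 *   POP_TIMER( MPI_COMM_WORLD, rank )  // rank 0 prints the reduced timings
 */
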
/*
 * \brief create an MPI group for the ranks in [start, end] of MPI_COMM_WORLD, and a communicator for that group
 */
int create_group_and_comm( int start, int end, MPI_Group worldGroup, MPI_Group* group, MPI_Comm* comm )
{
    // the new group contains the world ranks start, start+1, ..., end
    std::vector< int > groupTasks;
    groupTasks.resize( end - start + 1, 0 );
    for( int i = start; i <= end; i++ )
        groupTasks[i - start] = i;

    int ierr = MPI_Group_incl( worldGroup, end - start + 1, &groupTasks[0], group );
    CHECKIERR( ierr, "Cannot create group" )

    // collective over MPI_COMM_WORLD; ranks outside the group receive MPI_COMM_NULL
    ierr = MPI_Comm_create( MPI_COMM_WORLD, *group, comm );
    CHECKIERR( ierr, "Cannot create comm" )

    return 0;
}

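/*
 * Usage sketch (illustrative names): build a communicator for ranks 0..2 of MPI_COMM_WORLD,
 * e.g. for a component that runs on the first three ranks.
 *
 *   MPI_Group worldGroup, atmGroup;
 *   MPI_Comm atmComm;
 *   MPI_Comm_group( MPI_COMM_WORLD, &worldGroup );
 *   create_group_and_comm( 0, 2, worldGroup, &atmGroup, &atmComm );
 *   // atmComm is MPI_COMM_NULL on ranks outside [0, 2]
 */
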
int create_joint_comm_group( MPI_Group agroup, MPI_Group bgroup, MPI_Group* abgroup, MPI_Comm* abcomm )
{
    // the joint group is the union of the two groups; the communicator spans both sets of ranks
    int ierr = MPI_Group_union( agroup, bgroup, abgroup );
    CHECKIERR( ierr, "Cannot create joint union group" )

    ierr = MPI_Comm_create( MPI_COMM_WORLD, *abgroup, abcomm );
    CHECKIERR( ierr, "Cannot create joint communicator from union group" )

    return 0;
}

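/*
 * Usage sketch (illustrative names, assuming atmGroup and couGroup were built with
 * create_group_and_comm): create the joint component-coupler communicator used for migration.
 *
 *   MPI_Group atmCouGroup;
 *   MPI_Comm atmCouComm;
 *   create_joint_comm_group( atmGroup, couGroup, &atmCouGroup, &atmCouComm );
 */
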
/*
 * \brief load a component mesh on the component PEs and migrate it to the coupler PEs:
 *        send from the component communicator, receive on the coupler communicator,
 *        then free the sender buffers
 */
int setup_component_coupler_meshes( iMOAB_AppID cmpId,
                                    int cmpTag,
                                    iMOAB_AppID cplCmpId,
                                    int cmpcouTag,
                                    MPI_Comm* cmpcomm,
                                    MPI_Group* cmpPEGroup,
                                    MPI_Comm* coucomm,
                                    MPI_Group* cplPEGroup,
                                    MPI_Comm* cmpcoucomm,
                                    std::string& filename,
                                    std::string& readopts,
                                    int nghlay,
                                    int repartitioner_scheme )
{
    int ierr = 0;
    if( *cmpcomm != MPI_COMM_NULL )
    {
        // load the component mesh on the component PEs
        ierr = iMOAB_LoadMesh( cmpId, filename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load component mesh" )

        // then send the mesh to the coupler PEs
        ierr = iMOAB_SendMesh( cmpId, cmpcoucomm, cplPEGroup, &cmpcouTag,
                               &repartitioner_scheme );  // send to coupler pes
        CHECKIERR( ierr, "cannot send elements" )
    }
    // now receive the mesh on the coupler communicator
    if( *coucomm != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( cplCmpId, cmpcoucomm, cmpPEGroup,
                                  &cmpTag );  // receive from component
        CHECKIERR( ierr, "cannot receive elements on coupler app" )
    }

    // we can now free the sender buffers
    if( *cmpcomm != MPI_COMM_NULL )
    {
        int context_id = cmpcouTag;
        ierr = iMOAB_FreeSenderBuffers( cmpId, &context_id );
        CHECKIERR( ierr, "cannot free buffers used to send the component mesh" )
    }
    return 0;
}

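/*
 * Usage sketch (illustrative names, following the pattern of the imoab coupler tests):
 * migrate the atmosphere component mesh to the coupler PEs.
 *
 *   ierr = setup_component_coupler_meshes( cmpAtmPID, cmpatm, cplAtmPID, atmcouid,
 *                                          &atmComm, &atmPEGroup, &couComm, &couPEGroup,
 *                                          &atmCouComm, atmFilename, readopts, nghlay,
 *                                          repartitioner_scheme );
 *   CHECKIERR( ierr, "Cannot set up atm mesh on coupler" )
 */
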
#endif /* TEST_PARALLEL_IMOAB_COUPLER_UTILS_HPP_ */