Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
moab::ParCommGraph Class Reference

#include <ParCommGraph.hpp>


Public Types

enum  TypeGraph { INITIAL_MIGRATE , COVERAGE , DOF_BASED }
 

Public Member Functions

virtual ~ParCommGraph ()
 
 ParCommGraph (MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1, int coid2)
 collective constructor, will be called on all sender tasks and receiver tasks More...
 
 ParCommGraph (const ParCommGraph &)
 copy constructor will copy only the senders, receivers, compid1, etc More...
 
ErrorCode compute_trivial_partition (std::vector< int > &numElemsPerTaskInGroup1)
 Based on the number of elements on each task in group 1, partition for group 2, trivially. More...
 
ErrorCode pack_receivers_graph (std::vector< int > &packed_recv_array)
 pack information about receivers view of the graph, for future sending to receiver root More...
 
bool is_root_sender ()
 
bool is_root_receiver ()
 
int sender (int index)
 
int receiver (int index)
 
int get_component_id1 ()
 
int get_component_id2 ()
 
int get_context_id ()
 
void set_context_id (int other_id)
 
EntityHandle get_cover_set ()
 
void set_cover_set (EntityHandle cover)
 
ErrorCode split_owned_range (int sender_rank, Range &owned)
 
ErrorCode split_owned_range (Range &owned)
 
ErrorCode send_graph (MPI_Comm jcomm)
 
ErrorCode send_graph_partition (ParallelComm *pco, MPI_Comm jcomm)
 
ErrorCode send_mesh_parts (MPI_Comm jcomm, ParallelComm *pco, Range &owned)
 
ErrorCode receive_comm_graph (MPI_Comm jcomm, ParallelComm *pco, std::vector< int > &pack_array)
 
ErrorCode receive_mesh (MPI_Comm jcomm, ParallelComm *pco, EntityHandle local_set, std::vector< int > &senders_local)
 
ErrorCode release_send_buffers ()
 
ErrorCode send_tag_values (MPI_Comm jcomm, ParallelComm *pco, Range &owned, std::vector< Tag > &tag_handles)
 
ErrorCode receive_tag_values (MPI_Comm jcomm, ParallelComm *pco, Range &owned, std::vector< Tag > &tag_handles)
 
const std::vector< int > & senders ()
 
const std::vector< int > & receivers ()
 
ErrorCode settle_send_graph (TupleList &TLcovIDs)
 
void SetReceivingAfterCoverage (std::map< int, std::set< int > > &idsFromProcs)
 
void settle_comm_by_ids (int comp, TupleList &TLBackToComp, std::vector< int > &valuesComp)
 
ErrorCode set_split_ranges (int comp, TupleList &TLBackToComp1, std::vector< int > &valuesComp1, int lenTag, Range &ents_of_interest, int type)
 
ErrorCode form_tuples_to_migrate_mesh (Interface *mb, TupleList &TLv, TupleList &TLc, int type, int lenTagType1)
 
ErrorCode form_mesh_from_tuples (Interface *mb, TupleList &TLv, TupleList &TLc, int type, int lenTagType1, EntityHandle fset, Range &primary_ents, std::vector< int > &values_entities)
 
ErrorCode compute_partition (ParallelComm *pco, Range &owned, int met)
 
ErrorCode dump_comm_information (std::string prefix, int is_send)
 

Private Member Functions

void find_group_ranks (MPI_Group group, MPI_Comm join, std::vector< int > &ranks)
 find ranks of a group with respect to an encompassing communicator More...
 

Private Attributes

MPI_Comm comm
 
std::vector< int > senderTasks
 
std::vector< int > receiverTasks
 
bool rootSender
 
bool rootReceiver
 
int rankInGroup1
 
int rankInGroup2
 
int rankInJoin
 
int joinSize
 
int compid1
 
int compid2
 
int context_id
 
EntityHandle cover_set
 
std::map< int, std::vector< int > > recv_graph
 
std::map< int, std::vector< int > > recv_sizes
 
std::map< int, std::vector< int > > sender_graph
 
std::map< int, std::vector< int > > sender_sizes
 
std::vector< ParallelComm::Buffer * > localSendBuffs
 
int * comm_graph
 
std::map< int, Range > split_ranges
 
std::vector< MPI_Request > sendReqs
 
std::vector< int > corr_tasks
 
std::vector< int > corr_sizes
 
TypeGraph graph_type
 
std::map< int, std::vector< int > > involved_IDs_map
 
std::map< int, std::vector< int > > map_index
 
std::map< int, std::vector< int > > map_ptr
 

Detailed Description

Definition at line 55 of file ParCommGraph.hpp.

Member Enumeration Documentation

◆ TypeGraph

Enumerator
INITIAL_MIGRATE 
COVERAGE 
DOF_BASED 

Definition at line 58 of file ParCommGraph.hpp.

{
    INITIAL_MIGRATE,
    COVERAGE,
    DOF_BASED
};

Constructor & Destructor Documentation

◆ ~ParCommGraph()

moab::ParCommGraph::~ParCommGraph ( )
virtual

Definition at line 73 of file ParCommGraph.cpp.

{
    // TODO Auto-generated destructor stub
}

◆ ParCommGraph() [1/2]

moab::ParCommGraph::ParCommGraph ( MPI_Comm  joincomm,
MPI_Group  group1,
MPI_Group  group2,
int  coid1,
int  coid2 
)

collective constructor, will be called on all sender tasks and receiver tasks

Parameters
[in] joincomm joint MPI communicator that covers both sender and receiver MPI groups
[in] group1 MPI group formed with the sender tasks (the sender usually loads the mesh in a migrate scenario)
[in] group2 MPI group formed with the receiver tasks (the receiver usually receives the mesh in a migrate scenario)
[in] coid1 sender component unique identifier in a coupled application (in climate simulations this could be the atmosphere component id, 5, or the ocean component id, 17)
[in] coid2 receiver component unique identifier in a coupled application (usually the coupler, with id 2, in E3SM)

This graph is formed on both sender and receiver tasks and, in principle, holds information about how the local entities are distributed on the other side.

Its major role is to help in migrating data from a component to the coupler and vice versa; each local entity has a corresponding task (or tasks) on the other side, to which its data needs to be sent.

Important data stored in ParCommGraph immediately after it is created:

  • the ids of all sender and receiver tasks, with respect to the joint communicator
  • the local rank in the sender and receiver group (-1 if not part of the respective group)
  • the rank in the joint communicator (starting from 0)

Definition at line 20 of file ParCommGraph.cpp.

    : comm( joincomm ), compid1( coid1 ), compid2( coid2 )
{
    // find out the tasks from each group, in the joint communicator
    find_group_ranks( group1, comm, senderTasks );
    find_group_ranks( group2, comm, receiverTasks );

    rootSender = rootReceiver = false;
    rankInGroup1 = rankInGroup2 = rankInJoin = -1;  // not initialized, or not part of the group

    int mpierr = MPI_Group_rank( group1, &rankInGroup1 );
    if( MPI_SUCCESS != mpierr || rankInGroup1 == MPI_UNDEFINED ) rankInGroup1 = -1;

    mpierr = MPI_Group_rank( group2, &rankInGroup2 );
    if( MPI_SUCCESS != mpierr || rankInGroup2 == MPI_UNDEFINED ) rankInGroup2 = -1;

    mpierr = MPI_Comm_rank( comm, &rankInJoin );
    if( MPI_SUCCESS != mpierr )  // it should be a fatal error
        rankInJoin = -1;

    mpierr = MPI_Comm_size( comm, &joinSize );
    if( MPI_SUCCESS != mpierr )  // it should be a fatal error
        joinSize = -1;

    if( 0 == rankInGroup1 ) rootSender = true;
    if( 0 == rankInGroup2 ) rootReceiver = true;
    graph_type = INITIAL_MIGRATE;  // 0
    comm_graph = NULL;
    context_id = -1;
    cover_set  = 0;  // refers to nothing yet
}

References comm, comm_graph, context_id, cover_set, find_group_ranks(), graph_type, INITIAL_MIGRATE, joinSize, rankInGroup1, rankInGroup2, rankInJoin, receiverTasks, rootReceiver, rootSender, and senderTasks.
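The snippet below is a minimal, illustrative sketch (not part of MOAB) of how such a graph could be constructed collectively; the helper name make_graph and the assumption that the first nSenders ranks of the joint communicator are the senders are ours, chosen only for the example.

#include <ParCommGraph.hpp>
#include <mpi.h>
#include <vector>

// Hypothetical helper: build sender/receiver groups from a joint communicator and
// construct a ParCommGraph collectively on every task of both groups.
moab::ParCommGraph* make_graph( MPI_Comm joincomm, int nSenders, int senderCompId, int receiverCompId )
{
    int joinSize;
    MPI_Comm_size( joincomm, &joinSize );

    MPI_Group joinGroup, senderGroup, receiverGroup;
    MPI_Comm_group( joincomm, &joinGroup );

    // assumed layout: ranks [0, nSenders) are senders, the remaining ranks are receivers
    std::vector< int > sendRanks, recvRanks;
    for( int r = 0; r < joinSize; r++ )
    {
        if( r < nSenders )
            sendRanks.push_back( r );
        else
            recvRanks.push_back( r );
    }

    MPI_Group_incl( joinGroup, (int)sendRanks.size(), sendRanks.data(), &senderGroup );
    MPI_Group_incl( joinGroup, (int)recvRanks.size(), recvRanks.data(), &receiverGroup );

    // collective constructor, called on all sender and receiver tasks
    return new moab::ParCommGraph( joincomm, senderGroup, receiverGroup, senderCompId, receiverCompId );
}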

◆ ParCommGraph() [2/2]

moab::ParCommGraph::ParCommGraph ( const ParCommGraph &  src )

copy constructor will copy only the senders, receivers, compid1, etc

Definition at line 53 of file ParCommGraph.cpp.

{
    comm          = src.comm;
    senderTasks   = src.senderTasks;    // these are the sender tasks in joint comm
    receiverTasks = src.receiverTasks;  // these are all the receiver tasks in joint comm
    rootSender    = src.rootSender;
    rootReceiver  = src.rootReceiver;
    rankInGroup1  = src.rankInGroup1;
    rankInGroup2  = src.rankInGroup2;  // group 1 is sender, 2 is receiver
    rankInJoin    = src.rankInJoin;
    joinSize      = src.joinSize;
    compid1       = src.compid1;
    compid2       = src.compid2;
    comm_graph    = NULL;
    graph_type    = src.graph_type;
    context_id    = src.context_id;
    cover_set     = src.cover_set;
    return;
}

References comm, comm_graph, compid1, compid2, context_id, cover_set, graph_type, joinSize, rankInGroup1, rankInGroup2, rankInJoin, receiverTasks, rootReceiver, rootSender, and senderTasks.

Member Function Documentation

◆ compute_partition()

ErrorCode moab::ParCommGraph::compute_partition ( ParallelComm *  pco,
Range &  owned,
int  met 
)

Definition at line 1134 of file ParCommGraph.cpp.

1135 { 1136  // we are on a task on sender, and need to compute a new partition; 1137  // primary cells need to be distributed to nb receivers tasks 1138  // first, we will use graph partitioner, with zoltan; 1139  // in the graph that we need to build, the first layer of ghosts is needed; 1140  // can we avoid that ? For example, we can find out from each boundary edge/face what is the 1141  // other cell (on the other side), then form the global graph, and call zoltan in parallel met 1 1142  // would be a geometric partitioner, and met 2 would be a graph partitioner for method 1 we do 1143  // not need any ghost exchange 1144  1145  // find first edges that are shared 1146  if( owned.empty() ) 1147  return MB_SUCCESS; // nothing to do? empty partition is not allowed, maybe we should return 1148  // error? 1149  Core* mb = (Core*)pco->get_moab(); 1150  1151  double t1, t2, t3; 1152  t1 = MPI_Wtime(); 1153  int primaryDim = mb->dimension_from_handle( *owned.rbegin() ); 1154  int interfaceDim = primaryDim - 1; // should be 1 or 2 1155  Range sharedEdges; 1156  ErrorCode rval; 1157  1158  std::vector< int > shprocs( MAX_SHARING_PROCS ); 1159  std::vector< EntityHandle > shhandles( MAX_SHARING_PROCS ); 1160  1161  Tag gidTag = mb->globalId_tag(); 1162  int np; 1163  unsigned char pstatus; 1164  1165  std::multimap< int, int > extraGraphEdges; 1166  // std::map<int, int> adjCellsId; 1167  std::map< int, int > extraCellsProc; 1168  // if method is 2, no need to do the exchange for adjacent cells across partition boundary 1169  // these maps above will be empty for method 2 (geometry) 1170  if( 1 == met ) 1171  { 1172  rval = pco->get_shared_entities( /*int other_proc*/ -1, sharedEdges, interfaceDim, 1173  /*const bool iface*/ true );MB_CHK_ERR( rval ); 1174  1175 #ifdef VERBOSE 1176  std::cout << " on sender task " << pco->rank() << " number of shared interface cells " << sharedEdges.size() 1177  << "\n"; 1178 #endif 1179  // find to what processors we need to send the ghost info about the edge 1180  // first determine the local graph; what elements are adjacent to each cell in owned range 1181  // cells that are sharing a partition interface edge, are identified first, and form a map 1182  TupleList TLe; // tuple list for cells 1183  TLe.initialize( 2, 0, 1, 0, sharedEdges.size() ); // send to, id of adj cell, remote edge 1184  TLe.enableWriteAccess(); 1185  1186  std::map< EntityHandle, int > edgeToCell; // from local boundary edge to adjacent cell id 1187  // will be changed after 1188  for( Range::iterator eit = sharedEdges.begin(); eit != sharedEdges.end(); eit++ ) 1189  { 1190  EntityHandle edge = *eit; 1191  // get the adjacent cell 1192  Range adjEnts; 1193  rval = mb->get_adjacencies( &edge, 1, primaryDim, false, adjEnts );MB_CHK_ERR( rval ); 1194  if( adjEnts.size() > 0 ) 1195  { 1196  EntityHandle adjCell = adjEnts[0]; 1197  int gid; 1198  rval = mb->tag_get_data( gidTag, &adjCell, 1, &gid );MB_CHK_ERR( rval ); 1199  rval = pco->get_sharing_data( edge, &shprocs[0], &shhandles[0], pstatus, np );MB_CHK_ERR( rval ); 1200  int n = TLe.get_n(); 1201  TLe.vi_wr[2 * n] = shprocs[0]; 1202  TLe.vi_wr[2 * n + 1] = gid; 1203  TLe.vul_wr[n] = shhandles[0]; // the remote edge corresponding to shared edge 1204  edgeToCell[edge] = gid; // store the map between edge and local id of adj cell 1205  TLe.inc_n(); 1206  } 1207  } 1208  1209 #ifdef VERBOSE 1210  std::stringstream ff2; 1211  ff2 << "TLe_" << pco->rank() << ".txt"; 1212  TLe.print_to_file( ff2.str().c_str() ); 1213 #endif 1214  // send the 
data to the other processors: 1215  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLe, 0 ); 1216  // on receiver side, each local edge will have the remote cell adjacent to it! 1217  1218  int ne = TLe.get_n(); 1219  for( int i = 0; i < ne; i++ ) 1220  { 1221  int sharedProc = TLe.vi_rd[2 * i]; // this info is coming from here, originally 1222  int remoteCellID = TLe.vi_rd[2 * i + 1]; // this is the id of the remote cell, on sharedProc 1223  EntityHandle localCell = TLe.vul_rd[i]; // this is now local edge/face on this proc 1224  int localCellId = edgeToCell[localCell]; // this is the local cell adjacent to edge/face 1225  // now, we will need to add to the graph the pair <localCellId, remoteCellID> 1226  std::pair< int, int > extraAdj = std::make_pair( localCellId, remoteCellID ); 1227  extraGraphEdges.insert( extraAdj ); 1228  // adjCellsId [edgeToCell[localCell]] = remoteCellID; 1229  extraCellsProc[remoteCellID] = sharedProc; 1230 #ifdef VERBOSE 1231  std::cout << "local ID " << edgeToCell[localCell] << " remote cell ID: " << remoteCellID << "\n"; 1232 #endif 1233  } 1234  } 1235  t2 = MPI_Wtime(); 1236  if( rootSender ) std::cout << " time preparing the input for Zoltan:" << t2 - t1 << " seconds. \n"; 1237  // so adj cells ids; need to call zoltan for parallel partition 1238 #ifdef MOAB_HAVE_ZOLTAN 1239  ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mb, pco ); 1240  if( 1 <= met ) // partition in zoltan, either graph or geometric partitioner 1241  { 1242  std::map< int, Range > distribution; // how to distribute owned elements by processors in receiving groups 1243  // in how many tasks do we want to be distributed? 1244  int numNewPartitions = (int)receiverTasks.size(); 1245  Range primaryCells = owned.subset_by_dimension( primaryDim ); 1246  rval = mbZTool->partition_owned_cells( primaryCells, extraGraphEdges, extraCellsProc, numNewPartitions, 1247  distribution, met );MB_CHK_ERR( rval ); 1248  for( std::map< int, Range >::iterator mit = distribution.begin(); mit != distribution.end(); mit++ ) 1249  { 1250  int part_index = mit->first; 1251  assert( part_index < numNewPartitions ); 1252  split_ranges[receiverTasks[part_index]] = mit->second; 1253  } 1254  } 1255  // delete now the partitioner 1256  delete mbZTool; 1257 #endif 1258  t3 = MPI_Wtime(); 1259  if( rootSender ) std::cout << " time spent by Zoltan " << t3 - t2 << " seconds. \n"; 1260  return MB_SUCCESS; 1261 }

References moab::Range::begin(), moab::ProcConfig::crystal_router(), moab::Range::empty(), moab::TupleList::enableWriteAccess(), moab::Range::end(), ErrorCode, moab::ParallelComm::get_moab(), moab::TupleList::get_n(), moab::ParallelComm::get_shared_entities(), moab::ParallelComm::get_sharing_data(), moab::TupleList::inc_n(), moab::TupleList::initialize(), MAX_SHARING_PROCS, mb, MB_CHK_ERR, MB_SUCCESS, ZoltanPartitioner::partition_owned_cells(), moab::TupleList::print_to_file(), moab::ParallelComm::proc_config(), moab::ParallelComm::rank(), moab::Range::rbegin(), receiverTasks, rootSender, moab::Range::size(), split_ranges, moab::Range::subset_by_dimension(), moab::TupleList::vi_rd, moab::TupleList::vi_wr, moab::TupleList::vul_rd, and moab::TupleList::vul_wr.

Referenced by iMOAB_SendMesh().
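A hedged usage sketch follows; the wrapper name repartition_for_receivers is hypothetical, and the meaning of met is read from the definition above (met == 1 exchanges adjacencies across the partition boundary and uses Zoltan's graph partitioner, met == 2 uses the geometric partitioner and skips that exchange). MOAB must be built with Zoltan for the partitioner to run.

#include <ParCommGraph.hpp>

// Hypothetical wrapper: on a sender task, repartition the owned primary cells
// for the receiver group using Zoltan (met == 2: geometric, met == 1: graph).
moab::ErrorCode repartition_for_receivers( moab::ParCommGraph& cgraph,
                                           moab::ParallelComm* pco,
                                           moab::Range& ownedCells )
{
    int met = 2;  // geometric partitioner; no adjacency exchange across the partition boundary
    return cgraph.compute_partition( pco, ownedCells, met );
}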

◆ compute_trivial_partition()

ErrorCode moab::ParCommGraph::compute_trivial_partition ( std::vector< int > &  numElemsPerTaskInGroup1)

Based on the number of elements on each task in group 1, partition for group 2, trivially.

Operations: it is called on every receiver task; it decides how all elements are distributed.

Note: establishes how many elements are sent from each task in group 1 to the tasks in group 2. This call is usually made on a root / master process, and constructs local maps (member data) that contain the communication graph in both directions, as well as the number of elements migrated/exchanged between each sender/receiver pair.

Parameters
[in] numElemsPerTaskInGroup1 (std::vector<int> &) number of elements on each sender task

Definition at line 100 of file ParCommGraph.cpp.

101 { 102  103  recv_graph.clear(); 104  recv_sizes.clear(); 105  sender_graph.clear(); 106  sender_sizes.clear(); 107  108  if( numElemsPerTaskInGroup1.size() != senderTasks.size() ) 109  return MB_FAILURE; // each sender has a number of elements that it owns 110  111  // first find out total number of elements to be sent from all senders 112  int total_elems = 0; 113  std::vector< int > accum; 114  accum.push_back( 0 ); 115  116  int num_senders = (int)senderTasks.size(); 117  118  for( size_t k = 0; k < numElemsPerTaskInGroup1.size(); k++ ) 119  { 120  total_elems += numElemsPerTaskInGroup1[k]; 121  accum.push_back( total_elems ); 122  } 123  124  int num_recv = ( (int)receiverTasks.size() ); 125  // in trivial partition, every receiver should get about total_elems/num_receivers elements 126  int num_per_receiver = (int)( total_elems / num_recv ); 127  int leftover = total_elems - num_per_receiver * num_recv; 128  129  // so receiver k will receive [starts[k], starts[k+1] ) interval 130  std::vector< int > starts; 131  starts.resize( num_recv + 1 ); 132  starts[0] = 0; 133  for( int k = 0; k < num_recv; k++ ) 134  { 135  starts[k + 1] = starts[k] + num_per_receiver; 136  if( k < leftover ) starts[k + 1]++; 137  } 138  139  // each sender will send to a number of receivers, based on how the 140  // arrays starts[0:num_recv] and accum[0:sendr] overlap 141  int lastUsedReceiverRank = 0; // first receiver was not treated yet 142  for( int j = 0; j < num_senders; j++ ) 143  { 144  // we could start the receiver loop with the latest receiver that received from previous 145  // sender 146  for( int k = lastUsedReceiverRank; k < num_recv; k++ ) 147  { 148  // if overlap: 149  if( starts[k] < accum[j + 1] && starts[k + 1] > accum[j] ) 150  { 151  recv_graph[receiverTasks[k]].push_back( senderTasks[j] ); 152  sender_graph[senderTasks[j]].push_back( receiverTasks[k] ); 153  154  // we still need to decide what is the overlap 155  int sizeOverlap = 1; // at least 1, for sure 156  // 1 157  if( starts[k] >= accum[j] ) // one end is starts[k] 158  { 159  if( starts[k + 1] >= accum[j + 1] ) // the other end is accum[j+1] 160  sizeOverlap = accum[j + 1] - starts[k]; 161  else // 162  sizeOverlap = starts[k + 1] - starts[k]; 163  } 164  else // one end is accum[j] 165  { 166  if( starts[k + 1] >= accum[j + 1] ) // the other end is accum[j+1] 167  sizeOverlap = accum[j + 1] - accum[j]; 168  else 169  sizeOverlap = starts[k + 1] - accum[j]; 170  } 171  recv_sizes[receiverTasks[k]].push_back( sizeOverlap ); // basically, task k will receive from 172  // sender j, sizeOverlap elems 173  sender_sizes[senderTasks[j]].push_back( sizeOverlap ); 174  if( starts[k] > accum[j + 1] ) 175  { 176  lastUsedReceiverRank = k - 1; // so next k loop will start a little higher, we 177  // probably finished with first few receivers (up 178  // to receiver lastUsedReceiverRank) 179  break; // break the k loop, we distributed all elements from sender j to some 180  // receivers 181  } 182  } 183  } 184  } 185  186  return MB_SUCCESS; 187 }

References MB_SUCCESS, receiverTasks, recv_graph, recv_sizes, sender_graph, sender_sizes, and senderTasks.

Referenced by iMOAB_SendMesh().
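As a self-contained illustration (not MOAB code), the arithmetic of the trivial partition can be reproduced with plain vectors: accumulate the per-sender counts, split the global index range almost evenly across receivers, and read off the receiver intervals. The input values below are made up.

#include <iostream>
#include <vector>

int main()
{
    // example input: number of elements owned by each sender task
    std::vector< int > numElemsPerSender = { 40, 25, 37 };
    int num_recv = 4;

    // prefix sums of sender counts: sender j owns global indices [accum[j], accum[j+1])
    std::vector< int > accum( 1, 0 );
    for( int n : numElemsPerSender )
        accum.push_back( accum.back() + n );
    int total_elems = accum.back();

    // split [0, total_elems) almost evenly: receiver k gets [starts[k], starts[k+1])
    int per_recv = total_elems / num_recv;
    int leftover = total_elems % num_recv;
    std::vector< int > starts( 1, 0 );
    for( int k = 0; k < num_recv; k++ )
        starts.push_back( starts.back() + per_recv + ( k < leftover ? 1 : 0 ) );

    for( int k = 0; k < num_recv; k++ )
        std::cout << "receiver " << k << " gets global elements [" << starts[k] << ", " << starts[k + 1] << ")\n";
    return 0;
}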

◆ dump_comm_information()

ErrorCode moab::ParCommGraph::dump_comm_information ( std::string  prefix,
int  is_send 
)

Definition at line 1606 of file ParCommGraph.cpp.

1607 { 1608  // 1609  if( -1 != rankInGroup1 && 1 == is_send ) // it is a sender task 1610  { 1611  std::ofstream dbfile; 1612  std::stringstream outf; 1613  outf << prefix << "_sender_" << rankInGroup1 << "_joint" << rankInJoin << "_type_" << (int)graph_type << ".txt"; 1614  dbfile.open( outf.str().c_str() ); 1615  1616  if( graph_type == COVERAGE ) 1617  { 1618  for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); 1619  mit != involved_IDs_map.end(); mit++ ) 1620  { 1621  int receiver_proc = mit->first; 1622  std::vector< int >& eids = mit->second; 1623  dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n"; 1624  } 1625  } 1626  else if( graph_type == INITIAL_MIGRATE ) // just after migration 1627  { 1628  for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ ) 1629  { 1630  int receiver_proc = mit->first; 1631  Range& eids = mit->second; 1632  dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n"; 1633  } 1634  } 1635  else if( graph_type == DOF_BASED ) // just after migration, or from computeGraph 1636  { 1637  for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); 1638  mit != involved_IDs_map.end(); mit++ ) 1639  { 1640  int receiver_proc = mit->first; 1641  dbfile << "receiver: " << receiver_proc << " size:" << mit->second.size() << "\n"; 1642  } 1643  } 1644  dbfile.close(); 1645  } 1646  if( -1 != rankInGroup2 && 0 == is_send ) // it is a receiver task 1647  { 1648  std::ofstream dbfile; 1649  std::stringstream outf; 1650  outf << prefix << "_receiver_" << rankInGroup2 << "_joint" << rankInJoin << "_type_" << (int)graph_type 1651  << ".txt"; 1652  dbfile.open( outf.str().c_str() ); 1653  1654  if( graph_type == COVERAGE ) 1655  { 1656  for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); 1657  mit != involved_IDs_map.end(); mit++ ) 1658  { 1659  int sender_proc = mit->first; 1660  std::vector< int >& eids = mit->second; 1661  dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n"; 1662  } 1663  } 1664  else if( graph_type == INITIAL_MIGRATE ) // just after migration 1665  { 1666  for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ ) 1667  { 1668  int sender_proc = mit->first; 1669  Range& eids = mit->second; 1670  dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n"; 1671  } 1672  } 1673  else if( graph_type == DOF_BASED ) // just after migration 1674  { 1675  for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); 1676  mit != involved_IDs_map.end(); mit++ ) 1677  { 1678  int sender_proc = mit->first; 1679  dbfile << "receiver: " << sender_proc << " size:" << mit->second.size() << "\n"; 1680  } 1681  } 1682  dbfile.close(); 1683  } 1684  return MB_SUCCESS; 1685 }

References COVERAGE, DOF_BASED, graph_type, INITIAL_MIGRATE, involved_IDs_map, MB_SUCCESS, rankInGroup1, rankInGroup2, rankInJoin, moab::Range::size(), and split_ranges.

◆ find_group_ranks()

void moab::ParCommGraph::find_group_ranks ( MPI_Group  group,
MPI_Comm  join,
std::vector< int > &  ranks 
)
private

find ranks of a group with respect to an encompassing communicator

Operations: Local, usually called on root process of the group

Parameters
[in] joincomm (MPI_Comm)
[in] group (MPI_Group)
[out] ranks (std::vector<int>) ranks with respect to the joint communicator

Definition at line 81 of file ParCommGraph.cpp.

{
    MPI_Group global_grp;
    MPI_Comm_group( joincomm, &global_grp );

    int grp_size;

    MPI_Group_size( group, &grp_size );
    std::vector< int > rks( grp_size );
    ranks.resize( grp_size );

    for( int i = 0; i < grp_size; i++ )
        rks[i] = i;

    MPI_Group_translate_ranks( group, grp_size, &rks[0], global_grp, &ranks[0] );
    MPI_Group_free( &global_grp );
    return;
}

Referenced by ParCommGraph().

◆ form_mesh_from_tuples()

ErrorCode moab::ParCommGraph::form_mesh_from_tuples ( Interface *  mb,
TupleList &  TLv,
TupleList &  TLc,
int  type,
int  lenTagType1,
EntityHandle  fset,
Range &  primary_ents,
std::vector< int > &  values_entities 
)

Definition at line 1396 of file ParCommGraph.cpp.

1404 { 1405  // might need to fill also the split_range things 1406  // we will always need GlobalID tag 1407  Tag gidtag = mb->globalId_tag(); 1408  // for Type1, we need GLOBAL_DOFS tag; 1409  Tag gds; 1410  ErrorCode rval; 1411  std::vector< int > def_val( lenTagType1, 0 ); 1412  if( type == 1 ) 1413  { 1414  // we may need to create this tag 1415  rval = mb->tag_get_handle( "GLOBAL_DOFS", lenTagType1, MB_TYPE_INTEGER, gds, MB_TAG_CREAT | MB_TAG_DENSE, 1416  &def_val[0] );MB_CHK_ERR( rval ); 1417  } 1418  1419  std::map< int, EntityHandle > vertexMap; // 1420  Range verts; 1421  // always form vertices and add them to the fset; 1422  int n = TLv.get_n(); 1423  EntityHandle vertex; 1424  for( int i = 0; i < n; i++ ) 1425  { 1426  int gid = TLv.vi_rd[2 * i + 1]; 1427  if( vertexMap.find( gid ) == vertexMap.end() ) 1428  { 1429  // need to form this vertex 1430  rval = mb->create_vertex( &( TLv.vr_rd[3 * i] ), vertex );MB_CHK_ERR( rval ); 1431  vertexMap[gid] = vertex; 1432  verts.insert( vertex ); 1433  rval = mb->tag_set_data( gidtag, &vertex, 1, &gid );MB_CHK_ERR( rval ); 1434  // if point cloud, 1435  } 1436  if( 2 == type ) // point cloud, add it to the split_ranges ? 1437  { 1438  split_ranges[TLv.vi_rd[2 * i]].insert( vertexMap[gid] ); 1439  } 1440  // todo : when to add the values_entities ? 1441  } 1442  rval = mb->add_entities( fset, verts );MB_CHK_ERR( rval ); 1443  if( 2 == type ) 1444  { 1445  primary_ents = verts; 1446  values_entities.resize( verts.size() ); // just get the ids of vertices 1447  rval = mb->tag_get_data( gidtag, verts, &values_entities[0] );MB_CHK_ERR( rval ); 1448  return MB_SUCCESS; 1449  } 1450  1451  n = TLc.get_n(); 1452  int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10; // 10 is the max number of vertices in cell 1453  1454  EntityHandle new_element; 1455  Range cells; 1456  std::map< int, EntityHandle > cellMap; // do no tcreate one if it already exists, maybe from other processes 1457  for( int i = 0; i < n; i++ ) 1458  { 1459  int from_proc = TLc.vi_rd[size_tuple * i]; 1460  int globalIdEl = TLc.vi_rd[size_tuple * i + 1]; 1461  if( cellMap.find( globalIdEl ) == cellMap.end() ) // need to create the cell 1462  { 1463  int current_index = 2; 1464  if( 1 == type ) current_index += lenTagType1; 1465  int nnodes = TLc.vi_rd[size_tuple * i + current_index]; 1466  std::vector< EntityHandle > conn; 1467  conn.resize( nnodes ); 1468  for( int j = 0; j < nnodes; j++ ) 1469  { 1470  conn[j] = vertexMap[TLc.vi_rd[size_tuple * i + current_index + j + 1]]; 1471  } 1472  // 1473  EntityType entType = MBQUAD; 1474  if( nnodes > 4 ) entType = MBPOLYGON; 1475  if( nnodes < 4 ) entType = MBTRI; 1476  rval = mb->create_element( entType, &conn[0], nnodes, new_element );MB_CHK_SET_ERR( rval, "can't create new element " ); 1477  cells.insert( new_element ); 1478  cellMap[globalIdEl] = new_element; 1479  rval = mb->tag_set_data( gidtag, &new_element, 1, &globalIdEl );MB_CHK_SET_ERR( rval, "can't set global id tag on cell " ); 1480  if( 1 == type ) 1481  { 1482  // set the gds tag 1483  rval = mb->tag_set_data( gds, &new_element, 1, &( TLc.vi_rd[size_tuple * i + 2] ) );MB_CHK_SET_ERR( rval, "can't set gds tag on cell " ); 1484  } 1485  } 1486  split_ranges[from_proc].insert( cellMap[globalIdEl] ); 1487  } 1488  rval = mb->add_entities( fset, cells );MB_CHK_ERR( rval ); 1489  primary_ents = cells; 1490  if( 1 == type ) 1491  { 1492  values_entities.resize( lenTagType1 * primary_ents.size() ); 1493  rval = mb->tag_get_data( gds, primary_ents, 
&values_entities[0] );MB_CHK_ERR( rval ); 1494  } 1495  else // type == 3 1496  { 1497  values_entities.resize( primary_ents.size() ); // just get the ids ! 1498  rval = mb->tag_get_data( gidtag, primary_ents, &values_entities[0] );MB_CHK_ERR( rval ); 1499  } 1500  return MB_SUCCESS; 1501 }

References ErrorCode, moab::TupleList::get_n(), moab::Range::insert(), mb, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_DENSE, MB_TYPE_INTEGER, MBPOLYGON, MBQUAD, MBTRI, moab::Range::size(), split_ranges, moab::TupleList::vi_rd, and moab::TupleList::vr_rd.

◆ form_tuples_to_migrate_mesh()

ErrorCode moab::ParCommGraph::form_tuples_to_migrate_mesh ( Interface *  mb,
TupleList &  TLv,
TupleList &  TLc,
int  type,
int  lenTagType1 
)

Definition at line 1309 of file ParCommGraph.cpp.

1314 { 1315  // we will always need GlobalID tag 1316  Tag gidtag = mb->globalId_tag(); 1317  // for Type1, we need GLOBAL_DOFS tag; 1318  Tag gds; 1319  ErrorCode rval; 1320  if( type == 1 ) 1321  { 1322  rval = mb->tag_get_handle( "GLOBAL_DOFS", gds ); 1323  } 1324  // find vertices to be sent, for each of the split_ranges procs 1325  std::map< int, Range > verts_to_proc; 1326  int numv = 0, numc = 0; 1327  for( auto it = split_ranges.begin(); it != split_ranges.end(); it++ ) 1328  { 1329  int to_proc = it->first; 1330  Range verts; 1331  if( type != 2 ) 1332  { 1333  rval = mb->get_connectivity( it->second, verts );MB_CHK_ERR( rval ); 1334  numc += (int)it->second.size(); 1335  } 1336  else 1337  verts = it->second; 1338  verts_to_proc[to_proc] = verts; 1339  numv += (int)verts.size(); 1340  } 1341  // first vertices: 1342  TLv.initialize( 2, 0, 0, 3, numv ); // to proc, GLOBAL ID, 3 real coordinates 1343  TLv.enableWriteAccess(); 1344  // use the global id of vertices for connectivity 1345  for( auto it = verts_to_proc.begin(); it != verts_to_proc.end(); it++ ) 1346  { 1347  int to_proc = it->first; 1348  Range& verts = it->second; 1349  for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit ) 1350  { 1351  EntityHandle v = *vit; 1352  int n = TLv.get_n(); // current size of tuple list 1353  TLv.vi_wr[2 * n] = to_proc; // send to processor 1354  1355  rval = mb->tag_get_data( gidtag, &v, 1, &( TLv.vi_wr[2 * n + 1] ) );MB_CHK_ERR( rval ); 1356  rval = mb->get_coords( &v, 1, &( TLv.vr_wr[3 * n] ) );MB_CHK_ERR( rval ); 1357  TLv.inc_n(); // increment tuple list size 1358  } 1359  } 1360  if( type == 2 ) return MB_SUCCESS; // no need to fill TLc 1361  // to proc, ID cell, gdstag, nbv, id conn, 1362  int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10; // 10 is the max number of vertices in cell 1363  1364  std::vector< int > gdvals; 1365  1366  TLc.initialize( size_tuple, 0, 0, 0, numc ); // to proc, GLOBAL ID, 3 real coordinates 1367  TLc.enableWriteAccess(); 1368  for( auto it = split_ranges.begin(); it != split_ranges.end(); it++ ) 1369  { 1370  int to_proc = it->first; 1371  Range& cells = it->second; 1372  for( Range::iterator cit = cells.begin(); cit != cells.end(); ++cit ) 1373  { 1374  EntityHandle cell = *cit; 1375  int n = TLc.get_n(); // current size of tuple list 1376  TLc.vi_wr[size_tuple * n] = to_proc; 1377  int current_index = 2; 1378  rval = mb->tag_get_data( gidtag, &cell, 1, &( TLc.vi_wr[size_tuple * n + 1] ) );MB_CHK_ERR( rval ); 1379  if( 1 == type ) 1380  { 1381  rval = mb->tag_get_data( gds, &cell, 1, &( TLc.vi_wr[size_tuple * n + current_index] ) );MB_CHK_ERR( rval ); 1382  current_index += lenTagType1; 1383  } 1384  // now get connectivity 1385  const EntityHandle* conn = NULL; 1386  int nnodes = 0; 1387  rval = mb->get_connectivity( cell, conn, nnodes );MB_CHK_ERR( rval ); 1388  // fill nnodes: 1389  TLc.vi_wr[size_tuple * n + current_index] = nnodes; 1390  rval = mb->tag_get_data( gidtag, conn, nnodes, &( TLc.vi_wr[size_tuple * n + current_index + 1] ) );MB_CHK_ERR( rval ); 1391  TLc.inc_n(); // increment tuple list size 1392  } 1393  } 1394  return MB_SUCCESS; 1395 }

References moab::Range::begin(), moab::TupleList::enableWriteAccess(), moab::Range::end(), ErrorCode, moab::TupleList::get_n(), moab::TupleList::inc_n(), moab::TupleList::initialize(), mb, MB_CHK_ERR, MB_SUCCESS, moab::Range::size(), split_ranges, moab::TupleList::vi_wr, and moab::TupleList::vr_wr.
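The fixed cell-tuple layout used above (destination task, cell global id, optional GLOBAL_DOFS values for type 1, vertex count, then up to 10 vertex global ids) can be summarized by a small helper; the function name is ours and only restates the size computation from the definition.

// size of one cell tuple: to_proc, global id, [lenTagType1 dof ids], nnodes, up to 10 vertex ids
inline int cell_tuple_size( int type, int lenTagType1 )
{
    const int maxVerticesPerCell = 10;  // fixed upper bound used in the tuple layout
    return 2 + ( type == 1 ? lenTagType1 : 0 ) + 1 + maxVerticesPerCell;
}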

◆ get_component_id1()

int moab::ParCommGraph::get_component_id1 ( )
inline

Definition at line 153 of file ParCommGraph.hpp.

{
    return compid1;
}

References compid1.

◆ get_component_id2()

int moab::ParCommGraph::get_component_id2 ( )
inline

Definition at line 157 of file ParCommGraph.hpp.

{
    return compid2;
}

References compid2.

◆ get_context_id()

int moab::ParCommGraph::get_context_id ( )
inline

Definition at line 162 of file ParCommGraph.hpp.

{
    return context_id;
}

References context_id.

◆ get_cover_set()

EntityHandle moab::ParCommGraph::get_cover_set ( )
inline

Definition at line 171 of file ParCommGraph.hpp.

{
    return cover_set;
}

References cover_set.

Referenced by iMOAB_ReceiveElementTag(), and iMOAB_SendElementTag().

◆ is_root_receiver()

bool moab::ParCommGraph::is_root_receiver ( )
inline

Definition at line 138 of file ParCommGraph.hpp.

{
    return rootReceiver;
}

References rootReceiver.

◆ is_root_sender()

bool moab::ParCommGraph::is_root_sender ( )
inline

Definition at line 133 of file ParCommGraph.hpp.

{
    return rootSender;
}

References rootSender.

Referenced by send_graph(), send_graph_partition(), and send_mesh_parts().

◆ pack_receivers_graph()

ErrorCode moab::ParCommGraph::pack_receivers_graph ( std::vector< int > &  packed_recv_array)

pack information about receivers view of the graph, for future sending to receiver root

Operations: Local, called on root process of the senders group

Parameters
[out] packed_recv_array packed data that will be sent to the root of the receivers and distributed from there. For each receiver it contains, concatenated: the receiver task, the number of senders for that receiver, then those sender tasks; then the same for the next receiver, and so on. Note: only the root of the senders computes this and sends it to the receiver root, which distributes it to each receiver. We do not pack the sizes of the data to be sent, only the senders for each receiver; this could be of size O(n^2), where n is the number of tasks, but in general it should be closer to O(n), since each sender sends to a finite number of receivers and each receiver receives from a finite number of senders. This information is needed to decide how to set up the send/receive pairing for the non-blocking communication.

Definition at line 189 of file ParCommGraph.cpp.

{
    // it will basically look at local data, to pack communication graph, each receiver task will
    // have to post receives for each sender task that will send data to it; the array will be
    // communicated to root receiver, and eventually distributed to receiver tasks

    /*
     * packed_array will have receiver, number of senders, then senders, etc
     */
    if( recv_graph.size() < receiverTasks.size() )
    {
        // big problem, we have empty partitions in receive
        std::cout << " WARNING: empty partitions, some receiver tasks will receive nothing.\n";
    }
    for( std::map< int, std::vector< int > >::iterator it = recv_graph.begin(); it != recv_graph.end(); it++ )
    {
        int recv                    = it->first;
        std::vector< int >& senders = it->second;
        packed_recv_array.push_back( recv );
        packed_recv_array.push_back( (int)senders.size() );

        for( int k = 0; k < (int)senders.size(); k++ )
            packed_recv_array.push_back( senders[k] );
    }

    return MB_SUCCESS;
}

References MB_SUCCESS, receiverTasks, recv_graph, and senders().

Referenced by send_graph().
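For reference, the packed array produced here can be walked with a short loop; the decoder below is only an illustration of the layout (receiver task, number of senders, then the sender tasks, repeated for each receiver), and the function name is ours.

#include <iostream>
#include <vector>

// Print which senders each receiver expects data from, given the packed graph.
void print_packed_graph( const std::vector< int >& packed )
{
    size_t i = 0;
    while( i < packed.size() )
    {
        int recvTask   = packed[i++];
        int numSenders = packed[i++];
        std::cout << "receiver " << recvTask << " expects data from:";
        for( int k = 0; k < numSenders; k++ )
            std::cout << " " << packed[i++];
        std::cout << "\n";
    }
}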

◆ receive_comm_graph()

ErrorCode moab::ParCommGraph::receive_comm_graph ( MPI_Comm  jcomm,
ParallelComm *  pco,
std::vector< int > &  pack_array 
)

Definition at line 358 of file ParCommGraph.cpp.

359 { 360  // first, receive from sender_rank 0, the communication graph (matrix), so each receiver 361  // knows what data to expect 362  MPI_Comm receive = pco->comm(); 363  int size_pack_array, ierr; 364  MPI_Status status; 365  if( rootReceiver ) 366  { 367  /* 368  * MPI_Probe( 369  int source, 370  int tag, 371  MPI_Comm comm, 372  MPI_Status* status) 373  * 374  */ 375  int mtag = compid2; 376  ierr = MPI_Probe( sender( 0 ), mtag, jcomm, &status ); 377  if( 0 != ierr ) 378  { 379  std::cout << " MPI_Probe failure: " << ierr << "\n"; 380  return MB_FAILURE; 381  } 382  // get the count of data received from the MPI_Status structure 383  ierr = MPI_Get_count( &status, MPI_INT, &size_pack_array ); 384  if( 0 != ierr ) 385  { 386  std::cout << " MPI_Get_count failure: " << ierr << "\n"; 387  return MB_FAILURE; 388  } 389 #ifdef VERBOSE 390  std::cout << " receive comm graph size: " << size_pack_array << "\n"; 391 #endif 392  pack_array.resize( size_pack_array ); 393  ierr = MPI_Recv( &pack_array[0], size_pack_array, MPI_INT, sender( 0 ), mtag, jcomm, &status ); 394  if( 0 != ierr ) return MB_FAILURE; 395 #ifdef VERBOSE 396  std::cout << " receive comm graph "; 397  for( int k = 0; k < (int)pack_array.size(); k++ ) 398  std::cout << " " << pack_array[k]; 399  std::cout << "\n"; 400 #endif 401  } 402  403  // now broadcast this whole array to all receivers, so they know what to expect 404  ierr = MPI_Bcast( &size_pack_array, 1, MPI_INT, 0, receive ); 405  if( 0 != ierr ) return MB_FAILURE; 406  pack_array.resize( size_pack_array ); 407  ierr = MPI_Bcast( &pack_array[0], size_pack_array, MPI_INT, 0, receive ); 408  if( 0 != ierr ) return MB_FAILURE; 409  return MB_SUCCESS; 410 }

References moab::ParallelComm::comm(), compid2, MB_SUCCESS, rootReceiver, and sender().

Referenced by iMOAB_ReceiveMesh().
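The probe-then-receive pattern used above, which lets the receiver learn the message size instead of being told in advance, is shown in isolation below; the helper name probe_and_receive is ours.

#include <mpi.h>
#include <vector>

// Receive an integer message of unknown length: probe for it, query its size, then receive.
std::vector< int > probe_and_receive( int source, int mtag, MPI_Comm jcomm )
{
    MPI_Status status;
    MPI_Probe( source, mtag, jcomm, &status );

    int count = 0;
    MPI_Get_count( &status, MPI_INT, &count );

    std::vector< int > data( count );
    MPI_Recv( data.data(), count, MPI_INT, source, mtag, jcomm, &status );
    return data;
}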

◆ receive_mesh()

ErrorCode moab::ParCommGraph::receive_mesh ( MPI_Comm  jcomm,
ParallelComm *  pco,
EntityHandle  local_set,
std::vector< int > &  senders_local 
)

Definition at line 412 of file ParCommGraph.cpp.

416 { 417  ErrorCode rval; 418  int ierr; 419  MPI_Status status; 420  // we also need to fill corresponding mesh info on the other side 421  corr_tasks = senders_local; 422  Range newEnts; 423  424  Tag orgSendProcTag; // this will be a tag set on the received mesh, with info about from what 425  // task / PE the 426  // primary element came from, in the joint communicator ; this will be forwarded by coverage 427  // mesh 428  int defaultInt = -1; // no processor, so it was not migrated from somewhere else 429  rval = pco->get_moab()->tag_get_handle( "orig_sending_processor", 1, MB_TYPE_INTEGER, orgSendProcTag, 430  MB_TAG_DENSE | MB_TAG_CREAT, &defaultInt );MB_CHK_SET_ERR( rval, "can't create original sending processor tag" ); 431  int mtag = compid2; 432  if( !senders_local.empty() ) 433  { 434  for( size_t k = 0; k < senders_local.size(); k++ ) 435  { 436  int sender1 = senders_local[k]; 437  // first receive the size of the buffer using probe 438  /* 439  * MPI_Probe( 440  int source, 441  int tag, 442  MPI_Comm comm, 443  MPI_Status* status) 444  * 445  */ 446  ierr = MPI_Probe( sender1, mtag, jcomm, &status ); 447  if( 0 != ierr ) 448  { 449  std::cout << " MPI_Probe failure in ParCommGraph::receive_mesh " << ierr << "\n"; 450  return MB_FAILURE; 451  } 452  // get the count of data received from the MPI_Status structure 453  int size_pack; 454  ierr = MPI_Get_count( &status, MPI_CHAR, &size_pack ); 455  if( 0 != ierr ) 456  { 457  std::cout << " MPI_Get_count failure in ParCommGraph::receive_mesh " << ierr << "\n"; 458  return MB_FAILURE; 459  } 460  461  /* ierr = MPI_Recv (&size_pack, 1, MPI_INT, sender1, 1, jcomm, &status); 462  if (0!=ierr) return MB_FAILURE;*/ 463  // now resize the buffer, then receive it 464  ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_pack ); 465  // buffer->reserve(size_pack); 466  467  ierr = MPI_Recv( buffer->mem_ptr, size_pack, MPI_UNSIGNED_CHAR, sender1, mtag, jcomm, &status ); 468  if( 0 != ierr ) 469  { 470  std::cout << " MPI_Recv failure in ParCommGraph::receive_mesh " << ierr << "\n"; 471  return MB_FAILURE; 472  } 473  // now unpack the buffer we just received 474  Range entities; 475  std::vector< std::vector< EntityHandle > > L1hloc, L1hrem; 476  std::vector< std::vector< int > > L1p; 477  std::vector< EntityHandle > L2hloc, L2hrem; 478  std::vector< unsigned int > L2p; 479  480  buffer->reset_ptr( sizeof( int ) ); 481  std::vector< EntityHandle > entities_vec( entities.size() ); 482  std::copy( entities.begin(), entities.end(), entities_vec.begin() ); 483  rval = pco->unpack_buffer( buffer->buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, 484  entities_vec ); 485  delete buffer; 486  if( MB_SUCCESS != rval ) return rval; 487  488  std::copy( entities_vec.begin(), entities_vec.end(), range_inserter( entities ) ); 489  // we have to add them to the local set 490  rval = pco->get_moab()->add_entities( local_set, entities ); 491  if( MB_SUCCESS != rval ) return rval; 492  // corr_sizes is the size of primary entities received 493  Range verts = entities.subset_by_dimension( 0 ); 494  Range local_primary_ents = subtract( entities, verts ); 495  if( local_primary_ents.empty() ) 496  { 497  // it is possible that all ents sent were vertices (point cloud) 498  // then consider primary entities the vertices 499  local_primary_ents = verts; 500  } 501  else 502  { 503  // set a tag with the original sender for the primary entity 504  // will be used later for coverage mesh 505  std::vector< int > orig_senders( 
local_primary_ents.size(), sender1 ); 506  rval = pco->get_moab()->tag_set_data( orgSendProcTag, local_primary_ents, &orig_senders[0] ); 507  } 508  corr_sizes.push_back( (int)local_primary_ents.size() ); 509  510  newEnts.merge( entities ); 511  // make these in split ranges 512  split_ranges[sender1] = local_primary_ents; 513  514 #ifdef VERBOSE 515  std::ostringstream partial_outFile; 516  517  partial_outFile << "part_send_" << sender1 << "." 518  << "recv" << rankInJoin << ".vtk"; 519  520  // the mesh contains ghosts too, but they are not part of mat/neumann set 521  // write in serial the file, to see what tags are missing 522  std::cout << " writing from receiver " << rankInJoin << " from sender " << sender1 523  << " entities: " << entities.size() << std::endl; 524  rval = pco->get_moab()->write_file( partial_outFile.str().c_str(), 0, 0, &local_set, 525  1 ); // everything on local set received 526  if( MB_SUCCESS != rval ) return rval; 527 #endif 528  } 529  } 530  // make sure adjacencies are updated on the new elements 531  532  if( newEnts.empty() ) 533  { 534  std::cout << " WARNING: this task did not receive any entities \n"; 535  } 536  // in order for the merging to work, we need to be sure that the adjacencies are updated 537  // (created) 538  Range local_verts = newEnts.subset_by_type( MBVERTEX ); 539  newEnts = subtract( newEnts, local_verts ); 540  Core* mb = (Core*)pco->get_moab(); 541  AEntityFactory* adj_fact = mb->a_entity_factory(); 542  if( !adj_fact->vert_elem_adjacencies() ) 543  adj_fact->create_vert_elem_adjacencies(); 544  else 545  { 546  for( Range::iterator it = newEnts.begin(); it != newEnts.end(); ++it ) 547  { 548  EntityHandle eh = *it; 549  const EntityHandle* conn = NULL; 550  int num_nodes = 0; 551  rval = mb->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval ); 552  adj_fact->notify_create_entity( eh, conn, num_nodes ); 553  } 554  } 555  556  return MB_SUCCESS; 557 }

References moab::Interface::add_entities(), moab::Range::begin(), buffer, compid2, corr_sizes, corr_tasks, moab::AEntityFactory::create_vert_elem_adjacencies(), moab::Range::empty(), moab::Range::end(), entities, ErrorCode, moab::ParallelComm::get_moab(), mb, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_DENSE, MB_TYPE_INTEGER, MBVERTEX, moab::Range::merge(), moab::AEntityFactory::notify_create_entity(), rankInJoin, moab::Range::size(), split_ranges, moab::Range::subset_by_type(), moab::subtract(), moab::Interface::tag_get_handle(), moab::Interface::tag_set_data(), moab::ParallelComm::unpack_buffer(), moab::AEntityFactory::vert_elem_adjacencies(), and moab::Interface::write_file().

Referenced by iMOAB_ReceiveMesh().

◆ receive_tag_values()

ErrorCode moab::ParCommGraph::receive_tag_values ( MPI_Comm  jcomm,
ParallelComm *  pco,
Range &  owned,
std::vector< Tag > &  tag_handles 
)

Receive tag values for the owned entities from the corresponding sender tasks.

Definition at line 786 of file ParCommGraph.cpp.

790 { 791  // opposite to sending, we will use blocking receives 792  int ierr; 793  MPI_Status status; 794  // basically, owned.size() needs to be equal to sum(corr_sizes) 795  // get info about the tag size, type, etc 796  Core* mb = (Core*)pco->get_moab(); 797  // get info about the tag 798  //! Get the size of the specified tag in bytes 799  ErrorCode rval; 800  int total_bytes_per_entity = 0; 801  std::vector< int > vect_bytes_per_tag; 802 #ifdef VERBOSE 803  std::vector< int > tag_sizes; 804 #endif 805  for( size_t i = 0; i < tag_handles.size(); i++ ) 806  { 807  int bytes_per_tag; 808  rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval ); 809  total_bytes_per_entity += bytes_per_tag; 810  vect_bytes_per_tag.push_back( bytes_per_tag ); 811 #ifdef VERBOSE 812  int tag_size; 813  rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval ); 814  tag_sizes.push_back( tag_size ); 815 #endif 816  } 817  818  int mtag = compid1 + compid2; 819  820  if( graph_type == INITIAL_MIGRATE ) 821  { 822  // std::map<int, Range> split_ranges; 823  // rval = split_owned_range ( owned);MB_CHK_ERR ( rval ); 824  825  // use the buffers data structure to allocate memory for receiving the tags 826  for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ ) 827  { 828  int sender_proc = it->first; 829  Range ents = it->second; // primary entities, with the tag data, we will receive 830  int size_buffer = 4 + total_bytes_per_entity * 831  (int)ents.size(); // hopefully, below 2B; if more, we have a big problem ... 832  ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer ); 833  834  buffer->reset_ptr( sizeof( int ) ); 835  836  *( (int*)buffer->mem_ptr ) = size_buffer; 837  // int size_pack = buffer->get_current_size(); // debug check 838  839  ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, sender_proc, mtag, jcomm, &status ); 840  if( ierr != 0 ) return MB_FAILURE; 841  // now set the tag 842  // copy to tag 843  844  for( size_t i = 0; i < tag_handles.size(); i++ ) 845  { 846  rval = mb->tag_set_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) ); 847  buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size(); 848  } 849  delete buffer; // no need for it afterwards 850  MB_CHK_ERR( rval ); 851  } 852  } 853  else if( graph_type == COVERAGE ) // receive buffer, then extract tag data, in a loop 854  { 855  // we know that we will need to receive some tag data in a specific order (by ids stored) 856  // first, get the ids of the local elements, from owned Range; unpack the buffer in order 857  Tag gidTag = mb->globalId_tag(); 858  std::vector< int > gids; 859  gids.resize( owned.size() ); 860  rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval ); 861  std::map< int, EntityHandle > gidToHandle; 862  size_t i = 0; 863  for( Range::iterator it = owned.begin(); it != owned.end(); it++ ) 864  { 865  EntityHandle eh = *it; 866  gidToHandle[gids[i++]] = eh; 867  } 868  // 869  // now, unpack the data and set it to the tag 870  for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); 871  mit != involved_IDs_map.end(); mit++ ) 872  { 873  int sender_proc = mit->first; 874  std::vector< int >& eids = mit->second; 875  int size_buffer = 4 + total_bytes_per_entity * 876  (int)eids.size(); // hopefully, below 2B; if more, we have a big problem ... 
877  ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer ); 878  buffer->reset_ptr( sizeof( int ) ); 879  *( (int*)buffer->mem_ptr ) = size_buffer; // this is really not necessary, it should receive this too 880  881  // receive the buffer 882  ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, sender_proc, mtag, jcomm, &status ); 883  if( ierr != 0 ) return MB_FAILURE; 884 // start copy 885 #ifdef VERBOSE 886  std::ofstream dbfile; 887  std::stringstream outf; 888  outf << "recvFrom_" << sender_proc << "_on_proc_" << rankInJoin << ".txt"; 889  dbfile.open( outf.str().c_str() ); 890  dbfile << "recvFrom_" << sender_proc << " on proc " << rankInJoin << "\n"; 891 #endif 892  893  // copy tag data from buffer->buff_ptr 894  // data is arranged by tag , and repeat the loop for each entity () 895  // maybe it should be arranged by entity now, not by tag (so one loop for entities, 896  // outside) 897  898  for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); ++it ) 899  { 900  int eID = *it; 901  std::map< int, EntityHandle >::iterator mit2 = gidToHandle.find( eID ); 902  if( mit2 == gidToHandle.end() ) 903  { 904  std::cout << " on rank: " << rankInJoin << " cannot find entity handle with global ID " << eID 905  << "\n"; 906  return MB_FAILURE; 907  } 908  EntityHandle eh = mit2->second; 909  for( i = 0; i < tag_handles.size(); i++ ) 910  { 911  rval = mb->tag_set_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval ); 912 #ifdef VERBOSE 913  dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: "; 914  double* vals = (double*)( buffer->buff_ptr ); 915  for( int kk = 0; kk < tag_sizes[i]; kk++ ) 916  { 917  dbfile << " " << *vals; 918  vals++; 919  } 920  dbfile << "\n"; 921 #endif 922  buffer->buff_ptr += vect_bytes_per_tag[i]; 923  } 924  } 925  926  // delete receive buffer 927  delete buffer; 928 #ifdef VERBOSE 929  dbfile.close(); 930 #endif 931  } 932  } 933  else if( graph_type == DOF_BASED ) 934  { 935  // need to fill up the values for each tag, in the order desired, from the buffer received 936  // 937  // get all the tags, for all owned entities, and pack the buffers accordingly 938  // we do not want to get the tags by entity, it may be too expensive 939  std::vector< std::vector< double > > valuesTags; 940  valuesTags.resize( tag_handles.size() ); 941  for( size_t i = 0; i < tag_handles.size(); i++ ) 942  { 943  int bytes_per_tag; 944  rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval ); 945  valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) ); 946  // fill the whole array, we will pick up from here 947  // we will fill this array, using data from received buffer 948  // rval = mb->tag_get_data(owned, (void*)( &(valuesTags[i][0])) );MB_CHK_ERR ( rval ); 949  } 950  // now, unpack the data and set the tags 951  sendReqs.resize( involved_IDs_map.size() ); 952  for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); 953  mit != involved_IDs_map.end(); ++mit ) 954  { 955  int sender_proc = mit->first; 956  std::vector< int >& eids = mit->second; 957  std::vector< int >& index_in_values = map_index[sender_proc]; 958  std::vector< int >& index_ptr = map_ptr[sender_proc]; // this is eids.size()+1; 959  int size_buffer = 4 + total_bytes_per_entity * 960  (int)eids.size(); // hopefully, below 2B; if more, we have a big problem ... 
961  ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer ); 962  buffer->reset_ptr( sizeof( int ) ); 963  964  // receive the buffer 965  ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, sender_proc, mtag, jcomm, &status ); 966  if( ierr != 0 ) return MB_FAILURE; 967  // use the values in buffer to populate valuesTag arrays, fill it up! 968  int j = 0; 969  for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); ++it, ++j ) 970  { 971  for( size_t i = 0; i < tag_handles.size(); i++ ) 972  { 973  // right now, move just doubles; but it could be any type of tag 974  double val = *( (double*)( buffer->buff_ptr ) ); 975  buffer->buff_ptr += 8; // we know we are working with doubles only !!! 976  for( int k = index_ptr[j]; k < index_ptr[j + 1]; k++ ) 977  valuesTags[i][index_in_values[k]] = val; 978  } 979  } 980  // we are done with the buffer in which we received tags, release / delete it 981  delete buffer; 982  } 983  // now we populated the values for all tags; set now the tags! 984  for( size_t i = 0; i < tag_handles.size(); i++ ) 985  { 986  // we will fill this array, using data from received buffer 987  rval = mb->tag_set_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval ); 988  } 989  } 990  return MB_SUCCESS; 991 }

References moab::Range::begin(), buffer, compid1, compid2, COVERAGE, DOF_BASED, moab::Range::end(), ErrorCode, moab::ParallelComm::get_moab(), graph_type, INITIAL_MIGRATE, involved_IDs_map, map_index, map_ptr, mb, MB_CHK_ERR, MB_SUCCESS, rankInJoin, sendReqs, moab::Range::size(), and split_ranges.

Referenced by iMOAB_ReceiveElementTag().
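Both send_tag_values and receive_tag_values size their per-destination buffers the same way: a leading integer for the buffer size plus the packed tag bytes. A small helper restating that formula (the helper name is ours):

// buffer size for one destination: 4-byte length prefix + packed tag data;
// as noted in the source, the total must stay below 2 GB
inline int tag_buffer_size( int total_bytes_per_entity, int num_entities )
{
    return 4 + total_bytes_per_entity * num_entities;
}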

◆ receiver()

int moab::ParCommGraph::receiver ( int  index)
inline

Definition at line 148 of file ParCommGraph.hpp.

{
    return receiverTasks[index];
}

References receiverTasks.

Referenced by iMOAB_ReceiveMesh(), and send_graph().

◆ receivers()

const std::vector< int >& moab::ParCommGraph::receivers ( )
inline

Definition at line 210 of file ParCommGraph.hpp.

{
    return receiverTasks;
}

References receiverTasks.

Referenced by split_owned_range().

◆ release_send_buffers()

ErrorCode moab::ParCommGraph::release_send_buffers ( )

Definition at line 560 of file ParCommGraph.cpp.

{
    int ierr, nsize = (int)sendReqs.size();
    std::vector< MPI_Status > mult_status;
    mult_status.resize( sendReqs.size() );
    ierr = MPI_Waitall( nsize, &sendReqs[0], &mult_status[0] );

    if( ierr != 0 ) return MB_FAILURE;
    // now we can free all buffers
    delete[] comm_graph;
    comm_graph = NULL;
    std::vector< ParallelComm::Buffer* >::iterator vit;
    for( vit = localSendBuffs.begin(); vit != localSendBuffs.end(); ++vit )
        delete( *vit );
    localSendBuffs.clear();
    return MB_SUCCESS;
}

References comm_graph, localSendBuffs, MB_SUCCESS, and sendReqs.

◆ send_graph()

ErrorCode moab::ParCommGraph::send_graph ( MPI_Comm  jcomm)

Send the packed receiver graph from the sender root to the receiver root with a non-blocking send; the size is no longer sent in advance, since the receiver uses MPI_Probe.

Definition at line 265 of file ParCommGraph.cpp.

{
    if( is_root_sender() )
    {
        int ierr;
        // will need to build a communication graph, because each sender knows now to which receiver
        // to send data the receivers need to post receives for each sender that will send data to
        // them will need to gather on rank 0 on the sender comm, global ranks of sender with
        // receivers to send build communication matrix, each receiver will receive from what sender

        std::vector< int > packed_recv_array;
        ErrorCode rval = pack_receivers_graph( packed_recv_array );
        if( MB_SUCCESS != rval ) return rval;

        int size_pack_array = (int)packed_recv_array.size();
        comm_graph          = new int[size_pack_array + 1];
        comm_graph[0]       = size_pack_array;
        for( int k = 0; k < size_pack_array; k++ )
            comm_graph[k + 1] = packed_recv_array[k];
        // will add 2 requests
        /// use tag 10 to send size and tag 20 to send the packed array
        sendReqs.resize( 1 );
        // do not send the size in advance, because we use probe now
        /*ierr = MPI_Isend(&comm_graph[0], 1, MPI_INT, receiver(0), 10, jcomm, &sendReqs[0]); // we
        have to use global communicator if (ierr!=0) return MB_FAILURE;*/
        int mtag = compid2;
        ierr     = MPI_Isend( &comm_graph[1], size_pack_array, MPI_INT, receiver( 0 ), mtag, jcomm,
                              &sendReqs[0] );  // we have to use global communicator
        if( ierr != 0 ) return MB_FAILURE;
    }
    return MB_SUCCESS;
}

References comm_graph, compid2, ErrorCode, is_root_sender(), MB_SUCCESS, pack_receivers_graph(), receiver(), and sendReqs.

Referenced by iMOAB_SendMesh(), and send_graph_partition().

◆ send_graph_partition()

ErrorCode moab::ParCommGraph::send_graph_partition ( ParallelComm *  pco,
MPI_Comm  jcomm 
)

Definition at line 1505 of file ParCommGraph.cpp.

{
    // first, accumulate the info to root of sender; use gatherv
    // first, accumulate number of receivers from each sender, to the root receiver
    int numberReceivers =
        (int)split_ranges.size();  // these are ranges of elements to be sent to each receiver, from this task
    int nSenders = (int)senderTasks.size();
    // this sender will have to send to this many receivers
    std::vector< int > displs( 1 );  // displacements for gatherv
    std::vector< int > counts( 1 );
    if( is_root_sender() )
    {
        displs.resize( nSenders + 1 );
        counts.resize( nSenders );
    }

    int ierr = MPI_Gather( &numberReceivers, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, pco->comm() );
    if( ierr != MPI_SUCCESS ) return MB_FAILURE;
    // compute now displacements
    if( is_root_sender() )
    {
        displs[0] = 0;
        for( int k = 0; k < nSenders; k++ )
        {
            displs[k + 1] = displs[k] + counts[k];
        }
    }
    std::vector< int > buffer;
    if( is_root_sender() ) buffer.resize( displs[nSenders] );  // the last one will have the total count now

    std::vector< int > recvs;
    for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
    {
        recvs.push_back( mit->first );
    }
    ierr =
        MPI_Gatherv( &recvs[0], numberReceivers, MPI_INT, &buffer[0], &counts[0], &displs[0], MPI_INT, 0, pco->comm() );
    if( ierr != MPI_SUCCESS ) return MB_FAILURE;

    // now form recv_graph map; points from the
    // now form the graph to be sent to the other side; only on root
    if( is_root_sender() )
    {
#ifdef GRAPH_INFO
        std::ofstream dbfileSender;
        std::stringstream outf;
        outf << "S_" << compid1 << "_R_" << compid2 << "_SenderGraph.txt";
        dbfileSender.open( outf.str().c_str() );
        dbfileSender << " number senders: " << nSenders << "\n";
        dbfileSender << " senderRank \treceivers \n";
        for( int k = 0; k < nSenders; k++ )
        {
            int indexInBuff = displs[k];
            int senderTask  = senderTasks[k];
            dbfileSender << senderTask << "\t\t";
            for( int j = 0; j < counts[k]; j++ )
            {
                int recvTask = buffer[indexInBuff + j];
                dbfileSender << recvTask << " ";
            }
            dbfileSender << "\n";
        }
        dbfileSender.close();
#endif
        for( int k = 0; k < nSenders; k++ )
        {
            int indexInBuff = displs[k];
            int senderTask  = senderTasks[k];
            for( int j = 0; j < counts[k]; j++ )
            {
                int recvTask = buffer[indexInBuff + j];
                recv_graph[recvTask].push_back( senderTask );  // this will be packed and sent to root receiver, with
                                                               // nonblocking send
            }
        }
#ifdef GRAPH_INFO
        std::ofstream dbfile;
        std::stringstream outf2;
        outf2 << "S_" << compid1 << "_R_" << compid2 << "_RecvGraph.txt";
        dbfile.open( outf2.str().c_str() );
        dbfile << " number receivers: " << recv_graph.size() << "\n";
        dbfile << " receiverRank \tsenders \n";
        for( std::map< int, std::vector< int > >::iterator mit = recv_graph.begin(); mit != recv_graph.end(); mit++ )
        {
            int recvTask                = mit->first;
            std::vector< int >& senders = mit->second;
            dbfile << recvTask << "\t\t";
            for( std::vector< int >::iterator vit = senders.begin(); vit != senders.end(); vit++ )
                dbfile << *vit << " ";
            dbfile << "\n";
        }
        dbfile.close();
#endif
        // this is the same as trivial partition
        ErrorCode rval = send_graph( jcomm );MB_CHK_ERR( rval );
    }

    return MB_SUCCESS;
}

References buffer, moab::ParallelComm::comm(), compid1, compid2, ErrorCode, is_root_sender(), MB_CHK_ERR, MB_SUCCESS, recv_graph, send_graph(), senders(), senderTasks, and split_ranges.

Referenced by iMOAB_SendMesh().
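The gather pattern above (MPI_Gather for the per-task counts, prefix sums for the displacements, then MPI_Gatherv for the concatenated lists) is generic MPI. The following standalone sketch, which is not MOAB code and uses made-up receiver lists, shows the same steps in isolation.

    // Minimal sketch of the count/displacement/gatherv pattern used by send_graph_partition.
    #include <mpi.h>
    #include <vector>

    int main( int argc, char** argv )
    {
        MPI_Init( &argc, &argv );
        int rank, size;
        MPI_Comm_rank( MPI_COMM_WORLD, &rank );
        MPI_Comm_size( MPI_COMM_WORLD, &size );

        // hypothetical per-rank receiver list; in ParCommGraph these are the keys of split_ranges
        std::vector< int > myReceivers( rank % 3 + 1, rank );
        int myCount = (int)myReceivers.size();

        std::vector< int > counts( size ), displs( size + 1, 0 );
        MPI_Gather( &myCount, 1, MPI_INT, counts.data(), 1, MPI_INT, 0, MPI_COMM_WORLD );

        std::vector< int > all;
        if( rank == 0 )
        {
            for( int k = 0; k < size; k++ )
                displs[k + 1] = displs[k] + counts[k];  // prefix sums give the gatherv displacements
            all.resize( displs[size] );
        }
        MPI_Gatherv( myReceivers.data(), myCount, MPI_INT, all.data(), counts.data(), displs.data(), MPI_INT, 0,
                     MPI_COMM_WORLD );
        // on rank 0, all[displs[k] .. displs[k+1]) now holds rank k's receiver list
        MPI_Finalize();
        return 0;
    }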

◆ send_mesh_parts()

ErrorCode moab::ParCommGraph::send_mesh_parts ( MPI_Comm  jcomm,
ParallelComm *  pco,
Range &  owned 
)

Packs each receiver's entities (plus the vertices they depend on) into a ParallelComm buffer and posts a nonblocking MPI_Isend for every entry of split_ranges, over the joint communicator.

Definition at line 300 of file ParCommGraph.cpp.

{

    ErrorCode rval;
    if( split_ranges.empty() )  // in trivial partition
    {
        rval = split_owned_range( rankInGroup1, owned );
        if( rval != MB_SUCCESS ) return rval;
        // we know this on the sender side:
        corr_tasks = sender_graph[senderTasks[rankInGroup1]];  // copy
        corr_sizes = sender_sizes[senderTasks[rankInGroup1]];  // another copy
    }
    int mtag     = compid2;
    int indexReq = 0;
    int ierr;  // MPI error
    if( is_root_sender() ) indexReq = 1;  // for sendReqs
    sendReqs.resize( indexReq + split_ranges.size() );
    for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
    {
        int receiver_proc = it->first;
        Range ents        = it->second;

        // add necessary vertices too
        Range verts;
        rval = pco->get_moab()->get_adjacencies( ents, 0, false, verts, Interface::UNION );
        if( rval != MB_SUCCESS )
        {
            std::cout << " can't get adjacencies. for entities to send\n";
            return rval;
        }
        ents.merge( verts );
        ParallelComm::Buffer* buffer = new ParallelComm::Buffer( ParallelComm::INITIAL_BUFF_SIZE );
        buffer->reset_ptr( sizeof( int ) );
        rval = pco->pack_buffer( ents, false, true, false, -1, buffer );
        if( rval != MB_SUCCESS )
        {
            std::cout << " can't pack buffer for entities to send\n";
            return rval;
        }
        int size_pack = buffer->get_current_size();

        // TODO there could be an issue with endian things; check !!!!!
        // we are sending the size of the buffer first as an int!!!
        /// not anymore !
        /* ierr = MPI_Isend(buffer->mem_ptr, 1, MPI_INT, receiver_proc, 1, jcomm,
        &sendReqs[indexReq]); // we have to use global communicator if (ierr!=0) return MB_FAILURE;
        indexReq++;*/

        ierr = MPI_Isend( buffer->mem_ptr, size_pack, MPI_UNSIGNED_CHAR, receiver_proc, mtag, jcomm,
                          &sendReqs[indexReq] );  // we have to use global communicator
        if( ierr != 0 ) return MB_FAILURE;
        indexReq++;
        localSendBuffs.push_back( buffer );
    }
    return MB_SUCCESS;
}

References buffer, compid2, corr_sizes, corr_tasks, ErrorCode, moab::Interface::get_adjacencies(), moab::ParallelComm::get_moab(), moab::ParallelComm::INITIAL_BUFF_SIZE, is_root_sender(), localSendBuffs, MB_SUCCESS, moab::Range::merge(), moab::ParallelComm::pack_buffer(), rankInGroup1, sender_graph, sender_sizes, senderTasks, sendReqs, split_owned_range(), split_ranges, and moab::Interface::UNION.

Referenced by iMOAB_SendMesh().
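For orientation, here is a hedged sketch of one plausible sender-side calling sequence built only from the public methods documented on this page; the real driver is iMOAB_SendMesh, and the joint communicator, MPI groups, ParallelComm, owned range, component ids, the partition vector, and the include paths are all assumed to be set up elsewhere.

    #include <vector>
    #include "moab/ParallelComm.hpp"   // include paths assumed
    #include "moab/ParCommGraph.hpp"

    // Hedged sketch, not the actual iMOAB_SendMesh implementation.
    moab::ErrorCode send_my_mesh( MPI_Comm jcomm, MPI_Group group1, MPI_Group group2,
                                  int compid1, int compid2,
                                  moab::ParallelComm* pco, moab::Range& owned,
                                  std::vector< int >& numElemsPerSenderTask /* assumed filled */ )
    {
        moab::ParCommGraph cgraph( jcomm, group1, group2, compid1, compid2 );

        // trivial partition: split the sender elements across the receiver tasks
        moab::ErrorCode rval = cgraph.compute_trivial_partition( numElemsPerSenderTask );MB_CHK_ERR( rval );

        if( cgraph.is_root_sender() )
        {
            rval = cgraph.send_graph( jcomm );MB_CHK_ERR( rval );  // nonblocking send of the graph to the receiver root
        }
        rval = cgraph.send_mesh_parts( jcomm, pco, owned );MB_CHK_ERR( rval );  // pack entities + vertices, MPI_Isend
        rval = cgraph.release_send_buffers();MB_CHK_ERR( rval );               // complete the nonblocking sends
        return moab::MB_SUCCESS;
    }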

◆ send_tag_values()

ErrorCode moab::ParCommGraph::send_tag_values ( MPI_Comm  jcomm,
ParallelComm *  pco,
Range &  owned,
std::vector< Tag > &  tag_handles 
)

Packs the values of the given tags for the owned entities and posts a nonblocking send to each receiver task; the buffer layout depends on graph_type (INITIAL_MIGRATE, COVERAGE, or DOF_BASED).

Definition at line 580 of file ParCommGraph.cpp.

{
    // basically, owned.size() needs to be equal to sum(corr_sizes)
    // get info about the tag size, type, etc
    int ierr;
    Core* mb = (Core*)pco->get_moab();
    // get info about the tag
    //! Get the size of the specified tag in bytes
    int total_bytes_per_entity = 0;  // we need to know, to allocate buffers
    ErrorCode rval;
    std::vector< int > vect_bytes_per_tag;
#ifdef VERBOSE
    std::vector< int > tag_sizes;
#endif
    for( size_t i = 0; i < tag_handles.size(); i++ )
    {
        int bytes_per_tag;
        rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
        int tag_size1;  // length
        rval = mb->tag_get_length( tag_handles[i], tag_size1 );MB_CHK_ERR( rval );
        if( graph_type == DOF_BASED )
            bytes_per_tag = bytes_per_tag / tag_size1;  // we know we have one double per tag , per ID sent;
                                                        // could be 8 for double, 4 for int, etc
        total_bytes_per_entity += bytes_per_tag;
        vect_bytes_per_tag.push_back( bytes_per_tag );
#ifdef VERBOSE
        int tag_size;
        rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
        tag_sizes.push_back( tag_size );
#endif
    }

    int mtag     = compid1 + compid2;  // used as mpi tag to differentiate a little the messages
    int indexReq = 0;
    if( graph_type == INITIAL_MIGRATE )  // original send
    {
        // use the buffers data structure to allocate memory for sending the tags
        sendReqs.resize( split_ranges.size() );

        for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
        {
            int receiver_proc = it->first;
            Range ents        = it->second;  // primary entities, with the tag data
            int size_buffer   = 4 + total_bytes_per_entity *
                                      (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
            ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );

            buffer->reset_ptr( sizeof( int ) );
            for( size_t i = 0; i < tag_handles.size(); i++ )
            {
                // copy tag data to buffer->buff_ptr, and send the buffer (we could have used
                // regular char arrays)
                rval = mb->tag_get_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
                // advance the buffer
                buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
            }
            *( (int*)buffer->mem_ptr ) = size_buffer;
            // int size_pack = buffer->get_current_size(); // debug check

            ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, receiver_proc, mtag, jcomm,
                              &sendReqs[indexReq] );  // we have to use global communicator
            if( ierr != 0 ) return MB_FAILURE;
            indexReq++;
            localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
        }
    }
    else if( graph_type == COVERAGE )
    {
        // we know that we will need to send some tag data in a specific order
        // first, get the ids of the local elements, from owned Range; arrange the buffer in order
        // of increasing global id
        Tag gidTag = mb->globalId_tag();
        std::vector< int > gids;
        gids.resize( owned.size() );
        rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
        std::map< int, EntityHandle > gidToHandle;
        size_t i = 0;
        for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
        {
            EntityHandle eh        = *it;
            gidToHandle[gids[i++]] = eh;
        }
        // now, pack the data and send it
        sendReqs.resize( involved_IDs_map.size() );
        for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
             mit != involved_IDs_map.end(); mit++ )
        {
            int receiver_proc        = mit->first;
            std::vector< int >& eids = mit->second;
            int size_buffer          = 4 + total_bytes_per_entity *
                                           (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
            ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
            buffer->reset_ptr( sizeof( int ) );
#ifdef VERBOSE
            std::ofstream dbfile;
            std::stringstream outf;
            outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
            dbfile.open( outf.str().c_str() );
            dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
#endif
            // copy tag data to buffer->buff_ptr, and send the buffer (we could have used regular
            // char arrays) pack data by tag, to be consistent with above, even though we loop
            // through the entities for each tag

            for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
            {
                int eID         = *it;
                EntityHandle eh = gidToHandle[eID];
                for( i = 0; i < tag_handles.size(); i++ )
                {
                    rval = mb->tag_get_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );
                    if( rval != MB_SUCCESS )
                    {
                        delete buffer;  // free parallel comm buffer first

                        MB_SET_ERR( rval, "Tag get data failed" );
                    }
#ifdef VERBOSE
                    dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
                    double* vals = (double*)( buffer->buff_ptr );
                    for( int kk = 0; kk < tag_sizes[i]; kk++ )
                    {
                        dbfile << " " << *vals;
                        vals++;
                    }
                    dbfile << "\n";
#endif
                    buffer->buff_ptr += vect_bytes_per_tag[i];
                }
            }

#ifdef VERBOSE
            dbfile.close();
#endif
            *( (int*)buffer->mem_ptr ) = size_buffer;
            // int size_pack = buffer->get_current_size(); // debug check
            ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, receiver_proc, mtag, jcomm,
                              &sendReqs[indexReq] );  // we have to use global communicator
            if( ierr != 0 ) return MB_FAILURE;
            indexReq++;
            localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
        }
    }
    else if( graph_type == DOF_BASED )
    {
        // need to fill up the buffer, in the order desired, send it
        // get all the tags, for all owned entities, and pack the buffers accordingly
        // we do not want to get the tags by entity, it may be too expensive
        std::vector< std::vector< double > > valuesTags;
        valuesTags.resize( tag_handles.size() );
        for( size_t i = 0; i < tag_handles.size(); i++ )
        {
            int bytes_per_tag;
            rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
            valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
            // fill the whole array, we will pick up from here
            rval = mb->tag_get_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
        }
        // now, pack the data and send it
        sendReqs.resize( involved_IDs_map.size() );
        for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
             mit != involved_IDs_map.end(); ++mit )
        {
            int receiver_proc                   = mit->first;
            std::vector< int >& eids            = mit->second;
            std::vector< int >& index_in_values = map_index[receiver_proc];
            std::vector< int >& index_ptr       = map_ptr[receiver_proc];  // this is eids.size()+1;
            int size_buffer                     = 4 + total_bytes_per_entity *
                                                      (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
            ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
            buffer->reset_ptr( sizeof( int ) );
#ifdef VERBOSE
            std::ofstream dbfile;
            std::stringstream outf;
            outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
            dbfile.open( outf.str().c_str() );
            dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
#endif
            // copy tag data to buffer->buff_ptr, and send the buffer
            // pack data by tag, to be consistent with above
            int j = 0;
            for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
            {
                int index_in_v = index_in_values[index_ptr[j]];
                for( size_t i = 0; i < tag_handles.size(); i++ )
                {
                    // right now, move just doubles; but it could be any type of tag
                    *( (double*)( buffer->buff_ptr ) ) = valuesTags[i][index_in_v];
                    buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
                }
            };
            *( (int*)buffer->mem_ptr ) = size_buffer;
            // int size_pack = buffer->get_current_size(); // debug check
            ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, receiver_proc, mtag, jcomm,
                              &sendReqs[indexReq] );  // we have to use global communicator
            if( ierr != 0 ) return MB_FAILURE;
            indexReq++;
            localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
        }
    }
    return MB_SUCCESS;
}

References moab::Range::begin(), buffer, compid1, compid2, COVERAGE, DOF_BASED, moab::Range::end(), ErrorCode, moab::ParallelComm::get_moab(), graph_type, INITIAL_MIGRATE, involved_IDs_map, localSendBuffs, map_index, map_ptr, mb, MB_CHK_ERR, MB_SET_ERR, MB_SUCCESS, rankInJoin, sendReqs, moab::Range::size(), and split_ranges.

Referenced by iMOAB_SendElementTag().
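A hedged usage sketch (the real caller is iMOAB_SendElementTag): send one dense double tag for the owned elements. The tag name "Temperature", the helper name, and the include paths are illustrative assumptions, not MOAB API; the ParCommGraph and ParallelComm objects are assumed to exist already.

    #include <vector>
    #include "moab/Core.hpp"           // include paths assumed
    #include "moab/ParallelComm.hpp"
    #include "moab/ParCommGraph.hpp"

    moab::ErrorCode send_temperature( moab::Interface* mb, moab::ParallelComm* pco,
                                      moab::ParCommGraph& cgraph, MPI_Comm jcomm,
                                      moab::Range& owned )
    {
        // look up an existing dense double tag (assumed to have been created elsewhere)
        moab::Tag tempTag;
        moab::ErrorCode rval = mb->tag_get_handle( "Temperature", 1, moab::MB_TYPE_DOUBLE, tempTag );MB_CHK_ERR( rval );

        std::vector< moab::Tag > tag_handles;
        tag_handles.push_back( tempTag );

        // posts one MPI_Isend per receiver task; pair with receive_tag_values() on the other side,
        // then release the nonblocking send buffers
        rval = cgraph.send_tag_values( jcomm, pco, owned, tag_handles );MB_CHK_ERR( rval );
        rval = cgraph.release_send_buffers();MB_CHK_ERR( rval );
        return moab::MB_SUCCESS;
    }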

◆ sender()

int moab::ParCommGraph::sender ( int  index)
inline

Definition at line 143 of file ParCommGraph.hpp.

{
    return senderTasks[index];
}

References senderTasks.

Referenced by receive_comm_graph().

◆ senders()

const std::vector< int >& moab::ParCommGraph::senders ( )
inline

Definition at line 206 of file ParCommGraph.hpp.

{
    return senderTasks;
}  // reference copy; refers to sender tasks in joint comm

References senderTasks.

Referenced by pack_receivers_graph(), and send_graph_partition().

◆ set_context_id()

void moab::ParCommGraph::set_context_id ( int  other_id)
inline

Definition at line 166 of file ParCommGraph.hpp.

{
    context_id = other_id;
}

References context_id.

◆ set_cover_set()

void moab::ParCommGraph::set_cover_set ( EntityHandle  cover)
inline

Definition at line 175 of file ParCommGraph.hpp.

{
    cover_set = cover;
}

References cover_set.

◆ set_split_ranges()

ErrorCode moab::ParCommGraph::set_split_ranges ( int  comp,
TupleList &  TLBackToComp1,
std::vector< int > &  valuesComp1,
int  lenTag,
Range &  ents_of_interest,
int  type 
)

Definition at line 1264 of file ParCommGraph.cpp.

{
    // settle split_ranges // same role as partitioning
    if( rootSender ) std::cout << " find split_ranges on component " << comp << " according to read map \n";
    // Vector to store element
    // with respective present index
    int n = TLBackToComp1.get_n();
    // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
    // cases
    std::map< int, std::set< int > > uniqueIDs;

    for( int i = 0; i < n; i++ )
    {
        int to_proc  = TLBackToComp1.vi_wr[3 * i + 2];
        int globalId = TLBackToComp1.vi_wr[3 * i + 1];
        uniqueIDs[to_proc].insert( globalId );
    }

    for( size_t i = 0; i < ents_of_interest.size(); i++ )
    {
        EntityHandle ent = ents_of_interest[i];
        for( int j = 0; j < lenTag; j++ )
        {
            int marker = valuesComp1[i * lenTag + j];
            for( auto mit = uniqueIDs.begin(); mit != uniqueIDs.end(); mit++ )
            {
                int proc                = mit->first;
                std::set< int >& setIds = mit->second;
                if( setIds.find( marker ) != setIds.end() )
                {
                    split_ranges[proc].insert( ent );
                }
            }
        }
    }

    return MB_SUCCESS;
}

References moab::TupleList::get_n(), MB_SUCCESS, rootSender, moab::Range::size(), split_ranges, and moab::TupleList::vi_wr.

◆ SetReceivingAfterCoverage()

void moab::ParCommGraph::SetReceivingAfterCoverage ( std::map< int, std::set< int > > &  idsFromProcs)

Definition at line 1023 of file ParCommGraph.cpp.

{
    for( auto mt = idsFromProcs.begin(); mt != idsFromProcs.end(); ++mt )
    {
        int fromProc            = mt->first;
        std::set< int >& setIds = mt->second;
        involved_IDs_map[fromProc].resize( setIds.size() );
        std::vector< int >& listIDs = involved_IDs_map[fromProc];
        size_t indx = 0;
        for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
        {
            int valueID     = *st;
            listIDs[indx++] = valueID;
        }
    }
    graph_type = COVERAGE;
    return;
}

References COVERAGE, graph_type, and involved_IDs_map.
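A minimal sketch of how a caller could assemble the idsFromProcs argument before this call; the ranks, global IDs, helper name, and include path are invented, and only the method call itself is the documented API.

    #include <map>
    #include <set>
    #include "moab/ParCommGraph.hpp"   // include path assumed

    void install_coverage_ids( moab::ParCommGraph& cgraph )
    {
        std::map< int, std::set< int > > idsFromProcs;
        idsFromProcs[0] = { 1, 2, 5 };  // global IDs of cells this task will receive from rank 0
        idsFromProcs[3] = { 7, 8 };     // ... and from rank 3
        // copies the sets into involved_IDs_map and sets graph_type = COVERAGE
        cgraph.SetReceivingAfterCoverage( idsFromProcs );
    }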

◆ settle_comm_by_ids()

void moab::ParCommGraph::settle_comm_by_ids ( int  comp,
TupleList &  TLBackToComp,
std::vector< int > &  valuesComp 
)

Definition at line 1043 of file ParCommGraph.cpp.

{
    // settle comm graph on comp
    if( rootSender || rootReceiver ) std::cout << " settle comm graph by id on component " << comp << "\n";
    int n = TLBackToComp.get_n();
    // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
    // cases
    std::map< int, std::set< int > > uniqueIDs;
    for( int i = 0; i < n; i++ )
    {
        int to_proc  = TLBackToComp.vi_wr[3 * i + 2];
        int globalId = TLBackToComp.vi_wr[3 * i + 1];
        uniqueIDs[to_proc].insert( globalId );
    }

    // Vector to store element
    // with respective present index
    std::vector< std::pair< int, int > > vp;
    vp.reserve( valuesComp.size() );

    // Inserting element in pair vector
    // to keep track of previous indexes in valuesComp
    for( size_t i = 0; i < valuesComp.size(); ++i )
    {
        vp.push_back( std::make_pair( valuesComp[i], i ) );
    }
    // Sorting pair vector
    sort( vp.begin(), vp.end() );

    // vp[i].first, second

    // count now how many times some value appears in ordered (so in valuesComp)
    for( auto it = uniqueIDs.begin(); it != uniqueIDs.end(); ++it )
    {
        int procId                  = it->first;
        std::set< int >& nums       = it->second;
        std::vector< int >& indx    = map_ptr[procId];
        std::vector< int >& indices = map_index[procId];
        indx.resize( nums.size() + 1 );
        int indexInVp = 0;
        int indexVal  = 0;
        indx[0]       = 0;  // start from 0
        for( auto sst = nums.begin(); sst != nums.end(); ++sst, ++indexVal )
        {
            int val = *sst;
            involved_IDs_map[procId].push_back( val );
            indx[indexVal + 1] = indx[indexVal];
            while( ( indexInVp < (int)valuesComp.size() ) && ( vp[indexInVp].first <= val ) )  // should be equal !
            {
                if( vp[indexInVp].first == val )
                {
                    indx[indexVal + 1]++;
                    indices.push_back( vp[indexInVp].second );
                }
                indexInVp++;
            }
        }
    }
#ifdef VERBOSE
    std::stringstream f1;
    std::ofstream dbfile;
    f1 << "Involve_" << comp << "_" << rankInJoin << ".txt";
    dbfile.open( f1.str().c_str() );
    for( auto mit = involved_IDs_map.begin(); mit != involved_IDs_map.end(); ++mit )
    {
        int corrTask                = mit->first;
        std::vector< int >& corrIds = mit->second;
        std::vector< int >& indx    = map_ptr[corrTask];
        std::vector< int >& indices = map_index[corrTask];

        dbfile << " towards proc " << corrTask << " \n";
        for( int i = 0; i < (int)corrIds.size(); i++ )
        {
            dbfile << corrIds[i] << " [" << indx[i] << "," << indx[i + 1] << ") : ";
            for( int j = indx[i]; j < indx[i + 1]; j++ )
                dbfile << indices[j] << " ";
            dbfile << "\n";
        }
        dbfile << " \n";
    }
    dbfile.close();
#endif

    graph_type = DOF_BASED;
    // now we need to fill back and forth information, needed to fill the arrays
    // for example, from spectral to involved_IDs_map, in case we want to send data from
    // spectral to phys
}

References DOF_BASED, moab::GeomUtil::first(), moab::TupleList::get_n(), graph_type, involved_IDs_map, map_index, map_ptr, rankInJoin, rootReceiver, rootSender, and moab::TupleList::vi_wr.

Referenced by iMOAB_ComputeCommGraph().
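The loop above builds, for each destination process, a CSR-style index: map_ptr[proc][k] .. map_ptr[proc][k+1] brackets the positions in valuesComp (recorded in map_index[proc]) that hold the k-th sorted ID. The following standalone STL sketch, with invented values and no MOAB dependency, reproduces just that indexing step.

    #include <algorithm>
    #include <cstdio>
    #include <set>
    #include <utility>
    #include <vector>

    int main()
    {
        std::vector< int > valuesComp = { 42, 17, 42, 99 };  // e.g. DoF ids on this task, in storage order
        std::set< int > nums          = { 17, 42 };          // ids requested by one destination process

        std::vector< std::pair< int, int > > vp;  // (value, original index), sorted by value
        for( size_t i = 0; i < valuesComp.size(); ++i )
            vp.push_back( std::make_pair( valuesComp[i], (int)i ) );
        std::sort( vp.begin(), vp.end() );

        std::vector< int > indx( 1, 0 ), indices;  // analogues of map_ptr / map_index for this process
        size_t pos = 0;
        for( int val : nums )
        {
            indx.push_back( indx.back() );
            while( pos < vp.size() && vp[pos].first <= val )
            {
                if( vp[pos].first == val )
                {
                    indx.back()++;                       // one more position carries this id
                    indices.push_back( vp[pos].second );  // remember where it sits in valuesComp
                }
                ++pos;
            }
        }
        // indx    = {0, 1, 3} : id 17 occurs once, id 42 twice
        // indices = {1, 0, 2} : storage positions of 17, then of 42
        std::printf( "counts: %d %d\n", indx[1] - indx[0], indx[2] - indx[1] );
        return 0;
    }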

◆ settle_send_graph()

ErrorCode moab::ParCommGraph::settle_send_graph ( TupleList &  TLcovIDs)

Definition at line 995 of file ParCommGraph.cpp.

{
    // fill involved_IDs_map with data
    // will have "receiving proc" and global id of element
    int n      = TLcovIDs.get_n();
    graph_type = COVERAGE;  // do not rely only on involved_IDs_map.size(); this can be 0 in some cases
    for( int i = 0; i < n; i++ )
    {
        int to_proc      = TLcovIDs.vi_wr[2 * i];
        int globalIdElem = TLcovIDs.vi_wr[2 * i + 1];
        involved_IDs_map[to_proc].push_back( globalIdElem );
    }
#ifdef VERBOSE
    for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
         ++mit )
    {
        std::cout << " towards task " << mit->first << " send: " << mit->second.size() << " cells " << std::endl;
        for( size_t i = 0; i < mit->second.size(); i++ )
        {
            std::cout << " " << mit->second[i];
        }
        std::cout << std::endl;
    }
#endif
    return MB_SUCCESS;
}

References COVERAGE, moab::TupleList::get_n(), graph_type, involved_IDs_map, MB_SUCCESS, and moab::TupleList::vi_wr.
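A hedged sketch of how the TLcovIDs tuple list could be filled before this call, matching the two-ints-per-tuple layout (destination task, global element id) read by the loop above; the helper name, the values, and the include paths are assumptions, not MOAB-provided code.

    #include "moab/TupleList.hpp"      // include paths assumed
    #include "moab/ParCommGraph.hpp"

    void fill_coverage_ids( moab::ParCommGraph& cgraph )
    {
        moab::TupleList TLcovIDs;
        TLcovIDs.initialize( 2, 0, 0, 0, 4 );  // 2 ints per tuple, no longs/handles/reals, room for 4 tuples
        TLcovIDs.enableWriteAccess();

        int dest[4] = { 1, 1, 2, 2 };      // receiving tasks
        int gid[4]  = { 10, 11, 20, 21 };  // global ids of the cells to send there
        for( int i = 0; i < 4; i++ )
        {
            int n                     = TLcovIDs.get_n();
            TLcovIDs.vi_wr[2 * n]     = dest[i];
            TLcovIDs.vi_wr[2 * n + 1] = gid[i];
            TLcovIDs.inc_n();
        }
        cgraph.settle_send_graph( TLcovIDs );  // fills involved_IDs_map, sets graph_type = COVERAGE
    }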

◆ split_owned_range() [1/2]

ErrorCode moab::ParCommGraph::split_owned_range ( int  sender_rank,
Range &  owned 
)

Definition at line 217 of file ParCommGraph.cpp.

{
    int senderTask                   = senderTasks[sender_rank];
    std::vector< int >& distribution = sender_sizes[senderTask];
    std::vector< int >& receivers    = sender_graph[senderTask];
    if( distribution.size() != receivers.size() )  //
        return MB_FAILURE;

    Range current = owned;  // get the full range first, then we will subtract stuff, for
                            // the following ranges

    Range rleftover = current;
    for( size_t k = 0; k < receivers.size(); k++ )
    {
        Range newr;
        newr.insert( current.begin(), current.begin() + distribution[k] );
        split_ranges[receivers[k]] = newr;

        rleftover = subtract( current, newr );
        current   = rleftover;
    }

    return MB_SUCCESS;
}

References moab::Range::begin(), moab::Range::insert(), MB_SUCCESS, receivers(), sender_graph, sender_sizes, senderTasks, split_ranges, and moab::subtract().

Referenced by send_mesh_parts().
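A tiny standalone illustration of the splitting rule: the owned range is carved into consecutive chunks, chunk k going to receivers[k] with distribution[k] entities. The handles and sizes below are invented; only moab::Range and moab::subtract are real API.

    #include <iostream>
    #include <vector>
    #include "moab/Range.hpp"          // include path assumed

    int main()
    {
        moab::Range owned;
        owned.insert( (moab::EntityHandle)1, (moab::EntityHandle)100 );  // pretend we own 100 entity handles

        std::vector< int > distribution = { 40, 60 };  // sizes per receiver, as in sender_sizes
        std::vector< int > receivers    = { 5, 7 };    // receiver ranks, as in sender_graph

        moab::Range current = owned;
        for( size_t k = 0; k < receivers.size(); k++ )
        {
            moab::Range newr;
            newr.insert( current.begin(), current.begin() + distribution[k] );  // next consecutive chunk
            std::cout << "receiver " << receivers[k] << " gets " << newr.size() << " entities\n";
            current = moab::subtract( current, newr );  // remove the chunk before the next pass
        }
        return 0;
    }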

◆ split_owned_range() [2/2]

ErrorCode moab::ParCommGraph::split_owned_range ( Range &  owned)

Definition at line 243 of file ParCommGraph.cpp.

{
    if( corr_tasks.size() != corr_sizes.size() )  //
        return MB_FAILURE;

    Range current = owned;  // get the full range first, then we will subtract stuff, for
                            // the following ranges

    Range rleftover = current;
    for( size_t k = 0; k < corr_tasks.size(); k++ )
    {
        Range newr;
        newr.insert( current.begin(), current.begin() + corr_sizes[k] );
        split_ranges[corr_tasks[k]] = newr;

        rleftover = subtract( current, newr );
        current   = rleftover;
    }

    return MB_SUCCESS;
}

References moab::Range::begin(), corr_sizes, corr_tasks, moab::Range::insert(), MB_SUCCESS, split_ranges, and moab::subtract().

Member Data Documentation

◆ comm

MPI_Comm moab::ParCommGraph::comm
private

Definition at line 263 of file ParCommGraph.hpp.

Referenced by ParCommGraph().

◆ comm_graph

int* moab::ParCommGraph::comm_graph
private

Definition at line 288 of file ParCommGraph.hpp.

Referenced by ParCommGraph(), release_send_buffers(), and send_graph().

◆ compid1

int moab::ParCommGraph::compid1
private

◆ compid2

◆ context_id

int moab::ParCommGraph::context_id
private

Definition at line 271 of file ParCommGraph.hpp.

Referenced by get_context_id(), ParCommGraph(), and set_context_id().

◆ corr_sizes

std::vector< int > moab::ParCommGraph::corr_sizes
private

Definition at line 302 of file ParCommGraph.hpp.

Referenced by receive_mesh(), send_mesh_parts(), and split_owned_range().

◆ corr_tasks

std::vector< int > moab::ParCommGraph::corr_tasks
private

Definition at line 300 of file ParCommGraph.hpp.

Referenced by receive_mesh(), send_mesh_parts(), and split_owned_range().

◆ cover_set

EntityHandle moab::ParCommGraph::cover_set
private

Definition at line 272 of file ParCommGraph.hpp.

Referenced by get_cover_set(), ParCommGraph(), and set_cover_set().

◆ graph_type

◆ involved_IDs_map

std::map< int, std::vector< int > > moab::ParCommGraph::involved_IDs_map
private

◆ joinSize

int moab::ParCommGraph::joinSize
private

Definition at line 269 of file ParCommGraph.hpp.

Referenced by ParCommGraph().

◆ localSendBuffs

std::vector< ParallelComm::Buffer* > moab::ParCommGraph::localSendBuffs
private

Definition at line 285 of file ParCommGraph.hpp.

Referenced by release_send_buffers(), send_mesh_parts(), and send_tag_values().

◆ map_index

std::map< int, std::vector< int > > moab::ParCommGraph::map_index
private

Definition at line 312 of file ParCommGraph.hpp.

Referenced by receive_tag_values(), send_tag_values(), and settle_comm_by_ids().

◆ map_ptr

std::map< int, std::vector< int > > moab::ParCommGraph::map_ptr
private

Definition at line 313 of file ParCommGraph.hpp.

Referenced by receive_tag_values(), send_tag_values(), and settle_comm_by_ids().

◆ rankInGroup1

int moab::ParCommGraph::rankInGroup1
private

Definition at line 268 of file ParCommGraph.hpp.

Referenced by dump_comm_information(), ParCommGraph(), and send_mesh_parts().

◆ rankInGroup2

int moab::ParCommGraph::rankInGroup2
private

Definition at line 268 of file ParCommGraph.hpp.

Referenced by dump_comm_information(), and ParCommGraph().

◆ rankInJoin

int moab::ParCommGraph::rankInJoin
private

◆ receiverTasks

std::vector< int > moab::ParCommGraph::receiverTasks
private

◆ recv_graph

std::map< int, std::vector< int > > moab::ParCommGraph::recv_graph
private

◆ recv_sizes

std::map< int, std::vector< int > > moab::ParCommGraph::recv_sizes
private

Definition at line 279 of file ParCommGraph.hpp.

Referenced by compute_trivial_partition().

◆ rootReceiver

bool moab::ParCommGraph::rootReceiver
private

◆ rootSender

bool moab::ParCommGraph::rootSender
private

◆ sender_graph

std::map< int, std::vector< int > > moab::ParCommGraph::sender_graph
private

Definition at line 281 of file ParCommGraph.hpp.

Referenced by compute_trivial_partition(), send_mesh_parts(), and split_owned_range().

◆ sender_sizes

std::map< int, std::vector< int > > moab::ParCommGraph::sender_sizes
private

Definition at line 283 of file ParCommGraph.hpp.

Referenced by compute_trivial_partition(), send_mesh_parts(), and split_owned_range().

◆ senderTasks

std::vector< int > moab::ParCommGraph::senderTasks
private

◆ sendReqs

std::vector< MPI_Request > moab::ParCommGraph::sendReqs
private

◆ split_ranges


The documentation for this class was generated from the following files:
ParCommGraph.hpp
ParCommGraph.cpp