#ifdef MOAB_HAVE_ZOLTAN
: comm( joincomm ), compid1( coid1 ), compid2( coid2 )
if( MPI_SUCCESS != mpierr )
if( MPI_SUCCESS != mpierr )
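// Translate the ranks of the given sub-group into ranks of the joint communicator's global group.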
MPI_Comm_group( joincomm, &global_grp );
MPI_Group_size( group, &grp_size );
std::vector< int > rks( grp_size );
ranks.resize( grp_size );
for( int i = 0; i < grp_size; i++ )
MPI_Group_translate_ranks( group, grp_size, rks.data(), global_grp, ranks.data() );
MPI_Group_free( &global_grp );
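// Trivial partition: accumulate per-sender element counts into prefix sums (accum), then split the
// total evenly across receivers (starts), handing the leftover out one element at a time.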
if( numElemsPerTaskInGroup1.size() != senderTasks.size() )
std::vector< int > accum;
accum.push_back( 0 );
for( size_t k = 0; k < numElemsPerTaskInGroup1.size(); k++ )
total_elems += numElemsPerTaskInGroup1[k];
accum.push_back( total_elems );
int num_per_receiver = (int)( total_elems / num_recv );
int leftover = total_elems - num_per_receiver * num_recv;
std::vector< int > starts;
starts.resize( num_recv + 1 );
for( int k = 0; k < num_recv; k++ )
starts[k + 1] = starts[k] + num_per_receiver;
if( k < leftover ) starts[k + 1]++;
int lastUsedReceiverRank = 0;
for( int j = 0; j < num_senders; j++ )
for( int k = lastUsedReceiverRank; k < num_recv; k++ )
if( starts[k] < accum[j + 1] && starts[k + 1] > accum[j] )
if( starts[k] >= accum[j] )
if( starts[k + 1] >= accum[j + 1] )
sizeOverlap = accum[j + 1] - starts[k];
sizeOverlap = starts[k + 1] - starts[k];
if( starts[k + 1] >= accum[j + 1] )
sizeOverlap = accum[j + 1] - accum[j];
sizeOverlap = starts[k + 1] - accum[j];
if( starts[k] > accum[j + 1] )
lastUsedReceiverRank = k - 1;
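// After the trivial partition is computed, warn about empty receiver parts, then pack the
// receiver -> senders graph as a flat integer array: receiver rank, number of senders, sender ranks.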
std::cout << " WARNING: empty partitions, some receiver tasks will receive nothing.\n";
for( std::map< int, std::vector< int > >::iterator it = recv_graph.begin(); it != recv_graph.end(); it++ )
int recv = it->first;
std::vector< int >& senders = it->second;
packed_recv_array.push_back( recv );
packed_recv_array.push_back( (int)senders.size() );
for( int k = 0; k < (int)senders.size(); k++ )
packed_recv_array.push_back( senders[k] );
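// Split the locally owned Range among the corresponding tasks, peeling sub-ranges off the front
// and keeping the remainder in rleftover for the next task.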
std::vector< int >& distribution = sender_sizes[senderTask];
if( distribution.size() != receivers.size() )
Range current = owned;
Range rleftover = current;
for( size_t k = 0; k < receivers.size(); k++ )
rleftover = subtract( current, newr );
Range current = owned;
Range rleftover = current;
for( size_t k = 0; k < corr_tasks.size(); k++ )
rleftover = subtract( current, newr );
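// Root sender only: flatten the receiver graph and post a nonblocking send of it to the
// root receiver over the joint communicator.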
std::vector< int > packed_recv_array;
int size_pack_array = (int)packed_recv_array.size();
for( int k = 0; k < size_pack_array; k++ )
ierr = MPI_Isend( &comm_graph[1], size_pack_array, MPI_INT, receiver( 0 ), mtag, jcomm,
if( ierr != 0 ) return MB_FAILURE;
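// For each receiver, collect the entities to send (and their adjacencies), pack them into a
// ParallelComm buffer, and post a nonblocking send of the raw bytes.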
int receiver_proc = it->first;
Range ents = it->second;
std::cout << " can't get adjacencies for entities to send\n";
buffer->reset_ptr( sizeof( int ) );
std::cout << " can't pack buffer for entities to send\n";
int size_pack = buffer->get_current_size();
ierr = MPI_Isend( buffer->mem_ptr, size_pack, MPI_UNSIGNED_CHAR, receiver_proc, mtag, jcomm,
if( ierr != 0 ) return MB_FAILURE;
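// Receiver side: the root receiver probes for the packed graph from the root sender, receives it,
// then broadcasts it over the receiver communicator.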
MPI_Comm receive = pco->comm();
int size_pack_array, ierr;
ierr = MPI_Probe( sender( 0 ), mtag, jcomm, &status );
std::cout << " MPI_Probe failure: " << ierr << "\n";
ierr = MPI_Get_count( &status, MPI_INT, &size_pack_array );
std::cout << " MPI_Get_count failure: " << ierr << "\n";
std::cout << " receive comm graph size: " << size_pack_array << "\n";
pack_array.resize( size_pack_array );
ierr = MPI_Recv( pack_array.data(), size_pack_array, MPI_INT, sender( 0 ), mtag, jcomm, &status );
if( 0 != ierr ) return MB_FAILURE;
std::cout << " receive comm graph ";
for( int k = 0; k < (int)pack_array.size(); k++ )
std::cout << " " << pack_array[k];
ierr = MPI_Bcast( &size_pack_array, 1, MPI_INT, 0, receive );
if( 0 != ierr ) return MB_FAILURE;
pack_array.resize( size_pack_array );
ierr = MPI_Bcast( pack_array.data(), size_pack_array, MPI_INT, 0, receive );
if( 0 != ierr ) return MB_FAILURE;
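// ParCommGraph::receive_mesh: for each local sender, probe for the message size, receive the packed
// buffer, and unpack it into the local MOAB instance; received entities are tagged with the
// originating sender rank (orgSendProcTag).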
std::vector< int >& senders_local )
if( !senders_local.empty() )
for( size_t k = 0; k < senders_local.size(); k++ )
int sender1 = senders_local[k];
ierr = MPI_Probe( sender1, mtag, jcomm, &status );
std::cout << " MPI_Probe failure in ParCommGraph::receive_mesh " << ierr << "\n";
ierr = MPI_Get_count( &status, MPI_CHAR, &size_pack );
std::cout << " MPI_Get_count failure in ParCommGraph::receive_mesh " << ierr << "\n";
ierr = MPI_Recv( buffer->mem_ptr, size_pack, MPI_UNSIGNED_CHAR, sender1, mtag, jcomm, &status );
std::cout << " MPI_Recv failure in ParCommGraph::receive_mesh " << ierr << "\n";
std::vector< std::vector< EntityHandle > > L1hloc, L1hrem;
std::vector< std::vector< int > > L1p;
std::vector< EntityHandle > L2hloc, L2hrem;
std::vector< unsigned int > L2p;
buffer->reset_ptr( sizeof( int ) );
std::vector< EntityHandle > entities_vec( entities.size() );
rval = pco->unpack_buffer( buffer->buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
if( local_primary_ents.empty() )
local_primary_ents = verts;
std::vector< int > orig_senders( local_primary_ents.size(), sender1 );
rval = pco->get_moab()->tag_set_data( orgSendProcTag, local_primary_ents, orig_senders.data() );
std::ostringstream partial_outFile;
partial_outFile << "part_send_" << sender1 << "."
std::cout << " writing from receiver " << rankInJoin << " from sender " << sender1 << " entities: " << entities.size() << std::endl;
if( newEnts.empty() )
std::cout << " WARNING: this task did not receive any entities \n";
newEnts = subtract( newEnts, local_verts );
rval = mb->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
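// Wait for all outstanding nonblocking sends to complete before the send buffers are released.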
int ierr, nsize = (int)sendReqs.size();
std::vector< MPI_Status > mult_status;
mult_status.resize( sendReqs.size() );
ierr = MPI_Waitall( nsize, sendReqs.data(), mult_status.data() );
if( ierr != 0 ) return MB_FAILURE;
std::vector< ParallelComm::Buffer* >::iterator vit;
std::vector< Tag >& tag_handles )
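// Compute the number of bytes per entity across all requested tags, so per-receiver buffer sizes
// can be estimated before packing.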
int total_bytes_per_entity = 0;
std::vector< int > vect_bytes_per_tag;
std::vector< int > tag_sizes;
for( size_t i = 0; i < tag_handles.size(); i++ )
rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
rval = mb->tag_get_length( tag_handles[i], tag_size1 );MB_CHK_ERR( rval );
bytes_per_tag = bytes_per_tag / tag_size1;
total_bytes_per_entity += bytes_per_tag;
vect_bytes_per_tag.push_back( bytes_per_tag );
rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
tag_sizes.push_back( tag_size );
int receiver_proc = it->first;
Range ents = it->second;
int size_buffer = 4 + total_bytes_per_entity *
buffer->reset_ptr( sizeof( int ) );
for( size_t i = 0; i < tag_handles.size(); i++ )
rval = mb->tag_get_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
*( (int*)buffer->mem_ptr ) = size_buffer;
ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, receiver_proc, mtag, jcomm,
if( ierr != 0 ) return MB_FAILURE;
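// ID-addressed send path (inferred from the visible code): build a global-ID-to-handle map, then
// pack tag values per receiver in the ID order recorded in involved_IDs_map, with an optional
// per-receiver debug dump to a text file.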
Tag gidTag = mb->globalId_tag();
std::vector< int > gids;
gids.resize( owned.size() );
rval = mb->tag_get_data( gidTag, owned, gids.data() );MB_CHK_ERR( rval );
std::map< int, EntityHandle > gidToHandle;
gidToHandle[gids[i++]] = eh;
for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
int receiver_proc = mit->first;
std::vector< int >& eids = mit->second;
int size_buffer = 4 + total_bytes_per_entity *
buffer->reset_ptr( sizeof( int ) );
std::ofstream dbfile;
std::stringstream outf;
outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
dbfile.open( outf.str().c_str() );
dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
for( i = 0; i < tag_handles.size(); i++ )
rval = mb->tag_get_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );
dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
double* vals = (double*)( buffer->buff_ptr );
for( int kk = 0; kk < tag_sizes[i]; kk++ )
dbfile << " " << *vals;
buffer->buff_ptr += vect_bytes_per_tag[i];
*( (int*)buffer->mem_ptr ) = size_buffer;
ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, receiver_proc, mtag, jcomm,
if( ierr != 0 ) return MB_FAILURE;
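// Alternative send path (inferred): gather all tag values for owned entities into valuesTags,
// then fill each receiver's buffer using the precomputed map_ptr / map_index ordering.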
std::vector< std::vector< double > > valuesTags;
valuesTags.resize( tag_handles.size() );
for( size_t i = 0; i < tag_handles.size(); i++ )
rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
rval = mb->tag_get_data( tag_handles[i], owned, (void*)( valuesTags[i].data() ) );MB_CHK_ERR( rval );
for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
int receiver_proc = mit->first;
std::vector< int >& eids = mit->second;
std::vector< int >& index_in_values = map_index[receiver_proc];
std::vector< int >& index_ptr = map_ptr[receiver_proc];
int size_buffer = 4 + total_bytes_per_entity *
buffer->reset_ptr( sizeof( int ) );
std::ofstream dbfile;
std::stringstream outf;
outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
dbfile.open( outf.str().c_str() );
dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
int index_in_v = index_in_values[index_ptr[j]];
for( size_t i = 0; i < tag_handles.size(); i++ )
*( (double*)( buffer->buff_ptr ) ) = valuesTags[i][index_in_v];
*( (int*)buffer->mem_ptr ) = size_buffer;
ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, receiver_proc, mtag, jcomm,
if( ierr != 0 ) return MB_FAILURE;
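// Receiving side of the tag exchange: buffer sizes are computed the same way as on the send side,
// then the incoming values are unpacked onto the matching local entities.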
std::vector< Tag >& tag_handles )
int total_bytes_per_entity = 0;
std::vector< int > vect_bytes_per_tag;
std::vector< int > tag_sizes;
for( size_t i = 0; i < tag_handles.size(); i++ )
rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
total_bytes_per_entity += bytes_per_tag;
vect_bytes_per_tag.push_back( bytes_per_tag );
rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
tag_sizes.push_back( tag_size );
int sender_proc = it->first;
Range ents = it->second;
int size_buffer = 4 + total_bytes_per_entity *
buffer->reset_ptr( sizeof( int ) );
*( (int*)buffer->mem_ptr ) = size_buffer;
ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, sender_proc, mtag, jcomm, &status );
if( ierr != 0 ) return MB_FAILURE;
for( size_t i = 0; i < tag_handles.size(); i++ )
rval = mb->tag_set_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );
buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
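// ID-addressed receive path (inferred): look up each incoming global ID in gidToHandle and set the
// tag values entity by entity, with an optional per-sender debug dump.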
Tag gidTag = mb->globalId_tag();
std::vector< int > gids;
gids.resize( owned.size() );
rval = mb->tag_get_data( gidTag, owned, gids.data() );MB_CHK_ERR( rval );
std::map< int, EntityHandle > gidToHandle;
gidToHandle[gids[i++]] = eh;
for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
int sender_proc = mit->first;
std::vector< int >& eids = mit->second;
int size_buffer = 4 + total_bytes_per_entity *
buffer->reset_ptr( sizeof( int ) );
*( (int*)buffer->mem_ptr ) = size_buffer;
ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, sender_proc, mtag, jcomm, &status );
if( ierr != 0 ) return MB_FAILURE;
std::ofstream dbfile;
std::stringstream outf;
outf << "recvFrom_" << sender_proc << "_on_proc_" << rankInJoin << ".txt";
dbfile.open( outf.str().c_str() );
dbfile << "recvFrom_" << sender_proc << " on proc " << rankInJoin << "\n";
for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); ++it )
std::map< int, EntityHandle >::iterator mit2 = gidToHandle.find( eID );
if( mit2 == gidToHandle.end() )
std::cout << " on rank: " << rankInJoin << " cannot find entity handle with global ID " << eID
for( i = 0; i < tag_handles.size(); i++ )
rval = mb->tag_set_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
double* vals = (double*)( buffer->buff_ptr );
for( int kk = 0; kk < tag_sizes[i]; kk++ )
dbfile << " " << *vals;
buffer->buff_ptr += vect_bytes_per_tag[i];
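// Alternative receive path (inferred): scatter received values into valuesTags through
// map_ptr / map_index, then write them back onto the owned Range with one tag_set_data call per tag.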
std::vector< std::vector< double > > valuesTags;
valuesTags.resize( tag_handles.size() );
for( size_t i = 0; i < tag_handles.size(); i++ )
rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
int sender_proc = mit->first;
std::vector< int >& eids = mit->second;
std::vector< int >& index_in_values = map_index[sender_proc];
std::vector< int >& index_ptr = map_ptr[sender_proc];
int size_buffer = 4 + total_bytes_per_entity *
buffer->reset_ptr( sizeof( int ) );
ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, sender_proc, mtag, jcomm, &status );
if( ierr != 0 ) return MB_FAILURE;
for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); ++it, ++j )
for( size_t i = 0; i < tag_handles.size(); i++ )
double val = *( (double*)( buffer->buff_ptr ) );
for( int k = index_ptr[j]; k < index_ptr[j + 1]; k++ )
valuesTags[i][index_in_values[k]] = val;
for( size_t i = 0; i < tag_handles.size(); i++ )
rval = mb->tag_set_data( tag_handles[i], owned, (void*)( valuesTags[i].data() ) );MB_CHK_ERR( rval );
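// Collect per-task ID lists from the TLcovIDs tuple list; each tuple carries the destination task
// and the element global ID.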
int n = TLcovIDs.get_n();
for( int i = 0; i < n; i++ )
int to_proc = TLcovIDs.vi_wr[2 * i];
int globalIdElem = TLcovIDs.vi_wr[2 * i + 1];
std::cout << " towards task " << mit->first << " send: " << mit->second.size() << " cells " << std::endl;
for( size_t i = 0; i < mit->second.size(); i++ )
std::cout << " " << mit->second[i];
std::cout << std::endl;
std::map< int, std::set< int > >& idsFromProcs )
for( auto mt = idsFromProcs.begin(); mt != idsFromProcs.end(); ++mt )
int fromProc = mt->first;
std::set< int >& setIds = mt->second;
for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
listIDs[indx++] = valueID;
int n = TLBackToComp.get_n();
std::map< int, std::set< int > > uniqueIDs;
for( int i = 0; i < n; i++ )
int to_proc = TLBackToComp.vi_wr[3 * i + 2];
int globalId = TLBackToComp.vi_wr[3 * i + 1];
uniqueIDs[to_proc].insert( globalId );
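// Build a CSR-like index per involved task: map_ptr[proc] holds offsets and map_index[proc] holds
// positions into valuesComp, found by scanning a sorted (value, original index) vector.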
std::vector< std::pair< int, int > > vp;
vp.reserve( valuesComp.size() );
for( size_t i = 0; i < valuesComp.size(); ++i )
vp.push_back( std::make_pair( valuesComp[i], i ) );
sort( vp.begin(), vp.end() );
for( auto it = uniqueIDs.begin(); it != uniqueIDs.end(); ++it )
int procId = it->first;
std::set< int >& nums = it->second;
std::vector< int >& indx = map_ptr[procId];
std::vector< int >& indices = map_index[procId];
indx.resize( nums.size() + 1 );
for( auto sst = nums.begin(); sst != nums.end(); ++sst, ++indexVal )
indx[indexVal + 1] = indx[indexVal];
while( ( indexInVp < (int)valuesComp.size() ) && ( vp[indexInVp].first <= val ) )
if( vp[indexInVp].first == val )
indx[indexVal + 1]++;
indices.push_back( vp[indexInVp].second );
std::stringstream f1;
std::ofstream dbfile;
f1 << "Involve_" << comp << "_" << rankInJoin << ".txt";
dbfile.open( f1.str().c_str() );
int corrTask = mit->first;
std::vector< int >& corrIds = mit->second;
std::vector< int >& indx = map_ptr[corrTask];
std::vector< int >& indices = map_index[corrTask];
dbfile << " towards proc " << corrTask << " \n";
for( int i = 0; i < (int)corrIds.size(); i++ )
dbfile << corrIds[i] << " [" << indx[i] << "," << indx[i + 1] << ") : ";
for( int j = indx[i]; j < indx[i + 1]; j++ )
dbfile << indices[j] << " ";
int primaryDim = mb->dimension_from_handle( *owned.rbegin() );
int interfaceDim = primaryDim - 1;
Tag gidTag = mb->globalId_tag();
unsigned char pstatus;
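// Collect graph edges that cross processor boundaries: for each shared interface entity, exchange
// the adjacent cell's global ID with the sharing processor and record it in
// extraGraphEdges / extraCellsProc.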
std::multimap< int, int > extraGraphEdges;
std::map< int, int > extraCellsProc;
std::cout << " on sender task " << pco->rank() << " number of shared interface cells " << sharedEdges.size()
std::map< EntityHandle, int > edgeToCell;
rval = mb->get_adjacencies( &edge, 1, primaryDim, false, adjEnts );MB_CHK_ERR( rval );
if( adjEnts.size() > 0 )
rval = mb->tag_get_data( gidTag, &adjCell, 1, &gid );MB_CHK_ERR( rval );
int n = TLe.get_n();
TLe.vi_wr[2 * n] = shprocs[0];
TLe.vi_wr[2 * n + 1] = gid;
TLe.vul_wr[n] = shhandles[0];
edgeToCell[edge] = gid;
std::stringstream ff2;
ff2 << "TLe_" << pco->rank() << ".txt";
int ne = TLe.get_n();
for( int i = 0; i < ne; i++ )
int sharedProc = TLe.vi_rd[2 * i];
int remoteCellID = TLe.vi_rd[2 * i + 1];
int localCellId = edgeToCell[localCell];
std::pair< int, int > extraAdj = std::make_pair( localCellId, remoteCellID );
extraGraphEdges.insert( extraAdj );
extraCellsProc[remoteCellID] = sharedProc;
std::cout << "local ID " << edgeToCell[localCell] << " remote cell ID: " << remoteCellID << "\n";
if( rootSender ) std::cout << " time preparing the input for Zoltan: " << t2 - t1 << " seconds. \n";
#ifdef MOAB_HAVE_ZOLTAN
std::map< int, Range > distribution;
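// Hand the primary cells, plus the extra cross-processor edges, to the Zoltan-based partitioner
// to compute numNewPartitions parts.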
rval = mbZTool->partition_owned_cells( primaryCells, extraGraphEdges, extraCellsProc, numNewPartitions,
for( std::map< int, Range >::iterator mit = distribution.begin(); mit != distribution.end(); mit++ )
int part_index = mit->first;
assert( part_index < numNewPartitions );
if( rootSender ) std::cout << " time spent by Zoltan " << t3 - t2 << " seconds. \n";
int numberReceivers =
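// Gather, on the root sender, the number of receivers each sender has (counts), build the
// displacements, then gather the receiver lists themselves so the root can assemble recv_graph.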
std::vector< int > displs( 1 );
std::vector< int > counts( 1 );
displs.resize( nSenders + 1 );
counts.resize( nSenders );
int ierr = MPI_Gather( &numberReceivers, 1, MPI_INT, counts.data(), 1, MPI_INT, 0, pco->comm() );
if( ierr != MPI_SUCCESS ) return MB_FAILURE;
for( int k = 0; k < nSenders; k++ )
displs[k + 1] = displs[k] + counts[k];
std::vector< int > buffer;
std::vector< int > recvs;
recvs.push_back( mit->first );
ierr = MPI_Gatherv( recvs.data(), numberReceivers, MPI_INT, buffer.data(), counts.data(), displs.data(), MPI_INT, 0, pco->comm() );
if( ierr != MPI_SUCCESS ) return MB_FAILURE;
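// Optional debug dump of the sender-side graph (sender rank -> receiver ranks) to a text file,
// followed by assembling the reverse receiver -> senders map (recv_graph).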
std::ofstream dbfileSender;
std::stringstream outf;
outf << "S_" << compid1 << "_R_" << compid2 << "_SenderGraph.txt";
dbfileSender.open( outf.str().c_str() );
dbfileSender << " number senders: " << nSenders << "\n";
dbfileSender << " senderRank \treceivers \n";
for( int k = 0; k < nSenders; k++ )
int indexInBuff = displs[k];
dbfileSender << senderTask << "\t\t";
for( int j = 0; j < counts[k]; j++ )
int recvTask = buffer[indexInBuff + j];
dbfileSender << recvTask << " ";
dbfileSender << "\n";
dbfileSender.close();
for( int k = 0; k < nSenders; k++ )
int indexInBuff = displs[k];
for( int j = 0; j < counts[k]; j++ )
int recvTask = buffer[indexInBuff + j];
recv_graph[recvTask].push_back( senderTask );
std::ofstream dbfile;
std::stringstream outf2;
outf2 << "S_" << compid1 << "_R_" << compid2 << "_RecvGraph.txt";
dbfile.open( outf2.str().c_str() );
dbfile << " number receivers: " << recv_graph.size() << "\n";
dbfile << " receiverRank \tsenders \n";
for( std::map< int, std::vector< int > >::iterator mit = recv_graph.begin(); mit != recv_graph.end(); mit++ )
int recvTask = mit->first;
std::vector< int >& senders = mit->second;
dbfile << recvTask << "\t\t";
for( std::vector< int >::iterator vit = senders.begin(); vit != senders.end(); vit++ )
dbfile << *vit << " ";
std::ofstream dbfile;
std::stringstream outf;
dbfile.open( outf.str().c_str() );
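// Debug dump of the communication graph: one line per peer task (receiver or sender), with the
// number of IDs or entities exchanged with that task.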
for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
int receiver_proc = mit->first;
std::vector< int >& eids = mit->second;
dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
int receiver_proc = mit->first;
Range& eids = mit->second;
dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
int receiver_proc = mit->first;
dbfile << "receiver: " << receiver_proc << " size:" << mit->second.size() << "\n";
std::ofstream dbfile;
std::stringstream outf;
dbfile.open( outf.str().c_str() );
for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
int sender_proc = mit->first;
std::vector< int >& eids = mit->second;
dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
int sender_proc = mit->first;
Range& eids = mit->second;
dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
int sender_proc = mit->first;
dbfile << "sender: " << sender_proc << " size:" << mit->second.size() << "\n";