11 #ifdef MOAB_HAVE_ZOLTAN
21 : comm( joincomm ), compid1( coid1 ), compid2( coid2 )
37 if( MPI_SUCCESS != mpierr )
41 if( MPI_SUCCESS != mpierr )
84 MPI_Comm_group( joincomm, &global_grp );
88 MPI_Group_size( group, &grp_size );
89 std::vector< int > rks( grp_size );
90 ranks.resize( grp_size );
92 for( int i = 0; i < grp_size; i++ )
95 MPI_Group_translate_ranks( group, grp_size, &rks[0], global_grp, &ranks[0] );
96 MPI_Group_free( &global_grp );
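// lines 84-96: translate ranks between groups; rks holds 0..grp_size-1 for the local
// group, and MPI_Group_translate_ranks fills `ranks` with the corresponding ranks in the
// joint communicator's group before the global group handle is freed.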
108 if( numElemsPerTaskInGroup1.size() != senderTasks.size() )
113 std::vector< int > accum;
114 accum.push_back( 0 );
118 for( size_t k = 0; k < numElemsPerTaskInGroup1.size(); k++ )
120 total_elems += numElemsPerTaskInGroup1[k];
121 accum.push_back( total_elems );
126 int num_per_receiver = (int)( total_elems / num_recv );
127 int leftover = total_elems - num_per_receiver * num_recv;
130 std::vector< int > starts;
131 starts.resize( num_recv + 1 );
133 for( int k = 0; k < num_recv; k++ )
135 starts[k + 1] = starts[k] + num_per_receiver;
136 if( k < leftover ) starts[k + 1]++;
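// lines 126-136 split total_elems as evenly as possible across num_recv receivers; as a
// worked example, total_elems = 10 and num_recv = 3 give num_per_receiver = 3 and
// leftover = 1, so starts = {0, 4, 7, 10} and the first receiver takes the extra element.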
141 int lastUsedReceiverRank = 0;
142 for( int j = 0; j < num_senders; j++ )
146 for( int k = lastUsedReceiverRank; k < num_recv; k++ )
149 if( starts[k] < accum[j + 1] && starts[k + 1] > accum[j] )
157 if( starts[k] >= accum[j] )
159 if( starts[k + 1] >= accum[j + 1] )
160 sizeOverlap = accum[j + 1] - starts[k];
162 sizeOverlap = starts[k + 1] - starts[k];
166 if( starts[k + 1] >= accum[j + 1] )
167 sizeOverlap = accum[j + 1] - accum[j];
169 sizeOverlap = starts[k + 1] - accum[j];
174 if( starts[k] > accum[j + 1] )
176 lastUsedReceiverRank = k - 1;
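// in the loop above, sender j owns the element interval [accum[j], accum[j+1]) and
// receiver k is assigned [starts[k], starts[k+1]); sizeOverlap is the length of their
// intersection, and each overlapping (sender, receiver) pair presumably becomes an edge
// of the communication graph.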
201 std::cout << " WARNING: empty partitions, some receiver tasks will receive nothing.\n";
203 for( std::map< int, std::vector< int > >::iterator it = recv_graph.begin(); it != recv_graph.end(); it++ )
205 int recv = it->first;
206 std::vector< int >& senders = it->second;
207 packed_recv_array.push_back( recv );
208 packed_recv_array.push_back( (int)senders.size() );
210 for( int k = 0; k < (int)senders.size(); k++ )
211 packed_recv_array.push_back( senders[k] );
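// packed layout produced by lines 205-211: for each receiver the array stores the
// receiver rank, the number of its senders, and then the sender ranks themselves.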
220 std::vector< int >& distribution = sender_sizes[senderTask];
222 if( distribution.size() != receivers.size() )
225 Range current = owned;
228 Range rleftover = current;
229 for( size_t k = 0; k < receivers.size(); k++ )
235 rleftover = subtract( current, newr );
248 Range current = owned;
251 Range rleftover = current;
252 for( size_t k = 0; k < corr_tasks.size(); k++ )
258 rleftover = subtract( current, newr );
275 std::vector< int > packed_recv_array;
279 int size_pack_array = (int)packed_recv_array.size();
282 for( int k = 0; k < size_pack_array; k++ )
292 if( ierr != 0 ) return MB_FAILURE;
318 int receiver_proc = it->first;
319 Range ents = it->second;
326 std::cout << " can't get adjacencies for entities to send\n";
331 buffer->reset_ptr( sizeof( int ) );
335 std::cout << " can't pack buffer for entities to send\n";
338 int size_pack = buffer->get_current_size();
347 ierr = MPI_Isend( buffer->mem_ptr, size_pack, MPI_UNSIGNED_CHAR, receiver_proc, 2, jcomm,
349 if( ierr != 0 ) return MB_FAILURE;
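// lines 318-349: each receiver gets its own packed ParallelComm buffer, sent with a
// nonblocking MPI_Isend (message tag 2); because the sends are nonblocking, the buffers
// must remain valid until the MPI_Waitall in lines 559-568 completes the requests.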
361 MPI_Comm receive = pco->comm();
362 int size_pack_array, ierr;
374 ierr = MPI_Probe( sender( 0 ), 20, jcomm, &status );
377 std::cout << " MPI_Probe failure: " << ierr << "\n";
381 ierr = MPI_Get_count( &status, MPI_INT, &size_pack_array );
384 std::cout << " MPI_Get_count failure: " << ierr << "\n";
388 std::cout << " receive comm graph size: " << size_pack_array << "\n";
390 pack_array.resize( size_pack_array );
391 ierr = MPI_Recv( &pack_array[0], size_pack_array, MPI_INT, sender( 0 ), 20, jcomm, &status );
392 if( 0 != ierr ) return MB_FAILURE;
394 std::cout << " receive comm graph ";
395 for( int k = 0; k < (int)pack_array.size(); k++ )
396 std::cout << " " << pack_array[k];
402 ierr = MPI_Bcast( &size_pack_array, 1, MPI_INT, 0, receive );
403 if( 0 != ierr ) return MB_FAILURE;
404 pack_array.resize( size_pack_array );
405 ierr = MPI_Bcast( &pack_array[0], size_pack_array, MPI_INT, 0, receive );
406 if( 0 != ierr ) return MB_FAILURE;
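// lines 361-406: presumably only the root receiver task probes tag 20 to learn the size
// of the packed graph, receives it from sender( 0 ), and then both the size and the
// array are broadcast over the local communicator so every receiver task has the graph.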
413 std::vector< int >& senders_local )
429 if( !senders_local.empty() )
431 for( size_t k = 0; k < senders_local.size(); k++ )
433 int sender1 = senders_local[k];
443 ierr = MPI_Probe( sender1, 2, jcomm, &status );
446 std::cout << " MPI_Probe failure in ParCommGraph::receive_mesh " << ierr << "\n";
451 ierr = MPI_Get_count( &status, MPI_CHAR, &size_pack );
454 std::cout << " MPI_Get_count failure in ParCommGraph::receive_mesh " << ierr << "\n";
464 ierr = MPI_Recv( buffer->mem_ptr, size_pack, MPI_UNSIGNED_CHAR, sender1, 2, jcomm, &status );
467 std::cout << " MPI_Recv failure in ParCommGraph::receive_mesh " << ierr << "\n";
472 std::vector< std::vector< EntityHandle > > L1hloc, L1hrem;
473 std::vector< std::vector< int > > L1p;
474 std::vector< EntityHandle > L2hloc, L2hrem;
475 std::vector< unsigned int > L2p;
477 buffer->reset_ptr( sizeof( int ) );
478 std::vector< EntityHandle > entities_vec( entities.size() );
480 rval = pco->unpack_buffer( buffer->buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
492 if( local_primary_ents.empty() )
496 local_primary_ents = verts;
502 std::vector< int > orig_senders( local_primary_ents.size(), sender1 );
512 std::ostringstream partial_outFile;
514 partial_outFile << "part_send_" << sender1 << "."
519 std::cout << " writing from receiver " << rankInJoin << " from sender " << sender1
520 << " entities: " << entities.size() << std::endl;
529 if( newEnts.empty() )
531 std::cout << " WARNING: this task did not receive any entities \n";
536 newEnts = subtract( newEnts, local_verts );
548 rval = mb->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
559 int ierr, nsize = (int)sendReqs.size();
560 std::vector< MPI_Status > mult_status;
561 mult_status.resize( sendReqs.size() );
562 ierr = MPI_Waitall( nsize, &sendReqs[0], &mult_status[0] );
564 if( ierr != 0 ) return MB_FAILURE;
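// lines 559-568: MPI_Waitall blocks until every outstanding nonblocking send has
// completed; only after that is it safe to release the ParallelComm::Buffer objects.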
568 std::vector< ParallelComm::Buffer* >::iterator vit;
580 std::vector< Tag >& tag_handles )
588 int total_bytes_per_entity = 0;
590 std::vector< int > vect_bytes_per_tag;
592 std::vector< int > tag_sizes;
594 for( size_t i = 0; i < tag_handles.size(); i++ )
597 rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
599 rval = mb->tag_get_length( tag_handles[i], tag_size1 );MB_CHK_ERR( rval );
601 bytes_per_tag = bytes_per_tag / tag_size1;
603 total_bytes_per_entity += bytes_per_tag;
604 vect_bytes_per_tag.push_back( bytes_per_tag );
607 rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
608 tag_sizes.push_back( tag_size );
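// lines 594-608: tag_get_bytes and tag_get_length give each tag's storage size and
// component count; total_bytes_per_entity accumulated here presumably sizes the send
// buffers later (size_buffer = 4 + total_bytes_per_entity * the number of entities sent).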
620 int receiver_proc = it->first;
621 Range ents = it->second;
622 int size_buffer = 4 + total_bytes_per_entity *
626 buffer->reset_ptr( sizeof( int ) );
627 for( size_t i = 0; i < tag_handles.size(); i++ )
631 rval = mb->tag_get_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
633 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
635 *( (int*)buffer->mem_ptr ) = size_buffer;
637 ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, receiver_proc, 222, jcomm,
639 if( ierr != 0 ) return MB_FAILURE;
649 Tag gidTag = mb->globalId_tag();
650 std::vector< int > gids;
651 gids.resize( owned.size() );
652 rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
653 std::map< int, EntityHandle > gidToHandle;
658 gidToHandle[gids[i++]] = eh;
662 for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
665 int receiver_proc = mit->first;
666 std::vector< int >& eids = mit->second;
667 int size_buffer = 4 + total_bytes_per_entity *
670 buffer->reset_ptr( sizeof( int ) );
672 std::ofstream dbfile;
673 std::stringstream outf;
674 outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
675 dbfile.open( outf.str().c_str() );
676 dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
682 for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
686 for( i = 0; i < tag_handles.size(); i++ )
688 rval = mb->tag_get_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );
696 dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
697 double* vals = (double*)( buffer->buff_ptr );
698 for( int kk = 0; kk < tag_sizes[i]; kk++ )
700 dbfile << " " << *vals;
705 buffer->buff_ptr += vect_bytes_per_tag[i];
712 *( (int*)buffer->mem_ptr ) = size_buffer;
714 ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, receiver_proc, 222, jcomm,
716 if( ierr != 0 ) return MB_FAILURE;
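// lines 662-716: when the graph was settled by global IDs, the sender resolves each ID
// through gidToHandle, copies the tag values for that handle into the buffer entity by
// entity, and posts an MPI_Isend with message tag 222 to the receiver.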
726 std::vector< std::vector< double > > valuesTags;
727 valuesTags.resize( tag_handles.size() );
728 for( size_t i = 0; i < tag_handles.size(); i++ )
732 rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
733 valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
735 rval = mb->tag_get_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
739 for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
742 int receiver_proc = mit->first;
743 std::vector< int >& eids = mit->second;
744 std::vector< int >& index_in_values = map_index[receiver_proc];
745 std::vector< int >& index_ptr = map_ptr[receiver_proc];
746 int size_buffer = 4 + total_bytes_per_entity *
749 buffer->reset_ptr( sizeof( int ) );
751 std::ofstream dbfile;
752 std::stringstream outf;
753 outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
754 dbfile.open( outf.str().c_str() );
755 dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
760 for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
762 int index_in_v = index_in_values[index_ptr[j]];
763 for( size_t i = 0; i < tag_handles.size(); i++ )
766 *( (double*)( buffer->buff_ptr ) ) = valuesTags[i][index_in_v];
770 *( (int*)buffer->mem_ptr ) = size_buffer;
772 ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, receiver_proc, 222, jcomm,
774 if( ierr != 0 ) return MB_FAILURE;
785 std::vector< Tag >& tag_handles )
796 int total_bytes_per_entity = 0;
797 std::vector< int > vect_bytes_per_tag;
799 std::vector< int > tag_sizes;
801 for( size_t i = 0; i < tag_handles.size(); i++ )
804 rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
805 total_bytes_per_entity += bytes_per_tag;
806 vect_bytes_per_tag.push_back( bytes_per_tag );
809 rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
810 tag_sizes.push_back( tag_size );
822 int sender_proc = it->first;
823 Range ents = it->second;
824 int size_buffer = 4 + total_bytes_per_entity *
828 buffer->reset_ptr( sizeof( int ) );
830 *( (int*)buffer->mem_ptr ) = size_buffer;
833 ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, sender_proc, 222, jcomm, &status );
834 if( ierr != 0 ) return MB_FAILURE;
838 for( size_t i = 0; i < tag_handles.size(); i++ )
840 rval = mb->tag_set_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );
841 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
851 Tag gidTag = mb->globalId_tag();
852 std::vector< int > gids;
853 gids.resize( owned.size() );
854 rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
855 std::map< int, EntityHandle > gidToHandle;
860 gidToHandle[gids[i++]] = eh;
864 for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
867 int sender_proc = mit->first;
868 std::vector< int >& eids = mit->second;
869 int size_buffer = 4 + total_bytes_per_entity *
872 buffer->reset_ptr( sizeof( int ) );
873 *( (int*)buffer->mem_ptr ) = size_buffer;
876 ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, sender_proc, 222, jcomm, &status );
877 if( ierr != 0 ) return MB_FAILURE;
880 std::ofstream dbfile;
881 std::stringstream outf;
882 outf << "recvFrom_" << sender_proc << "_on_proc_" << rankInJoin << ".txt";
883 dbfile.open( outf.str().c_str() );
884 dbfile << "recvFrom_" << sender_proc << " on proc " << rankInJoin << "\n";
892 for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
895 std::map< int, EntityHandle >::iterator mit2 = gidToHandle.find( eID );
896 if( mit2 == gidToHandle.end() )
898 std::cout << " on rank: " << rankInJoin << " cannot find entity handle with global ID " << eID
903 for( i = 0; i < tag_handles.size(); i++ )
905 rval = mb->tag_set_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
907 dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
908 double* vals = (double*)( buffer->buff_ptr );
909 for( int kk = 0; kk < tag_sizes[i]; kk++ )
911 dbfile << " " << *vals;
916 buffer->buff_ptr += vect_bytes_per_tag[i];
933 std::vector< std::vector< double > > valuesTags;
934 valuesTags.resize( tag_handles.size() );
935 for( size_t i = 0; i < tag_handles.size(); i++ )
938 rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
939 valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
946 for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
949 int sender_proc = mit->first;
950 std::vector< int >& eids = mit->second;
951 std::vector< int >& index_in_values = map_index[sender_proc];
952 std::vector< int >& index_ptr = map_ptr[sender_proc];
953 int size_buffer = 4 + total_bytes_per_entity *
956 buffer->reset_ptr( sizeof( int ) );
959 ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_UNSIGNED_CHAR, sender_proc, 222, jcomm, &status );
960 if( ierr != 0 ) return MB_FAILURE;
963 for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
965 for( size_t i = 0; i < tag_handles.size(); i++ )
968 double val = *( (double*)( buffer->buff_ptr ) );
970 for( int k = index_ptr[j]; k < index_ptr[j + 1]; k++ )
971 valuesTags[i][index_in_values[k]] = val;
978 for( size_t i = 0; i < tag_handles.size(); i++ )
981 rval = mb->tag_set_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
993 int n = TLcovIDs.get_n();
995 for( int i = 0; i < n; i++ )
997 int to_proc = TLcovIDs.vi_wr[2 * i];
998 int globalIdElem = TLcovIDs.vi_wr[2 * i + 1];
1005 std::cout << " towards task " << mit->first << " send: " << mit->second.size() << " cells " << std::endl;
1006 for( size_t i = 0; i < mit->second.size(); i++ )
1008 std::cout << " " << mit->second[i];
1010 std::cout << std::endl;
1018 std::map< int, std::set< int > >& idsFromProcs )
1020 for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
1022 int fromProc = mt->first;
1023 std::set< int >& setIds = mt->second;
1027 for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
1030 listIDs[indx++] = valueID;
1041 int n = TLBackToComp.get_n();
1044 std::map< int, std::set< int > > uniqueIDs;
1046 for( int i = 0; i < n; i++ )
1048 int to_proc = TLBackToComp.vi_wr[3 * i + 2];
1049 int globalId = TLBackToComp.vi_wr[3 * i + 1];
1050 uniqueIDs[to_proc].insert( globalId );
1055 std::vector< std::pair< int, int > > vp;
1056 vp.reserve( valuesComp.size() );
1060 for( int i = 0; i < (int)valuesComp.size(); ++i )
1062 vp.push_back( std::make_pair( valuesComp[i], i ) );
1065 sort( vp.begin(), vp.end() );
1070 for( std::map< int, std::set< int > >::iterator it = uniqueIDs.begin(); it != uniqueIDs.end(); it++ )
1072 int procId = it->first;
1073 std::set< int >& nums = it->second;
1074 std::vector< int >& indx = map_ptr[procId];
1075 std::vector< int >& indices = map_index[procId];
1076 indx.resize( nums.size() + 1 );
1080 for( std::set< int >::iterator sst = nums.begin(); sst != nums.end(); sst++, indexVal++ )
1084 indx[indexVal + 1] = indx[indexVal];
1085 while( ( indexInVp < (int)valuesComp.size() ) && ( vp[indexInVp].first <= val ) )
1087 if( vp[indexInVp].first == val )
1089 indx[indexVal + 1]++;
1090 indices.push_back( vp[indexInVp].second );
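// lines 1055-1090: vp pairs every entry of valuesComp with its position and is sorted by
// value, so the sorted unique IDs of each processor can be matched in a single sweep;
// indx and indices form a CSR-style map from each ID to all positions where it occurs.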
1097 std::stringstream f1;
1098 std::ofstream dbfile;
1099 f1 << "Involve_" << comp << "_" << rankInJoin << ".txt";
1100 dbfile.open( f1.str().c_str() );
1104 int corrTask = mit->first;
1105 std::vector< int >& corrIds = mit->second;
1106 std::vector< int >& indx = map_ptr[corrTask];
1107 std::vector< int >& indices = map_index[corrTask];
1109 dbfile << " towards proc " << corrTask << " \n";
1110 for( int i = 0; i < (int)corrIds.size(); i++ )
1112 dbfile << corrIds[i] << " [" << indx[i] << "," << indx[i + 1] << ") : ";
1113 for( int j = indx[i]; j < indx[i + 1]; j++ )
1114 dbfile << indices[j] << " ";
1148 int primaryDim = mb->dimension_from_handle( *owned.rbegin() );
1149 int interfaceDim = primaryDim - 1;
1156 Tag gidTag = mb->globalId_tag();
1158 unsigned char pstatus;
1160 std::multimap< int, int > extraGraphEdges;
1162 std::map< int, int > extraCellsProc;
1171 std::cout << " on sender task " << pco->rank() << " number of shared interface cells " << sharedEdges.size()
1181 std::map< EntityHandle, int > edgeToCell;
1188 rval = mb->get_adjacencies( &edge, 1, primaryDim, false, adjEnts );MB_CHK_ERR( rval );
1189 if( adjEnts.size() > 0 )
1193 rval = mb->tag_get_data( gidTag, &adjCell, 1, &gid );MB_CHK_ERR( rval );
1195 int n = TLe.get_n();
1196 TLe.vi_wr[2 * n] = shprocs[0];
1197 TLe.vi_wr[2 * n + 1] = gid;
1198 TLe.vul_wr[n] = shhandles[0];
1199 edgeToCell[edge] = gid;
1205 std::stringstream ff2;
1206 ff2 << "TLe_" << pco->rank() << ".txt";
1213 int ne = TLe.get_n();
1214 for( int i = 0; i < ne; i++ )
1216 int sharedProc = TLe.vi_rd[2 * i];
1217 int remoteCellID = TLe.vi_rd[2 * i + 1];
1219 int localCellId = edgeToCell[localCell];
1221 std::pair< int, int > extraAdj = std::make_pair( localCellId, remoteCellID );
1222 extraGraphEdges.insert( extraAdj );
1224 extraCellsProc[remoteCellID] = sharedProc;
1226 std::cout << "local ID " << edgeToCell[localCell] << " remote cell ID: " << remoteCellID << "\n";
1231 if( rootSender ) std::cout << " time preparing the input for Zoltan:" << t2 - t1 << " seconds. \n";
1233 #ifdef MOAB_HAVE_ZOLTAN
1237 std::map< int, Range > distribution;
1241 rval = mbZTool->partition_owned_cells( primaryCells, extraGraphEdges, extraCellsProc, numNewPartitions,
1243 for( std::map< int, Range >::iterator mit = distribution.begin(); mit != distribution.end(); mit++ )
1245 int part_index = mit->first;
1246 assert( part_index < numNewPartitions );
1254 if( rootSender ) std::cout << " time spent by Zoltan " << t3 - t2 << " seconds. \n";
1261 std::vector< int >& valuesComp1,
1263 Range& ents_of_interest,
1267 if( rootSender ) std::cout << " find split_ranges on component " << comp << " according to read map \n";
1270 int n = TLBackToComp1.get_n();
1273 std::map< int, std::set< int > > uniqueIDs;
1275 for( int i = 0; i < n; i++ )
1277 int to_proc = TLBackToComp1.vi_wr[3 * i + 2];
1278 int globalId = TLBackToComp1.vi_wr[3 * i + 1];
1279 uniqueIDs[to_proc].insert( globalId );
1282 for( int i = 0; i < (int)ents_of_interest.size(); i++ )
1285 for( int j = 0; j < lenTag; j++ )
1287 int marker = valuesComp1[i * lenTag + j];
1288 for( auto mit = uniqueIDs.begin(); mit != uniqueIDs.end(); mit++ )
1290 int proc = mit->first;
1291 std::set< int >& setIds = mit->second;
1292 if( setIds.find( marker ) != setIds.end() )
1311 Tag gidtag = mb->globalId_tag();
1317 rval = mb->tag_get_handle( "GLOBAL_DOFS", gds );
1320 std::map< int, Range > verts_to_proc;
1321 int numv = 0, numc = 0;
1324 int to_proc = it->first;
1328 rval = mb->get_connectivity( it->second, verts );MB_CHK_ERR( rval );
1329 numc += (int)it->second.size();
1333 verts_to_proc[to_proc] = verts;
1334 numv += (int)verts.size();
1340 for( auto it = verts_to_proc.begin(); it != verts_to_proc.end(); it++ )
1342 int to_proc = it->first;
1343 Range& verts = it->second;
1347 int n = TLv.get_n();
1348 TLv.vi_wr[2 * n] = to_proc;
1350 rval = mb->tag_get_data( gidtag, &v, 1, &( TLv.vi_wr[2 * n + 1] ) );MB_CHK_ERR( rval );
1357 int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;
1359 std::vector< int > gdvals;
1365 int to_proc = it->first;
1366 Range& cells = it->second;
1370 int n = TLc.get_n();
1371 TLc.vi_wr[size_tuple * n] = to_proc;
1372 int current_index = 2;
1373 rval = mb->tag_get_data( gidtag, &cell, 1, &( TLc.vi_wr[size_tuple * n + 1] ) );MB_CHK_ERR( rval );
1376 rval = mb->tag_get_data( gds, &cell, 1, &( TLc.vi_wr[size_tuple * n + current_index] ) );MB_CHK_ERR( rval );
1377 current_index += lenTagType1;
1382 rval = mb->get_connectivity( cell, conn, nnodes );MB_CHK_ERR( rval );
1384 TLc.vi_wr[size_tuple * n + current_index] = nnodes;
1385 rval = mb->tag_get_data( gidtag, conn, nnodes, &( TLc.vi_wr[size_tuple * n + current_index + 1] ) );MB_CHK_ERR( rval );
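// lines 1357-1385: each cell tuple carries the destination rank, the cell global ID, the
// lenTagType1 GLOBAL_DOFS values when type == 1, the number of corner nodes, and then up
// to 10 vertex global IDs, matching size_tuple = 2 + lenTagType1 + 1 + 10 in the type == 1 case.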
1397 Range& primary_ents,
1398 std::vector< int >& values_entities )
1402 Tag gidtag = mb->globalId_tag();
1406 std::vector< int > def_val( lenTagType1, 0 );
1414 std::map< int, EntityHandle > vertexMap;
1417 int n = TLv.get_n();
1419 for( int i = 0; i < n; i++ )
1421 int gid = TLv.vi_rd[2 * i + 1];
1422 if( vertexMap.find( gid ) == vertexMap.end() )
1437 rval = mb->add_entities( fset, verts );MB_CHK_ERR( rval );
1440 primary_ents = verts;
1441 values_entities.resize( verts.size() );
1442 rval = mb->tag_get_data( gidtag, verts, &values_entities[0] );MB_CHK_ERR( rval );
1447 int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;
1451 std::map< int, EntityHandle > cellMap;
1452 for( int i = 0; i < n; i++ )
1454 int from_proc = TLc.vi_rd[size_tuple * i];
1455 int globalIdEl = TLc.vi_rd[size_tuple * i + 1];
1456 if( cellMap.find( globalIdEl ) == cellMap.end() )
1458 int current_index = 2;
1459 if( 1 == type ) current_index += lenTagType1;
1460 int nnodes = TLc.vi_rd[size_tuple * i + current_index];
1461 std::vector< EntityHandle > conn;
1462 conn.resize( nnodes );
1463 for( int j = 0; j < nnodes; j++ )
1465 conn[j] = vertexMap[TLc.vi_rd[size_tuple * i + current_index + j + 1]];
1468 EntityType entType = MBQUAD;
1470 if( nnodes < 4 ) entType = MBTRI;
1471 rval = mb->create_element( entType, &conn[0], nnodes, new_element );MB_CHK_SET_ERR( rval, "can't create new element " );
1472 cells.insert( new_element );
1473 cellMap[globalIdEl] = new_element;
1474 rval = mb->tag_set_data( gidtag, &new_element, 1, &globalIdEl );MB_CHK_SET_ERR( rval, "can't set global id tag on cell " );
1478 rval = mb->tag_set_data( gds, &new_element, 1, &( TLc.vi_rd[size_tuple * i + 2] ) );MB_CHK_SET_ERR( rval, "can't set gds tag on cell " );
1483 rval = mb->add_entities( fset, cells );MB_CHK_ERR( rval );
1484 primary_ents = cells;
1487 values_entities.resize( lenTagType1 * primary_ents.size() );
1488 rval = mb->tag_get_data( gds, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
1492 values_entities.resize( primary_ents.size() );
1493 rval = mb->tag_get_data( gidtag, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
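// lines 1414-1493: vertexMap and cellMap deduplicate incoming vertices and cells by
// global ID, each new cell is created as MBTRI or MBQUAD depending on nnodes, and
// values_entities is finally filled from GLOBAL_DOFS (type 1) or from the global IDs.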
1504 int numberReceivers =
1508 std::vector< int > displs( 1 );
1509 std::vector< int > counts( 1 );
1512 displs.resize( nSenders + 1 );
1513 counts.resize( nSenders );
1516 int ierr = MPI_Gather( &numberReceivers, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, pco->comm() );
1517 if( ierr != MPI_SUCCESS ) return MB_FAILURE;
1522 for( int k = 0; k < nSenders; k++ )
1524 displs[k + 1] = displs[k] + counts[k];
1527 std::vector< int > buffer;
1530 std::vector< int > recvs;
1533 recvs.push_back( mit->first );
1536 ierr = MPI_Gatherv( &recvs[0], numberReceivers, MPI_INT, &buffer[0], &counts[0], &displs[0], MPI_INT, 0, pco->comm() );
1537 if( ierr != MPI_SUCCESS ) return MB_FAILURE;
1544 std::ofstream dbfileSender;
1545 std::stringstream outf;
1546 outf << "S_" << compid1 << "_R_" << compid2 << "_SenderGraph.txt";
1547 dbfileSender.open( outf.str().c_str() );
1548 dbfileSender << " number senders: " << nSenders << "\n";
1549 dbfileSender << " senderRank \treceivers \n";
1550 for( int k = 0; k < nSenders; k++ )
1552 int indexInBuff = displs[k];
1554 dbfileSender << senderTask << "\t\t";
1555 for( int j = 0; j < counts[k]; j++ )
1557 int recvTask = buffer[indexInBuff + j];
1558 dbfileSender << recvTask << " ";
1560 dbfileSender << "\n";
1562 dbfileSender.close();
1564 for( int k = 0; k < nSenders; k++ )
1566 int indexInBuff = displs[k];
1568 for( int j = 0; j < counts[k]; j++ )
1570 int recvTask = buffer[indexInBuff + j];
1571 recv_graph[recvTask].push_back( senderTask );
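// lines 1516-1571: the root gathers each sender's receiver count (MPI_Gather) and then
// the concatenated receiver lists (MPI_Gatherv, with displs as offsets), and inverts
// them into recv_graph, which maps every receiver rank to its list of sender ranks.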
1576 std::ofstream dbfile;
1577 std::stringstream outf2;
1578 outf2 << "S_" << compid1 << "_R_" << compid2 << "_RecvGraph.txt";
1579 dbfile.open( outf2.str().c_str() );
1580 dbfile << " number receivers: " << recv_graph.size() << "\n";
1581 dbfile << " receiverRank \tsenders \n";
1582 for( std::map< int, std::vector< int > >::iterator mit = recv_graph.begin(); mit != recv_graph.end(); mit++ )
1584 int recvTask = mit->first;
1585 std::vector< int >& senders = mit->second;
1586 dbfile << recvTask << "\t\t";
1587 for( std::vector< int >::iterator vit = senders.begin(); vit != senders.end(); vit++ )
1588 dbfile << *vit << " ";
1606 std::ofstream dbfile;
1607 std::stringstream outf;
1609 dbfile.open( outf.str().c_str() );
1613 for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
1616 int receiver_proc = mit->first;
1617 std::vector< int >& eids = mit->second;
1618 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
1625 int receiver_proc = mit->first;
1626 Range& eids = mit->second;
1627 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
1632 for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
1635 int receiver_proc = mit->first;
1636 dbfile << "receiver: " << receiver_proc << " size:" << mit->second.size() << "\n";
1643 std::ofstream dbfile;
1644 std::stringstream outf;
1647 dbfile.open( outf.str().c_str() );
1651 for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
1654 int sender_proc = mit->first;
1655 std::vector< int >& eids = mit->second;
1656 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
1663 int sender_proc = mit->first;
1664 Range& eids = mit->second;
1665 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
1670 for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
1673 int sender_proc = mit->first;
1674 dbfile << "sender: " << sender_proc << " size:" << mit->second.size() << "\n";