#include <H5Tpublic.h>
#ifdef MOAB_HAVE_TEMPESTREMAP
#include "STLStringHelper.h"
#ifdef MOAB_HAVE_TEMPESTREMAP
struct TempestMapAppData
std::map< std::string, moab::TempestOnlineMap* > weightMaps;
int num_src_ghost_layers;
int num_tgt_ghost_layers;
std::map< int, ParCommGraph* > pgraph;
#ifdef MOAB_HAVE_TEMPESTREMAP
TempestMapAppData tempestData;
std::map< std::string, std::string > metadataMap;
for( int i = 0; i < 4; i++ )
MPI_Initialized( &flagInit );
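// apphash: derive a deterministic application id from the name and external
// component id, using an FNV-1a-style hash (offset basis 2166136261) XORed
// with the low 32 bits of the identifier; the sign bit is masked off so the
// returned id is always non-negative.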
std::string appstr( str );
unsigned int h = 2166136261u;
for( char c : appstr )
h ^= static_cast< unsigned char >( c );
h ^= static_cast< unsigned int >( identifier & 0xFFFFFFFF );
return static_cast< int >( h & 0x7FFFFFFF );
std::string name( app_name );
std::cout << " application " << name << " already registered \n";
return moab::MB_FAILURE;
*pid = apphash( app_name, *compid );
MPI_Comm_rank( *comm, &rankHere );
std::cout << " application " << name << " with ID = " << *pid << " and external id: " << *compid << " is registered now \n";
std::cout << " convention for external application is to have its id positive \n";
return moab::MB_FAILURE;
app_data.name = name;
#ifdef MOAB_HAVE_TEMPESTREMAP
app_data.tempestData.remapper = nullptr;
app_data.tempestData.num_src_ghost_layers = 0;
app_data.tempestData.num_tgt_ghost_layers = 0;
#ifdef MOAB_HAVE_TEMPESTREMAP
app_data.secondary_file_set = app_data.file_set;
assert( app_name != nullptr );
std::string name( app_name );
ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
appData& data = appIterator->second;
std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name << " is de-registered now \n";
fileents.insert( fileSet );
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper ) delete data.tempestData.remapper;
if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
auto& pargs = data.pgraph;
for( auto mt = pargs.begin(); mt != pargs.end(); ++mt )
data.pcomm = nullptr;
if( !adj_ents_left.empty() )
vertices = subtract( vertices, conn_verts );
if( *pid == mit->second )
data.pcomm = nullptr;
int* num_global_vertices,
int* num_global_elements,
IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
#ifdef MOAB_HAVE_HDF5
std::string filen( filename );
if( num_global_vertices ) *num_global_vertices = 0;
if( num_global_elements ) *num_global_elements = 0;
if( num_dimension ) *num_dimension = 0;
if( num_parts ) *num_parts = 0;
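// Scan the .h5m header with the mhdf C API: open the file read-only, count
// vertices and entities per type, and infer the mesh dimension from the
// highest populated dimension (edges, faces, regions).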
unsigned long max_id;
file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
return moab::MB_FAILURE;
fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
return moab::MB_FAILURE;
if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
edges += ent_d->count;
faces += ent_d->count;
faces += ent_d->count;
faces += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
if( num_parts ) *num_parts = data->numEntSets[0];
if( num_dimension ) *num_dimension = 1;
if( num_global_elements ) *num_global_elements = edges;
if( num_dimension ) *num_dimension = 2;
if( num_global_elements ) *num_global_elements = faces;
if( num_dimension ) *num_dimension = 3;
if( num_global_elements ) *num_global_elements = regions;
std::cout << filename << ": Please reconfigure with HDF5. Cannot retrieve header information for file formats other than a h5m file.\n";
if( num_global_vertices ) *num_global_vertices = 0;
if( num_global_elements ) *num_global_elements = 0;
if( num_dimension ) *num_dimension = 0;
if( num_parts ) *num_parts = 0;
int* num_ghost_layers )
IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
std::ostringstream newopts;
if( read_options ) newopts << read_options;
std::string opts( ( read_options ? read_options : "" ) );
std::string pcid( "PARALLEL_COMM=" );
std::size_t found = opts.find( pcid );
if( found != std::string::npos )
std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
return moab::MB_FAILURE;
std::string filen( filename );
std::string::size_type idx = filen.rfind( '.' );
if( idx != std::string::npos )
std::string extension = filen.substr( idx + 1 );
if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
newopts << ";;PARALLEL_COMM=" << pco->get_id();
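// Ghost layers are requested through PARALLEL_GHOSTS; the value "3.0.<n>.3"
// presumably encodes ghost dimension 3, bridge dimension 0 (vertices),
// <n> layers, and additional entities of dimension 3.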
if( *num_ghost_layers >= 1 )
std::string pcid2( "PARALLEL_GHOSTS=" );
std::size_t found2 = opts.find( pcid2 );
if( found2 != std::string::npos )
std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed number of layers \n";
newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
std::ostringstream outfile;
int rank = pco->rank();
int nprocs = pco->size();
outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
outfile << "TaskMesh_n1.0.h5m";
bool primary_set = true )
IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
std::ostringstream newopts;
std::string write_opts( ( write_options ? write_options : "" ) );
std::string pcid( "PARALLEL_COMM=" );
if( write_opts.find( pcid ) != std::string::npos )
std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
return moab::MB_FAILURE;
std::string pw( "PARALLEL=WRITE_PART" );
if( write_opts.find( pw ) != std::string::npos )
newopts << "PARALLEL_COMM=" << pco->get_id() << ";";
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper != nullptr )
else if( data.file_set != data.secondary_file_set )
fileSet = data.secondary_file_set;
MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid secondary file set handle" );
if( write_options ) newopts << write_options;
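// Collect the tags to persist: every tag registered on this application,
// plus GLOBAL_ID and PARALLEL_PARTITION when the caller did not register
// them explicitly, so the written file remains self-describing.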
std::vector< Tag > copyTagList = data.tagList;
std::string gid_name_tag( "GLOBAL_ID" );
if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
copyTagList.push_back( gid );
std::string pp_name_tag( "PARALLEL_PARTITION" );
if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
if( ptag ) copyTagList.push_back( ptag );
copyTagList.size() ) );
IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
std::ostringstream file_name;
int rank = 0, size = 1;
rank = pcomm->rank();
file_name << prefix << "_" << size << "_" << rank << ".h5m";
int local[2], global[2];
MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
int* num_visible_vertices,
int* num_visible_elements,
int* num_visible_blocks,
int* num_visible_surfaceBC,
int* num_visible_vertexBC )
if( num_visible_elements )
num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
if( num_visible_vertices )
num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
if( num_visible_blocks )
num_visible_blocks[0] = num_visible_blocks[2];
num_visible_blocks[1] = 0;
if( num_visible_surfaceBC )
num_visible_surfaceBC[2] = 0;
for( int i = 0; i < numNeuSets; i++ )
Range adjPrimaryEnts;
num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
num_visible_surfaceBC[1] = 0;
if( num_visible_vertexBC )
num_visible_vertexBC[2] = 0;
for( int i = 0; i < numDiriSets; i++ )
num_visible_vertexBC[2] += (int)verts.size();
num_visible_vertexBC[0] = num_visible_vertexBC[2];
num_visible_vertexBC[1] = 0;
IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
assert( vertices_length && *vertices_length );
assert( visible_global_rank_ID );
if( i != *vertices_length )
return moab::MB_FAILURE;
if( (int)verts.size() != *vertices_length )
return moab::MB_FAILURE;
visible_global_rank_ID[i] = 0;
if( *coords_length != 3 * (int)verts.size() )
return moab::MB_FAILURE;
if( *block_length != (int)matSets.size() )
return moab::MB_FAILURE;
for( unsigned i = 0; i < matSets.size(); i++ )
matIdx[global_block_IDs[i]] = i;
int* vertices_per_element,
int* num_elements_in_block )
assert( global_block_ID );
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
return moab::MB_FAILURE;
return moab::MB_FAILURE;
*vertices_per_element = num_verts;
*num_elements_in_block = (int)blo_elems.size();
int* num_visible_elements,
#ifdef MOAB_HAVE_MPI
#ifdef MOAB_HAVE_MPI
return moab::MB_FAILURE;
if( -1 >= *num_visible_elements )
return moab::MB_FAILURE;
block_IDs[index] = valMatTag;
int* connectivity_length,
int* element_connectivity )
assert( global_block_ID );
assert( connectivity_length );
std::map< int, int >& matMap = data.matIndex;
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
std::vector< EntityHandle > elems;
return moab::MB_FAILURE;
std::vector< EntityHandle > vconnect;
if( *connectivity_length != (int)vconnect.size() )
return moab::MB_FAILURE;
for( int i = 0; i < *connectivity_length; i++ )
return moab::MB_FAILURE;
element_connectivity[i] = inx;
int* connectivity_length,
int* element_connectivity )
assert( elem_index );
assert( connectivity_length );
assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
if( *connectivity_length < num_nodes )
return moab::MB_FAILURE;
for( int i = 0; i < num_nodes; i++ )
return moab::MB_FAILURE;
element_connectivity[i] = index;
*connectivity_length = num_nodes;
int* num_elements_in_block,
int* element_ownership )
assert( global_block_ID );
assert( num_elements_in_block );
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
return moab::MB_FAILURE;
if( *num_elements_in_block != (int)elems.size() )
return moab::MB_FAILURE;
#ifdef MOAB_HAVE_MPI
#ifdef MOAB_HAVE_MPI
element_ownership[i] = 0;
int* num_elements_in_block,
assert( global_block_ID );
assert( num_elements_in_block );
std::map< int, int >& matMap = data.matIndex;
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
return moab::MB_FAILURE;
if( *num_elements_in_block != (int)elems.size() )
return moab::MB_FAILURE;
for( int i = 0; i < *num_elements_in_block; i++ )
if( -1 == local_element_ID[i] )
return moab::MB_FAILURE;
int* surface_BC_length,
int* reference_surface_ID,
int* boundary_condition_value )
for( int i = 0; i < numNeuSets; i++ )
Range adjPrimaryEnts;
if( -1 == local_element_ID[index] )
return moab::MB_FAILURE;
boundary_condition_value[index] = neuVal;
if( index != *surface_BC_length )
return moab::MB_FAILURE;
int* vertex_BC_length,
int* boundary_condition_value )
for( int i = 0; i < numDiriSets; i++ )
if( -1 == local_vertex_ID[index] )
return moab::MB_FAILURE;
boundary_condition_value[index] = diriVal;
if( *vertex_BC_length != index )
return moab::MB_FAILURE;
std::string& separator,
std::vector< std::string >& list_tag_names )
while( ( pos = input_names.find( separator ) ) != std::string::npos )
token = input_names.substr( 0, pos );
if( !token.empty() ) list_tag_names.push_back( token );
input_names.erase( 0, pos + separator.length() );
if( !input_names.empty() )
list_tag_names.push_back( input_names );
int* components_per_entity,
if( *tag_type < 0 || *tag_type > 5 ) return moab::MB_FAILURE;
void* defaultVal = nullptr;
int* defInt = new int[*components_per_entity];
double* defDouble = new double[*components_per_entity];
for( int i = 0; i < *components_per_entity; i++ )
defDouble[i] = -1e+10;
defaultVal = defInt;
defaultVal = defDouble;
defaultVal = defHandle;
defaultVal = defInt;
defaultVal = defDouble;
defaultVal = defHandle;
return moab::MB_FAILURE;
std::string tag_name( tag_storage_name );
std::vector< std::string > tagNames;
std::string separator( ":" );
int already_defined_tags = 0;
for( size_t i = 0; i < tagNames.size(); i++ )
std::map< std::string, Tag >& mTags = data.tagMap;
std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
if( mit == mTags.end() )
mTags[tagNames[i]] = tagHandle;
*tag_index = (int)data.tagList.size();
data.tagList.push_back( tagHandle );
already_defined_tags++;
data.tagMap[tagNames[i]] = tagHandle;
*tag_index = (int)data.tagList.size();
data.tagList.push_back( tagHandle );
rval = moab::MB_FAILURE;
#ifdef MOAB_HAVE_MPI
rankHere = pco->rank();
if( !rankHere && already_defined_tags )
std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
int* num_tag_storage_length,
int* tag_storage_data )
std::string tag_name( tag_storage_name );
if( data.tagMap.find( tag_name ) == data.tagMap.end() ) return moab::MB_FAILURE;
MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." );
int nents_to_be_set = *num_tag_storage_length / tagLength;
if( nents_to_be_set > (int)ents_to_set->size() )
return moab::MB_FAILURE;
int* num_tag_storage_length,
int* tag_storage_data )
std::string tag_name( tag_storage_name );
if( data.tagMap.find( tag_name ) == data.tagMap.end() )
return moab::MB_FAILURE;
MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." );
int nents_to_get = *num_tag_storage_length / tagLength;
if( nents_to_get > (int)ents_to_get->size() )
return moab::MB_FAILURE;
int* num_tag_storage_length,
double* tag_storage_data )
std::string tag_names( tag_storage_names );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
int nents_to_be_set = (int)( *ents_to_set ).size();
for( size_t i = 0; i < tagNames.size(); i++ )
if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
return moab::MB_FAILURE;
return moab::MB_FAILURE;
if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
return moab::MB_FAILURE;
position = position + tagLength * nents_to_be_set;
int* num_tag_storage_length,
double* tag_storage_data,
std::string tag_names( tag_storage_names );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
int nents_to_be_set = (int)( *ents_to_set ).size();
std::vector< int > gids;
gids.resize( nents_to_be_set );
std::map< int, EntityHandle > eh_by_gid;
eh_by_gid[gids[i]] = *it;
int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() );
std::set< int > globalIdsSet;
for( int j = 0; j < nbLocalVals; j++ )
globalIdsSet.insert( globalIds[j] );
if( globalIdsSet.size() < nbLocalVals )
std::cout << "iMOAB_SetDoubleTagStorageWithGid: for pid:" << *pid << " tags[0]:" << tagNames[0] << " global ids passed are not unique, major error\n";
std::cout << " nbLocalVals:" << nbLocalVals << " globalIdsSet.size():" << globalIdsSet.size() << " first global id:" << globalIds[0] << "\n";
return moab::MB_FAILURE;
std::vector< int > tagLengths( tagNames.size() );
std::vector< Tag > tagList;
size_t total_tag_len = 0;
for( size_t i = 0; i < tagNames.size(); i++ )
if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
tagList.push_back( tag );
total_tag_len += tagLength;
tagLengths[i] = tagLength;
MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
#ifdef MOAB_HAVE_MPI
unsigned num_procs = pco->size();
if( num_procs > 1 ) serial = false;
for( int i = 0; i < nents_to_be_set; i++ )
int gid = globalIds[i];
std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
if( mapIt == eh_by_gid.end() ) continue;
int indexInTagValues = 0;
for( size_t j = 0; j < tagList.size(); j++ )
indexInTagValues += i * tagLengths[j];
indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j];
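// Parallel path: a rendezvous through the crystal router. Values and id
// requests are both routed to rank gid % num_procs, so the task that receives
// a request for a global id also receives the values posted under that id.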
#ifdef MOAB_HAVE_MPI
TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals );
int indexInRealLocal = 0;
for( int i = 0; i < nbLocalVals; i++ )
int marker = globalIds[i];
int to_proc = marker % num_procs;
int n = TLsend.get_n();
TLsend.vi_wr[2 * n] = to_proc;
TLsend.vi_wr[2 * n + 1] = marker;
int indexInTagValues = 0;
for( size_t j = 0; j < tagList.size(); j++ )
indexInTagValues += i * tagLengths[j];
for( int k = 0; k < tagLengths[j]; k++ )
TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
for( int i = 0; i < nents_to_be_set; i++ )
int marker = gids[i];
int to_proc = marker % num_procs;
int n = TLreq.get_n();
TLreq.vi_wr[2 * n] = to_proc;
TLreq.vi_wr[2 * n + 1] = marker;
sort_buffer.buffer_init( TLreq.get_n() );
TLreq.sort( 1, &sort_buffer );
sort_buffer.reset();
sort_buffer.buffer_init( TLsend.get_n() );
TLsend.sort( 1, &sort_buffer );
sort_buffer.reset();
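// Both tuple lists are now sorted by global id, so matching requests against
// posted values reduces to a linear merge-join; runs of duplicate ids are
// paired blockwise (size1 x size2) before both cursors advance.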
TLBack.initialize( 3, 0, 0, total_tag_len, 0 );
int n1 = TLreq.get_n();
int n2 = TLsend.get_n();
int indexInTLreq = 0;
int indexInTLsend = 0;
if( n1 > 0 && n2 > 0 )
while( indexInTLreq < n1 && indexInTLsend < n2 )
int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
if( currentValue1 < currentValue2 )
if( currentValue1 > currentValue2 )
while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
for( int i1 = 0; i1 < size1; i1++ )
for( int i2 = 0; i2 < size2; i2++ )
int n = TLBack.get_n();
TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )];
TLBack.vi_wr[3 * n + 1] = currentValue1;
TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )];
for( size_t k = 0; k < total_tag_len; k++ )
TLBack.vr_rd[total_tag_len * n + k] = TLsend.vr_rd[total_tag_len * indexInTLsend + k];
indexInTLreq += size1;
indexInTLsend += size2;
n1 = TLBack.get_n();
double* ptrVal = &TLBack.vr_rd[0];
for( int i = 0; i < n1; i++ )
int gid = TLBack.vi_rd[3 * i + 1];
std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
if( mapIt == eh_by_gid.end() ) continue;
for( size_t j = 0; j < tagList.size(); j++ )
ptrVal += tagLengths[j];
int* num_tag_storage_length,
double* tag_storage_data )
std::string tag_names( tag_storage_names );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
Range* ents_to_get = nullptr;
if( *ent_type == 0 )
else if( *ent_type == 1 )
int nents_to_get = (int)ents_to_get->size();
for( size_t i = 0; i < tagNames.size(); i++ )
if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
return moab::MB_FAILURE;
return moab::MB_FAILURE;
if( position + nents_to_get * tagLength > *num_tag_storage_length )
return moab::MB_FAILURE;
position = position + nents_to_get * tagLength;
#ifdef MOAB_HAVE_MPI
std::vector< Tag > tags;
for( int i = 0; i < *num_tag; i++ )
if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
return moab::MB_FAILURE;
tags.push_back( data.tagList[tag_indices[i]] );
if( *ent_type == 0 )
else if( *ent_type == 1 )
return moab::MB_FAILURE;
int k = *pid + *num_tag + *tag_indices + *ent_type;
#ifdef MOAB_HAVE_MPI
if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
return moab::MB_FAILURE;
if( *ent_type == 0 )
else if( *ent_type == 1 )
return moab::MB_FAILURE;
int k = *pid + *tag_index + *ent_type;
int* num_adjacent_elements,
if( *num_adjacent_elements < (int)adjs.size() )
return moab::MB_FAILURE;
*num_adjacent_elements = (int)adjs.size();
for( int i = 0; i < *num_adjacent_elements; i++ )
return moab::MB_FAILURE;
int nverts = *coords_len / *dim;
int* num_nodes_per_element,
EntityType mbtype = (EntityType)( *type );
for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
array[j] = connectivity[j] + firstVertex - 1;
Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
int set_no = *block_ID;
const void* setno_ptr = &set_no;
if( MB_FAILURE == rval || sets.empty() )
block_set = sets[0];
if( nullptr != num_global_verts )
if( nullptr != num_global_elems )
#ifdef MOAB_HAVE_MPI
return moab::MB_FAILURE;
std::cout << " can't get par part tag.\n";
return moab::MB_FAILURE;
int rank = pco->rank();
if( num_ghost_layers && *num_ghost_layers <= 0 )
constexpr int addl_ents = 0;
for( int i = 2; i <= *num_ghost_layers; i++ )
MPI_Comm* joint_communicator,
MPI_Group* receivingGroup,
assert( joint_communicator != nullptr );
assert( receivingGroup != nullptr );
assert( rcompid != nullptr );
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator );
MPI_Group recvGroup = ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
MPI_Comm sender = pco->comm();
MPI_Group senderGroup;
ierr = MPI_Comm_group( sender, &senderGroup );
if( ierr != 0 ) return moab::MB_FAILURE;
int sender_rank = -1;
MPI_Comm_rank( sender, &sender_rank );
std::vector< int > number_elems_per_part;
if( owned.size() == 0 )
int local_owned_elem = (int)owned.size();
int rank = pco->rank();
number_elems_per_part.resize( size );
number_elems_per_part[rank] = local_owned_elem;
#if( MPI_VERSION >= 2 )
ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
std::vector< int > all_tmp( size );
ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
number_elems_per_part = all_tmp;
return moab::MB_FAILURE;
MPI_Group_free( &senderGroup );
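// Receive side: pack_array arrives as flattened (receiver, count, senders...)
// records; each receiver scans it for its own entry to recover the list of
// sender tasks before unpacking the mesh buffers.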
assert( joint_communicator != nullptr );
assert( sendingGroup != nullptr );
assert( scompid != nullptr );
MPI_Comm receive = pco->comm();
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator );
MPI_Group sendGroup = ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
MPI_Group receiverGroup;
int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
int receiver_rank = -1;
MPI_Comm_rank( receive, &receiver_rank );
std::vector< int > pack_array;
int current_receiver = cgraph->receiver( receiver_rank );
std::vector< int > senders_local;
while( n < pack_array.size() )
if( current_receiver == pack_array[n] )
for( int j = 0; j < pack_array[n + 1]; j++ )
senders_local.push_back( pack_array[n + 2 + j] );
n = n + 2 + pack_array[n + 1];
std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from " << senders_local.size() << " tasks: ";
for( int k = 0; k < (int)senders_local.size(); k++ )
std::cout << " " << senders_local[k];
if( senders_local.empty() )
std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
if( (int)senders_local.size() >= 2 )
std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
std::cout << "after merging: new verts: " << new_verts.size() << "\n";
if( nullptr != densePartTag && MB_SUCCESS == rval )
std::vector< int > vals;
int rank = pco->rank();
vals.resize( local_verts.size(), rank );
std::cout << " can't get par part tag.\n";
return moab::MB_FAILURE;
int rank = pco->rank();
MPI_Group_free( &receiverGroup );
MPI_Comm* joint_communicator,
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
if( mt == data.pgraph.end() )
std::cout << " no par com graph for context_id:" << *context_id << " available contexts:";
for( auto mit = data.pgraph.begin(); mit != data.pgraph.end(); mit++ )
std::cout << " " << mit->first;
return moab::MB_FAILURE;
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator );
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper != nullptr )
std::string tag_name( tag_storage_name );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
for( size_t i = 0; i < tagNames.size(); i++ )
if( MB_SUCCESS != rval || nullptr == tagHandle )
"can't get tag handle with name: " << tagNames[i].c_str() << " at index " << i );
tagHandles.push_back( tagHandle );
MPI_Comm* joint_communicator,
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
if( mt == data.pgraph.end() )
std::cout << " no par com graph for context_id:" << *context_id << " available contexts:";
for( auto mit = data.pgraph.begin(); mit != data.pgraph.end(); mit++ )
std::cout << " " << mit->first;
return moab::MB_FAILURE;
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator );
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper != nullptr )
std::string tag_name( tag_storage_name );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
for( size_t i = 0; i < tagNames.size(); i++ )
if( MB_SUCCESS != rval || nullptr == tagHandle )
" can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i );
tagHandles.push_back( tagHandle );
std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << " and file set = " << ( data.file_set ) << "\n";
std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
if( mt == data.pgraph.end() ) return moab::MB_FAILURE;
mt->second->release_send_buffers();
MPI_Comm* joint_communicator,
assert( joint_communicator );
int localRank = 0, numProcs = 1;
bool isFortran = false;
if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator );
MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
MPI_Comm_rank( global, &localRank );
MPI_Comm_size( global, &numProcs );
auto mt = data.pgraph.find( *comp2 );
cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
auto mt = data.pgraph.find( *comp1 );
cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
int lenTagType1 = 1;
if( 1 == *type1 || 1 == *type2 )
std::vector< int > valuesComp1;
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data1.tempestData.remapper != nullptr )
Range ents_of_interest;
valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
else if( *type1 == 2 )
valuesComp1.resize( ents_of_interest.size() );
else if( *type1 == 3 )
valuesComp1.resize( ents_of_interest.size() );
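// Rendezvous on marker ids: each unique global id (or dof id for spectral
// meshes) is sent to rank marker % numProcs, so the same marker from both
// components meets on one rank regardless of how either mesh is distributed.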
std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
TLcomp1.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp1.get_n();
TLcomp1.vi_wr[2 * n] = to_proc;
TLcomp1.vi_wr[2 * n + 1] = marker;
std::stringstream ff1;
ff1 << "TLcomp1_" << localRank << ".txt";
sort_buffer.buffer_init( TLcomp1.get_n() );
TLcomp1.sort( 1, &sort_buffer );
sort_buffer.reset();
std::vector< int > valuesComp2;
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data2.tempestData.remapper != nullptr )
Range ents_of_interest;
valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
else if( *type2 == 2 )
valuesComp2.resize( ents_of_interest.size() );
else if( *type2 == 3 )
valuesComp2.resize( ents_of_interest.size() );
std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
TLcomp2.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp2.get_n();
TLcomp2.vi_wr[2 * n] = to_proc;
TLcomp2.vi_wr[2 * n + 1] = marker;
std::stringstream ff2;
ff2 << "TLcomp2_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLcomp2.get_n() );
TLcomp2.sort( 1, &sort_buffer );
sort_buffer.reset();
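// With both sorted streams on the rendezvous ranks, a merge-join emits, for
// every marker present in both components, one tuple back to each side
// recording the peer rank that holds the matching entity.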
int n1 = TLcomp1.get_n();
int n2 = TLcomp2.get_n();
int indexInTLComp1 = 0;
int indexInTLComp2 = 0;
if( n1 > 0 && n2 > 0 )
while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )
int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
if( currentValue1 < currentValue2 )
if( currentValue1 > currentValue2 )
while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
for( int i1 = 0; i1 < size1; i1++ )
for( int i2 = 0; i2 < size2; i2++ )
int n = TLBackToComp1.get_n();
TLBackToComp1.vi_wr[3 * n] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
n = TLBackToComp2.get_n();
TLBackToComp2.vi_wr[3 * n] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
indexInTLComp1 += size1;
indexInTLComp2 += size2;
std::stringstream f1;
f1 << "TLBack1_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
TLBackToComp1.sort( 1, &sort_buffer );
sort_buffer.reset();
std::stringstream f2;
f2 << "TLBack2_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
TLBackToComp2.sort( 2, &sort_buffer );
sort_buffer.reset();
std::vector< Tag > tagsList;
if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE;
tagsList.push_back( tag );
if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
double tol = 1.0e-9;
std::cout << " can't get par part tag.\n";
return moab::MB_FAILURE;
int rank = pco->rank();
#ifdef MOAB_HAVE_TEMPESTREMAP
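// iMOAB_CoverageGraph rewires communication after intersection: it walks the
// intersection (or coverage) cells, groups their global ids by the task that
// originally owned them, and builds a tighter ParCommGraph for the new
// context so only needed cells are exchanged.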
ErrCode iMOAB_CoverageGraph( MPI_Comm* joint_communicator,
std::vector< int > srcSenders, receivers;
bool is_fortran_context = false;
MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid source application ID specified: " << *pid_src );
MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid migration/coverage application ID specified: " << *pid_migr );
MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid intersection application ID specified: " << *pid_intx );
int default_context_id = *migration_id;
assert( dataSrc.global_id == *source_id );
is_fortran_context = dataSrc.is_fortran || is_fortran_context;
if( dataSrc.pgraph.find( default_context_id ) != dataSrc.pgraph.end() )
sendGraph = dataSrc.pgraph[default_context_id];
MB_CHK_SET_ERR( moab::MB_FAILURE, "Could not find source ParCommGraph with default migration context" );
srcSenders = sendGraph->senders();
std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
if( *pid_migr >= 0 )
is_fortran_context = dataMigr.is_fortran || is_fortran_context;
int default_context_id = *source_id;
assert( dataMigr.global_id == *migration_id );
if( dataMigr.pgraph.find( default_context_id ) != dataMigr.pgraph.end() )
recvGraph = dataMigr.pgraph[default_context_id];
"Could not find coverage receiver ParCommGraph with default migration context" );
srcSenders = recvGraph->senders();
std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator );
int currentRankInJointComm = -1;
CHK_MPI_ERR( MPI_Comm_rank( global, &currentRankInJointComm ) );
if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
std::map< int, std::set< int > > idsFromProcs;
if( intx_cells.size() )
for( auto it = intx_cells.begin(); it != intx_cells.end(); ++it )
if( origProc < 0 ) continue;
idsFromProcs[origProc].insert( gidCell );
for( auto it = cover_cells.begin(); it != cover_cells.end(); ++it )
if( origProc < 0 ) continue;
assert( gidCell > 0 );
idsFromProcs[origProc].insert( gidCell );
std::ofstream dbfile;
std::stringstream outf;
outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
dbfile.open( outf.str().c_str() );
dbfile << "Writing this to a file.\n";
dbfile << " map size:" << idsFromProcs.size()
for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
std::set< int >& setIds = mt->second;
dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
dbfile << " " << valueID;
if( counter % 10 == 0 ) dbfile << "\n";
if( nullptr != recvGraph )
<< *context_id << " already exists. Check the workflow" );
for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); ++mit )
int procToSendTo = mit->first;
std::set< int >& idSet = mit->second;
for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); ++sit )
int n = TLcovIDs.get_n();
TLcovIDs.vi_wr[2 * n] = procToSendTo;
TLcovIDs.vi_wr[2 * n + 1] = *sit;
pc.crystal_router()->gs_transfer( 1, TLcovIDs, 0 );
if( nullptr != sendGraph )
dataSrc.pgraph[*context_id] = newSendGraph;
assert( prefix && strlen( prefix ) );
std::string prefix_str( prefix );
if( nullptr != cgraph )
std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name << " context: " << *context_id << "\n";
#ifdef MOAB_HAVE_TEMPESTREMAP
#ifdef MOAB_HAVE_NETCDF
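// Helper: areas read from the map file arrive in a trivial block
// decomposition of ids 1..N ( nL consecutive ids per rank ), so each task
// requests the area of each cell it owns from the block owner,
// to_proc = ( gid - 1 ) / nL, and stores the answer in the aream tag.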
static ErrCode set_aream_from_trivial_distribution( iMOAB_AppID pid, int N, std::vector< double >& trvArea )
int size = 1, rank = 0;
#ifdef MOAB_HAVE_MPI
rank = pcomm->rank();
size_t nents_to_be_set = ents_to_set.size();
std::vector< int > globalIds( nents_to_be_set );
const bool serial = ( size == 1 );
for( size_t i = 0; i < nents_to_be_set; i++ )
int gid = globalIds[i];
assert( indexInVal < N );
#ifdef MOAB_HAVE_MPI
TLreq.initialize( 3, 0, 0, 0, nents_to_be_set );
for( size_t i = 0; i < nents_to_be_set; i++ )
int marker = globalIds[i];
int to_proc = ( marker - 1 ) / nL;
if( to_proc == size ) to_proc = size - 1;
int n = TLreq.get_n();
TLreq.vi_wr[3 * n] = to_proc;
TLreq.vi_wr[3 * n + 1] = marker;
TLreq.vi_wr[3 * n + 2] = i;
int sizeBack = TLreq.get_n();
for( int i = 0; i < sizeBack; i++ )
int from_proc = TLreq.vi_wr[3 * i];
TLBack.vi_wr[3 * i] = from_proc;
int marker = TLreq.vi_wr[3 * i + 1];
TLBack.vi_wr[3 * i + 1] = marker;
TLBack.vi_wr[3 * i + 2] = TLreq.vi_wr[3 * i + 2];
int index = marker - 1 - rank * nL;
TLBack.vr_wr[i] = trvArea[index];
int n1 = TLBack.get_n();
for( int i = 0; i < n1; i++ )
int origIndex = TLBack.vi_rd[3 * i + 2];
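// Load a precomputed weight map from disk: create the remapper and the named
// TempestOnlineMap lazily, rebuild the target dof numbering from the mesh,
// then read the matrix entries and restore aream on both applications.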
ErrCode iMOAB_LoadMappingWeightsFromFile(
assert( srctype && tgttype );
TempestMapAppData& tdata = data_intx.tempestData;
if( tdata.remapper == nullptr )
#ifdef MOAB_HAVE_MPI
tdata.remapper->meshValidate = true;
tdata.remapper->constructEdgeMap = true;
tdata.remapper->initialize( false );
tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
assert( weightMap != nullptr );
int src_elem_dof_length = 1, tgt_elem_dof_length = 1;
Tag gdsTag = nullptr;
if( *srctype == 1 || *tgttype == 1 )
std::vector< int > tgtDofValues;
Range tgt_ents_of_interest;
tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length );
else if( *tgttype == 2 )
tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length );
else if( *tgttype == 3 )
tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length );
std::vector< int > sortTgtDofs( tgtDofValues.begin(), tgtDofValues.end() );
std::sort( sortTgtDofs.begin(), sortTgtDofs.end() );
sortTgtDofs.erase( std::unique( sortTgtDofs.begin(), sortTgtDofs.end() ), sortTgtDofs.end() );
#ifdef MOAB_HAVE_MPI
std::cout << " aream tag already defined \n ";
std::vector< double > trvAreaA, trvAreaB;
"reading map from disk failed" );
tdata.pid_src = pid_source;
tdata.pid_dest = pid_target;
MB_CHK_SET_ERR( set_aream_from_trivial_distribution( pid_source, nA, trvAreaA ), " fail to set aream on source " );
MB_CHK_SET_ERR( set_aream_from_trivial_distribution( pid_target, nB, trvAreaB ), " fail to set aream on target " );
std::string metadataStr = std::string( remap_weights_filename ) + ";FV:1:GLOBAL_ID;FV:1:GLOBAL_ID";
data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
ErrCode iMOAB_WriteMappingWeightsToFile(
assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
assert( remap_weights_filename && strlen( remap_weights_filename ) );
TempestMapAppData& tdata = data_intx.tempestData;
assert( tdata.remapper != nullptr );
assert( weightMap != nullptr );
std::string filename = std::string( remap_weights_filename );
std::string metadataStr = data_intx.metadataMap[std::string( solution_weights_identifier )];
std::map< std::string, std::string > attrMap;
attrMap["title"] = "MOAB-TempestRemap Online Regridding Weight Generator";
attrMap["normalization"] = "ovarea";
attrMap["map_aPb"] = filename;
attrMap["concave_a"] = "false";
attrMap["concave_b"] = "false";
attrMap["bubble"] = "true";
#ifdef MOAB_HAVE_MPI
MPI_Comm* jointcomm,
assert( jointcomm );
bool is_fortran = false;
if( *pid1 >= 0 ) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran;
if( *pid2 >= 0 ) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran;
MPI_Comm joint_communicator = ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm );
int localRank = 0, numProcs = 1;
MPI_Comm_rank( joint_communicator, &localRank );
MPI_Comm_size( joint_communicator, &numProcs );
int lenTagType1 = 1;
std::vector< int > valuesComp1;
Range ents_of_interest;
valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
else if( *type == 2 )
valuesComp1.resize( ents_of_interest.size() );
else if( *type == 3 )
valuesComp1.resize( ents_of_interest.size() );
std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
TLcomp1.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp1.get_n();
TLcomp1.vi_wr[2 * n] = to_proc;
TLcomp1.vi_wr[2 * n + 1] = marker;
pc.crystal_router()->gs_transfer( 1, TLcomp1, 0 );
std::stringstream ff1;
ff1 << "TLcomp1_" << localRank << ".txt";
sort_buffer.buffer_init( TLcomp1.get_n() );
TLcomp1.sort( 1, &sort_buffer );
sort_buffer.reset();
std::vector< int > valuesComp2;
TempestMapAppData& tdata = data2.tempestData;
for( auto mapIt = tdata.weightMaps.begin(); mapIt != tdata.weightMaps.end(); ++mapIt )
std::vector< int > valueDofs;
valuesComp2.insert( valuesComp2.end(), valueDofs.begin(), valueDofs.end() );
std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
TLcomp2.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp2.get_n();
TLcomp2.vi_wr[2 * n] = to_proc;
TLcomp2.vi_wr[2 * n + 1] = marker;
pc.crystal_router()->gs_transfer( 1, TLcomp2, 0 );
std::stringstream ff2;
ff2 << "TLcomp2_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLcomp2.get_n() );
TLcomp2.sort( 1, &sort_buffer );
sort_buffer.reset();
int n1 = TLcomp1.get_n();
int n2 = TLcomp2.get_n();
int indexInTLComp1 = 0;
int indexInTLComp2 = 0;
if( n1 > 0 && n2 > 0 )
while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )
int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
if( currentValue1 < currentValue2 )
if( currentValue1 > currentValue2 )
while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
for( int i1 = 0; i1 < size1; i1++ )
for( int i2 = 0; i2 < size2; i2++ )
int n = TLBackToComp1.get_n();
TLBackToComp1.vi_wr[3 * n] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
indexInTLComp1 += size1;
indexInTLComp2 += size2;
pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );
std::stringstream f1;
f1 << "TLBack1_" << localRank << ".txt";
int n = TLBackToComp1.get_n();
std::map< int, std::set< int > > uniqueIDs;
for( int i = 0; i < n; i++ )
int to_proc = TLBackToComp1.vi_wr[3 * i + 2];
int globalId = TLBackToComp1.vi_wr[3 * i + 1];
uniqueIDs[to_proc].insert( globalId );
std::map< int, Range > splits;
for( size_t i = 0; i < ents_of_interest.size(); i++ )
for( int j = 0; j < lenTagType1; j++ )
int marker = valuesComp1[i * lenTagType1 + j];
for( auto mit = uniqueIDs.begin(); mit != uniqueIDs.end(); mit++ )
int proc = mit->first;
std::set< int >& setIds = mit->second;
if( setIds.find( marker ) != setIds.end() )
splits[proc].insert( ent );
std::map< int, Range > verts_to_proc;
int numv = 0, numc = 0;
for( auto it = splits.begin(); it != splits.end(); it++ )
int to_proc = it->first;
numc += (int)it->second.size();
verts_to_proc[to_proc] = verts;
numv += (int)verts.size();
for( auto it = verts_to_proc.begin(); it != verts_to_proc.end(); it++ )
int to_proc = it->first;
Range& verts = it->second;
int n = TLv.get_n();
TLv.vi_wr[2 * n] = to_proc;
2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;
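// Cell tuple layout implied by size_tuple above: destination proc and global
// id (2 ints), lenTagType1 dof ids when *type == 1, the vertex count (1), and
// room for up to 10 vertex global ids of the cell connectivity.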
std::vector< int > gdvals;
for( auto it = splits.begin(); it != splits.end(); it++ )
int to_proc = it->first;
Range& cells = it->second;
int n = TLc.get_n();
TLc.vi_wr[size_tuple * n] = to_proc;
int current_index = 2;
current_index += lenTagType1;
TLc.vi_wr[size_tuple * n + current_index] = nnodes;
&( TLc.vi_wr[size_tuple * n + current_index + 1] ) );MB_CHK_ERR( rval );
else if( *pid2 >= 0 )
int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;
pc.crystal_router()->gs_transfer( 1, TLv, 0 );
if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 );
TempestMapAppData& tdata = dataIntx.tempestData;
std::vector< int > values_entities;
std::map< int, EntityHandle > vertexMap;
int n = TLv.get_n();
for( int i = 0; i < n; i++ )
int gid = TLv.vi_rd[2 * i + 1];
if( vertexMap.find( gid ) == vertexMap.end() )
values_entities.resize( verts.size() );
primary_ents = verts;
2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;
std::map< int, EntityHandle > cellMap;
for( int i = 0; i < n; i++ )
int from_proc = TLc.vi_rd[size_tuple * i];
int globalIdEl = TLc.vi_rd[size_tuple * i + 1];
if( cellMap.find( globalIdEl ) == cellMap.end() )
int current_index = 2;
if( 1 == *type ) current_index += lenTagType1;
int nnodes = TLc.vi_rd[size_tuple * i + current_index];
std::vector< EntityHandle > conn;
conn.resize( nnodes );
for( int j = 0; j < nnodes; j++ )
conn[j] = vertexMap[TLc.vi_rd[size_tuple * i + current_index + j + 1]];
EntityType entType = MBQUAD;
if( nnodes < 4 ) entType = MBTRI;
primary_ents.insert( new_element );
cellMap[globalIdEl] = new_element;
values_entities.resize( lenTagType1 * primary_ents.size() );
values_entities.resize( primary_ents.size() );
if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
std::stringstream fcov;
fcov << "MapCover_" << numProcs << "_" << localRank << ".h5m";
std::stringstream ftarg;
ftarg << "TargMap_" << numProcs << "_" << localRank << ".h5m";
for( auto mapIt = tdata.weightMaps.begin(); mapIt != tdata.weightMaps.end(); ++mapIt )
if( ierr != 0 ) return moab::MB_FAILURE;
ErrCode iMOAB_SetMapGhostLayers( iMOAB_AppID pid, int* n_src_ghost_layers, int* n_tgt_ghost_layers )
data.tempestData.num_src_ghost_layers = *n_src_ghost_layers;
data.tempestData.num_tgt_ghost_layers = *n_tgt_ghost_layers;
constexpr bool validate = true;
constexpr bool meshCleanup = true;
bool gnomonic = true;
constexpr double defaultradius = 1.0;
constexpr double boxeps = 1.e-10;
const double epsrel = ReferenceTolerance;
double radius_source = 1.0;
double radius_target = 1.0;
#ifdef MOAB_HAVE_MPI
TempestMapAppData& tdata = data_intx.tempestData;
#ifdef MOAB_HAVE_MPI
rank = pco_intx->rank();
outputFormatter.set_prefix( "[iMOAB_ComputeCoverageMesh]: " );
rval = ComputeSphereRadius( pid_src, &radius_source );MB_CHK_ERR( rval );
rval = ComputeSphereRadius( pid_tgt, &radius_target );MB_CHK_ERR( rval );
outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source, radius_target );
if( fabs( radius_source - radius_target ) > 1e-10 )
#ifdef MOAB_HAVE_TEMPESTREMAP
outputFormatter.printf( 0, "The source set contains %zu vertices and %zu elements\n", rintxverts.size(), rintxelems.size() );
outputFormatter.printf( 0, "The target set contains %zu vertices and %zu elements\n", bintxverts.size(), bintxelems.size() );
tdata.pid_src = pid_src;
tdata.pid_dest = pid_tgt;
#ifdef MOAB_HAVE_MPI
tdata.remapper->meshValidate = validate;
tdata.remapper->constructEdgeMap = true;
tdata.remapper->initialize( false );
if( tdata.num_src_ghost_layers >= 1 ) gnomonic = false;
rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false, gnomonic, tdata.num_src_ghost_layers );MB_CHK_ERR( rval );
#ifdef MOAB_HAVE_TEMPESTREMAP
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper != nullptr )
else if( data.file_set != data.secondary_file_set )
fileSet = data.secondary_file_set;
MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid secondary file set handle" );
std::ostringstream file_name;
int rank = 0, size = 1;
#ifdef MOAB_HAVE_MPI
rank = pcomm->rank();
file_name << prefix << "_" << size << "_" << rank << ".h5m";
constexpr bool validate = true;
constexpr bool use_kdtree_search = true;
constexpr double defaultradius = 1.0;
#ifdef MOAB_HAVE_MPI
TempestMapAppData& tdata = data_intx.tempestData;
if( tdata.remapper == nullptr )
MB_CHK_ERR( iMOAB_ComputeCoverageMesh( pid_src, pid_tgt, pid_intx ) );
#ifdef MOAB_HAVE_MPI
rank = pco_intx->rank();
outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
MB_CHK_ERR( tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false ) );
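// Sanity diagnostic: compare total spherical areas of the source, target,
// and freshly computed overlap meshes; the two relative errors printed below
// should be near machine precision when the intersection is complete.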
double local_areas[3] = { 0.0, 0.0, 0.0 }, global_areas[3] = { 0.0, 0.0, 0.0 };
local_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius );
local_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius );
local_areas[2] = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, defaultradius );
#ifdef MOAB_HAVE_MPI
global_areas[0] = global_areas[1] = global_areas[2] = 0.0;
MPI_Reduce( &local_areas[0], &global_areas[0], 3, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
global_areas[0] = local_areas[0];
global_areas[1] = local_areas[1];
global_areas[2] = local_areas[2];
outputFormatter.printf( 0,
                        "initial area: source mesh = %12.14f, target mesh = %12.14f, "
                        "overlap mesh = %12.14f\n",
                        global_areas[0], global_areas[1], global_areas[2] );
outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
                        fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
                        fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
double radius_source = 1.0;
double radius_target = 1.0;
const double epsrel = ReferenceTolerance;
const double boxeps = 1.e-8;
#ifdef MOAB_HAVE_MPI
TempestMapAppData& tdata = data_intx.tempestData;
#ifdef MOAB_HAVE_MPI
ComputeSphereRadius( pid_src, &radius_source );
ComputeSphereRadius( pid_tgt, &radius_target );
std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
tdata.pid_src = pid_src;
tdata.pid_dest = pid_tgt;
#ifdef MOAB_HAVE_MPI
tdata.remapper->meshValidate = true;
tdata.remapper->constructEdgeMap = true;
tdata.remapper->initialize( false );
if( fabs( radius_source - radius_target ) > 1e-10 )
rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
#ifdef MOAB_HAVE_MPI
ErrCode iMOAB_ComputeScalarProjectionWeights(
int* disc_order_source,
int* disc_order_target,
int* fMonotoneTypeID,
int* fInverseDistanceMap,
int* fNoConservation,
assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
assert( disc_method_source && strlen( disc_method_source ) );
assert( disc_method_target && strlen( disc_method_target ) );
assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
TempestMapAppData& tdata = data_intx.tempestData;
tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
assert( weightMap != nullptr );
GenerateOfflineMapAlgorithmOptions mapOptions;
mapOptions.nPin = *disc_order_source;
mapOptions.nPout = *disc_order_target;
mapOptions.fSourceConcave = false;
mapOptions.fTargetConcave = false;
mapOptions.strMethod = "";
if( fv_method ) mapOptions.strMethod += std::string( fv_method ) + ";";
if( fMonotoneTypeID )
switch( *fMonotoneTypeID )
mapOptions.fMonotone = false;
mapOptions.strMethod += "mono3;";
mapOptions.strMethod += "mono2;";
mapOptions.fMonotone = true;
mapOptions.fMonotone = false;
mapOptions.fNoBubble = ( fNoBubble ? *fNoBubble : false );
mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
mapOptions.fNoCorrectAreas = false;
mapOptions.fNoCheck = true;
if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
std::string metadataStr = mapOptions.strMethod + ";" + std::string( disc_method_source ) + ":" +
                          std::to_string( *disc_order_source ) + ":" + std::string( source_solution_tag_dof_name ) +
                          ";" + std::string( disc_method_target ) + ":" + std::to_string( *disc_order_target ) +
                          ":" + std::string( target_solution_tag_dof_name );
data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
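// The metadata string above records how the map was generated:
// "<options>;<src disc>:<order>:<dof tag>;<tgt disc>:<order>:<dof tag>";
// it is keyed by the weight identifier and persisted with the weights when
// iMOAB_WriteMappingWeightsToFile is called.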
std::string( disc_method_source ),
std::string( disc_method_target ),
std::string( source_solution_tag_dof_name ),
std::string( target_solution_tag_dof_name )
if( fValidate && *fValidate )
const double dNormalTolerance = 1.0E-8;
const double dStrictTolerance = 1.0E-12;
weightMap->CheckMap( true, true, ( fMonotoneTypeID && *fMonotoneTypeID ), dNormalTolerance, dStrictTolerance );
ErrCode iMOAB_ApplyScalarProjectionWeights(
assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
TempestMapAppData& tdata = data_intx.tempestData;
if( !tdata.weightMaps.count( std::string( solution_weights_identifier ) ) )
std::vector< std::string > srcNames;
std::vector< std::string > tgtNames;
std::vector< Tag > srcTagHandles;
std::vector< Tag > tgtTagHandles;
std::string separator( ":" );
std::string src_name( source_solution_tag_name );
std::string tgt_name( target_solution_tag_name );
if( srcNames.size() != tgtNames.size() )
std::cout << " error in parsing source and target tag names. \n";
return moab::MB_FAILURE;
for( size_t i = 0; i < srcNames.size(); i++ )
if( MB_SUCCESS != rval || nullptr == tagHandle )
srcTagHandles.push_back( tagHandle );
if( MB_SUCCESS != rval || nullptr == tagHandle )
tgtTagHandles.push_back( tagHandle );
std::vector< double > solSTagVals;
std::vector< double > solTTagVals;
solSTagVals.resize( covSrcEnts.size(), 0. );
solTTagVals.resize( tgtEnts.size(), 0. );
switch( *filter_type )
for( size_t i = 0; i < srcTagHandles.size(); i++ )
rval = weightMap->ApplyWeights( srcTagHandles[i], tgtTagHandles[i], false, caasType );MB_CHK_ERR( rval );
Tag ssolnTag = srcTagHandles[ivar];
std::stringstream sstr;
sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
std::ofstream output_file( sstr.str().c_str() );
for( unsigned i = 0; i < sents.size(); ++i )
std::vector< double > locsolSTagVals( 16 );
for( unsigned j = 0; j < 16; ++j )
output_file << locsolSTagVals[j] << " ";
output_file.flush();
output_file.close();
std::stringstream sstr;
sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
std::stringstream sstr;
sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
std::stringstream sstr;
sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
std::stringstream sstr;
sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
std::stringstream sstr;
sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
std::ofstream output_file( sstr.str().c_str() );
for( unsigned i = 0; i < solSTagVals.size(); ++i )
output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " " << solSTagVals[i] << "\n";
output_file.flush();
output_file.close();