#include <H5Tpublic.h>
#ifdef MOAB_HAVE_TEMPESTREMAP
#include "STLStringHelper.h"
#ifdef MOAB_HAVE_TEMPESTREMAP
struct TempestMapAppData
std::map< std::string, moab::TempestOnlineMap* > weightMaps;
int num_src_ghost_layers;
int num_tgt_ghost_layers;
std::map< int, ParCommGraph* > pgraph;
#ifdef MOAB_HAVE_TEMPESTREMAP
TempestMapAppData tempestData;
std::map< std::string, std::string > metadataMap;
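// Per-application state: TempestMapAppData carries the TempestRemap remapper, the
// named weight maps, and the ghost-layer counts; pgraph maps a context id to the
// ParCommGraph used to communicate with that component.
// The fragments below appear to come from iMOAB_RegisterApplication: duplicate names
// and component ids are rejected, a ParallelComm is created on the application's
// communicator, and the TempestRemap fields are zero-initialized.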
for( int i = 0; i < 4; i++ )
MPI_Initialized( &flagInit );
std::string name( app_name );
std::cout << " application " << name << " already registered \n";
return moab::MB_FAILURE;
MPI_Comm_rank( *comm, &rankHere );
if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
std::cout << " convention for external application is to have its id positive \n";
return moab::MB_FAILURE;
std::cout << " external application with comp id " << *compid << " is already registered\n";
return moab::MB_FAILURE;
int index = pco->get_id();
assert( index == *pid );
app_data.name = name;
#ifdef MOAB_HAVE_TEMPESTREMAP
app_data.tempestData.remapper = NULL;
app_data.tempestData.num_src_ghost_layers = 0;
app_data.tempestData.num_tgt_ghost_layers = 0;
assert( app_name != nullptr );
std::string name( app_name );
ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
rankHere = pco->rank();
std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
          << " is de-registered now \n";
fileents.insert( fileSet );
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper ) delete data.tempestData.remapper;
if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
std::map< int, ParCommGraph* >& pargs = data.pgraph;
for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
if( pco ) delete pco;
if( !adj_ents_left.empty() )
vertices = subtract( vertices, conn_verts );
std::map< std::string, int >::iterator mit;
int pidx = mit->second;
std::map< int, int >::iterator mit1;
int pidx = mit1->second;
std::string& separator,
std::vector< std::string >& list_tag_names )
while( ( pos = input_names.find( separator ) ) != std::string::npos )
token = input_names.substr( 0, pos );
if( !token.empty() ) list_tag_names.push_back( token );
input_names.erase( 0, pos + separator.length() );
if( !input_names.empty() )
list_tag_names.push_back( input_names );
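// The helper above splits a concatenated name string on 'separator' (":" in the
// callers below) into list_tag_names. Next come fragments of iMOAB_ReadHeaderInfo,
// which uses the mhdf API to read vertex/element/dimension/part counts from an h5m
// file header without loading the mesh; it is only functional when MOAB is built
// with HDF5.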
int* num_global_vertices,
int* num_global_elements,
IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
#ifdef MOAB_HAVE_HDF5
std::string filen( filename );
if( num_global_vertices ) *num_global_vertices = 0;
if( num_global_elements ) *num_global_elements = 0;
if( num_dimension ) *num_dimension = 0;
if( num_parts ) *num_parts = 0;
unsigned long max_id;
file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
return moab::MB_FAILURE;
fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
return moab::MB_FAILURE;
if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
edges += ent_d->count;
faces += ent_d->count;
faces += ent_d->count;
faces += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
if( num_parts ) *num_parts = data->numEntSets[0];
if( num_dimension ) *num_dimension = 1;
if( num_global_elements ) *num_global_elements = edges;
if( num_dimension ) *num_dimension = 2;
if( num_global_elements ) *num_global_elements = faces;
if( num_dimension ) *num_dimension = 3;
if( num_global_elements ) *num_global_elements = regions;
std::cout << filename
          << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
             "formats other than a h5m file.\n";
if( num_global_vertices ) *num_global_vertices = 0;
if( num_global_elements ) *num_global_elements = 0;
if( num_dimension ) *num_dimension = 0;
if( num_parts ) *num_parts = 0;
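// iMOAB_LoadMesh fragments follow: the caller may not pass PARALLEL_COMM (it is
// implied by the application id and appended here for h5m/nc files), and a
// PARALLEL_GHOSTS option is appended when ghost layers are requested. The string
// "3.0.<n>.3" presumably encodes ghost dimension 3, bridge dimension 0, n layers,
// and additional entities of dimension 3, per MOAB's parallel read options.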
int* num_ghost_layers )
IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
std::ostringstream newopts;
if( read_options ) newopts << read_options;
std::string opts( ( read_options ? read_options : "" ) );
std::string pcid( "PARALLEL_COMM=" );
std::size_t found = opts.find( pcid );
if( found != std::string::npos )
std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
return moab::MB_FAILURE;
std::string filen( filename );
std::string::size_type idx = filen.rfind( '.' );
if( idx != std::string::npos )
std::string extension = filen.substr( idx + 1 );
if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
newopts << ";;PARALLEL_COMM=" << *pid;
if( *num_ghost_layers >= 1 )
std::string pcid2( "PARALLEL_GHOSTS=" );
std::size_t found2 = opts.find( pcid2 );
if( found2 != std::string::npos )
std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
             "number of layers \n";
newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
std::ostringstream outfile;
outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
outfile << "TaskMesh_n1.0.h5m";
IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
std::ostringstream newopts;
std::string write_opts( ( write_options ? write_options : "" ) );
std::string pcid( "PARALLEL_COMM=" );
std::size_t found = write_opts.find( pcid );
if( found != std::string::npos )
std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
return moab::MB_FAILURE;
std::string pw( "PARALLEL=WRITE_PART" );
found = write_opts.find( pw );
if( found != std::string::npos )
newopts << "PARALLEL_COMM=" << *pid << ";";
if( write_options ) newopts << write_options;
std::vector< Tag > copyTagList = data.tagList;
std::string gid_name_tag( "GLOBAL_ID" );
if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
copyTagList.push_back( gid );
std::string pp_name_tag( "PARALLEL_PARTITION" );
if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
if( ptag ) copyTagList.push_back( ptag );
IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
std::ostringstream file_name;
int rank = 0, size = 1;
file_name << prefix << "_" << size << "_" << rank << ".h5m";
int local[2], global[2];
MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
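// Mesh-info queries follow (iMOAB_GetMeshInfo and per-entity accessors). The
// three-element output arrays use the convention [0] = owned, [1] = ghost,
// [2] = total visible; blocks are MATERIAL_SET sets, surface BCs are sides from
// NEUMANN_SET sets, and vertex BCs come from DIRICHLET_SET sets.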
int* num_visible_vertices,
int* num_visible_elements,
int* num_visible_blocks,
int* num_visible_surfaceBC,
int* num_visible_vertexBC )
if( num_visible_elements )
num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
if( num_visible_vertices )
num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
if( num_visible_blocks )
num_visible_blocks[0] = num_visible_blocks[2];
num_visible_blocks[1] = 0;
if( num_visible_surfaceBC )
num_visible_surfaceBC[2] = 0;
for( int i = 0; i < numNeuSets; i++ )
Range adjPrimaryEnts;
num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
num_visible_surfaceBC[1] = 0;
if( num_visible_vertexBC )
num_visible_vertexBC[2] = 0;
for( int i = 0; i < numDiriSets; i++ )
num_visible_vertexBC[2] += (int)verts.size();
num_visible_vertexBC[0] = num_visible_vertexBC[2];
num_visible_vertexBC[1] = 0;
IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
assert( vertices_length && *vertices_length );
assert( visible_global_rank_ID );
if( i != *vertices_length )
return moab::MB_FAILURE;
if( (int)verts.size() != *vertices_length )
return moab::MB_FAILURE;
visible_global_rank_ID[i] = 0;
if( *coords_length != 3 * (int)verts.size() )
return moab::MB_FAILURE;
if( *block_length != (int)matSets.size() )
return moab::MB_FAILURE;
for( unsigned i = 0; i < matSets.size(); i++ )
matIdx[global_block_IDs[i]] = i;
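// The block and element accessors below share one pattern: validate the
// caller-provided lengths against the cached Ranges, look the block up in
// data.matIndex by its global MATERIAL_SET id, and return MB_FAILURE on any mismatch.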
int* vertices_per_element,
int* num_elements_in_block )
assert( global_block_ID );
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
return moab::MB_FAILURE;
return moab::MB_FAILURE;
*vertices_per_element = num_verts;
*num_elements_in_block = (int)blo_elems.size();
int* num_visible_elements,
#ifdef MOAB_HAVE_MPI
#ifdef MOAB_HAVE_MPI
return moab::MB_FAILURE;
if( -1 >= *num_visible_elements )
return moab::MB_FAILURE;
block_IDs[index] = valMatTag;
int* connectivity_length,
int* element_connectivity )
assert( global_block_ID );
assert( connectivity_length );
std::map< int, int >& matMap = data.matIndex;
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
std::vector< EntityHandle > elems;
return moab::MB_FAILURE;
std::vector< EntityHandle > vconnect;
if( *connectivity_length != (int)vconnect.size() )
return moab::MB_FAILURE;
for( int i = 0; i < *connectivity_length; i++ )
return moab::MB_FAILURE;
element_connectivity[i] = inx;
int* connectivity_length,
int* element_connectivity )
assert( elem_index );
assert( connectivity_length );
assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
if( *connectivity_length < num_nodes )
return moab::MB_FAILURE;
for( int i = 0; i < num_nodes; i++ )
return moab::MB_FAILURE;
element_connectivity[i] = index;
*connectivity_length = num_nodes;
int* num_elements_in_block,
int* element_ownership )
assert( global_block_ID );
assert( num_elements_in_block );
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
return moab::MB_FAILURE;
if( *num_elements_in_block != (int)elems.size() )
return moab::MB_FAILURE;
#ifdef MOAB_HAVE_MPI
#ifdef MOAB_HAVE_MPI
element_ownership[i] = 0;
int* num_elements_in_block,
assert( global_block_ID );
assert( num_elements_in_block );
std::map< int, int >& matMap = data.matIndex;
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
return moab::MB_FAILURE;
if( *num_elements_in_block != (int)elems.size() )
return moab::MB_FAILURE;
for( int i = 0; i < *num_elements_in_block; i++ )
if( -1 == local_element_ID[i] )
return moab::MB_FAILURE;
int* surface_BC_length,
int* reference_surface_ID,
int* boundary_condition_value )
for( int i = 0; i < numNeuSets; i++ )
Range adjPrimaryEnts;
if( -1 == local_element_ID[index] )
return moab::MB_FAILURE;
boundary_condition_value[index] = neuVal;
if( index != *surface_BC_length )
return moab::MB_FAILURE;
int* vertex_BC_length,
int* boundary_condition_value )
for( int i = 0; i < numDiriSets; i++ )
if( -1 == local_vertex_ID[index] )
return moab::MB_FAILURE;
boundary_condition_value[index] = diriVal;
if( *vertex_BC_length != index )
return moab::MB_FAILURE;
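// iMOAB_DefineTagStorage fragments follow. tag_type appears to encode the six
// combinations of (dense/sparse) x (integer/double/entity-handle), hence the 0..5
// check and the three default-value buffers; multiple tags can be defined at once by
// joining names with ":". Tags already present in data.tagMap are only counted and
// reported, not re-created.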
int* components_per_entity,
if( *tag_type < 0 || *tag_type > 5 )
return moab::MB_FAILURE;
void* defaultVal = NULL;
int* defInt = new int[*components_per_entity];
double* defDouble = new double[*components_per_entity];
for( int i = 0; i < *components_per_entity; i++ )
defDouble[i] = -1e+10;
defaultVal = defInt;
defaultVal = defDouble;
defaultVal = defHandle;
defaultVal = defInt;
defaultVal = defDouble;
defaultVal = defHandle;
return moab::MB_FAILURE;
std::string tag_name( tag_storage_name );
std::vector< std::string > tagNames;
std::string separator( ":" );
int already_defined_tags = 0;
for( size_t i = 0; i < tagNames.size(); i++ )
std::map< std::string, Tag >& mTags = data.tagMap;
std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
if( mit == mTags.end() )
mTags[tagNames[i]] = tagHandle;
*tag_index = (int)data.tagList.size();
data.tagList.push_back( tagHandle );
already_defined_tags++;
data.tagMap[tagNames[i]] = tagHandle;
*tag_index = (int)data.tagList.size();
data.tagList.push_back( tagHandle );
rval = moab::MB_FAILURE;
#ifdef MOAB_HAVE_MPI
rankHere = pco->rank();
if( !rankHere && already_defined_tags )
std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
          << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
int* num_tag_storage_length,
int* tag_storage_data )
std::string tag_name( tag_storage_name );
if( data.tagMap.find( tag_name ) == data.tagMap.end() )
return moab::MB_FAILURE;
return moab::MB_FAILURE;
if( *ent_type == 0 )
int nents_to_be_set = *num_tag_storage_length / tagLength;
if( nents_to_be_set > (int)ents_to_set->size() )
return moab::MB_FAILURE;
int* num_tag_storage_length,
int* tag_storage_data )
std::string tag_name( tag_storage_name );
if( data.tagMap.find( tag_name ) == data.tagMap.end() )
return moab::MB_FAILURE;
return moab::MB_FAILURE;
if( *ent_type == 0 )
int nents_to_get = *num_tag_storage_length / tagLength;
if( nents_to_get > (int)ents_to_get->size() )
return moab::MB_FAILURE;
int* num_tag_storage_length,
double* tag_storage_data )
std::string tag_names( tag_storage_names );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
Range* ents_to_set = NULL;
if( *ent_type == 0 )
else if( *ent_type == 1 )
int nents_to_be_set = (int)( *ents_to_set ).size();
for( size_t i = 0; i < tagNames.size(); i++ )
if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
return moab::MB_FAILURE;
return moab::MB_FAILURE;
if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
return moab::MB_FAILURE;
position = position + tagLength * nents_to_be_set;
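// iMOAB_SetDoubleTagStorageWithGid fragments follow. Values arrive ordered by global
// id rather than by local handle, so in parallel each value is routed to an
// intermediate task chosen as (gid % num_procs): TLsend carries the values, TLreq the
// requests; both tuple lists are sorted by gid and matched, and TLBack returns the
// matched values to the task that owns the entity, where the eh_by_gid map resolves
// the handle. In serial the eh_by_gid map is used directly.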
int* num_tag_storage_length,
double* tag_storage_data,
std::string tag_names( tag_storage_names );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
Range* ents_to_set = NULL;
if( *ent_type == 0 )
else if( *ent_type == 1 )
int nents_to_be_set = (int)( *ents_to_set ).size();
std::vector< int > gids;
gids.resize( nents_to_be_set );
std::map< int, EntityHandle > eh_by_gid;
eh_by_gid[gids[i]] = *it;
std::vector< int > tagLengths( tagNames.size() );
std::vector< Tag > tagList;
size_t total_tag_len = 0;
for( size_t i = 0; i < tagNames.size(); i++ )
if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
tagList.push_back( tag );
total_tag_len += tagLength;
tagLengths[i] = tagLength;
MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
#ifdef MOAB_HAVE_MPI
unsigned num_procs = pco->size();
if( num_procs > 1 ) serial = false;
for( int i = 0; i < nents_to_be_set; i++ )
int gid = globalIds[i];
std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
if( mapIt == eh_by_gid.end() ) continue;
int indexInTagValues = 0;
for( size_t j = 0; j < tagList.size(); j++ )
indexInTagValues += i * tagLengths[j];
indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j];
#ifdef MOAB_HAVE_MPI
int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() );
TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals );
int indexInRealLocal = 0;
for( int i = 0; i < nbLocalVals; i++ )
int marker = globalIds[i];
int to_proc = marker % num_procs;
int n = TLsend.get_n();
TLsend.vi_wr[2 * n] = to_proc;
TLsend.vi_wr[2 * n + 1] = marker;
int indexInTagValues = 0;
for( size_t j = 0; j < tagList.size(); j++ )
indexInTagValues += i * tagLengths[j];
for( int k = 0; k < tagLengths[j]; k++ )
TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
for( int i = 0; i < nents_to_be_set; i++ )
int marker = gids[i];
int to_proc = marker % num_procs;
int n = TLreq.get_n();
TLreq.vi_wr[2 * n] = to_proc;
TLreq.vi_wr[2 * n + 1] = marker;
sort_buffer.buffer_init( TLreq.get_n() );
TLreq.sort( 1, &sort_buffer );
sort_buffer.reset();
sort_buffer.buffer_init( TLsend.get_n() );
TLsend.sort( 1, &sort_buffer );
sort_buffer.reset();
TLBack.initialize( 3, 0, 0, total_tag_len, 0 );
int n1 = TLreq.get_n();
int n2 = TLsend.get_n();
int indexInTLreq = 0;
int indexInTLsend = 0;
if( n1 > 0 && n2 > 0 )
while( indexInTLreq < n1 && indexInTLsend < n2 )
int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
if( currentValue1 < currentValue2 )
if( currentValue1 > currentValue2 )
while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
for( int i1 = 0; i1 < size1; i1++ )
for( int i2 = 0; i2 < size2; i2++ )
int n = TLBack.get_n();
TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )];
TLBack.vi_wr[3 * n + 1] = currentValue1;
TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )];
for( size_t k = 0; k < total_tag_len; k++ )
TLBack.vr_rd[total_tag_len * n + k] = TLsend.vr_rd[total_tag_len * indexInTLsend + k];
indexInTLreq += size1;
indexInTLsend += size2;
n1 = TLBack.get_n();
double* ptrVal = &TLBack.vr_rd[0];
for( int i = 0; i < n1; i++ )
int gid = TLBack.vi_rd[3 * i + 1];
std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
if( mapIt == eh_by_gid.end() ) continue;
for( size_t j = 0; j < tagList.size(); j++ )
ptrVal += tagLengths[j];
int* num_tag_storage_length,
double* tag_storage_data )
std::string tag_names( tag_storage_names );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
Range* ents_to_get = NULL;
if( *ent_type == 0 )
else if( *ent_type == 1 )
int nents_to_get = (int)ents_to_get->size();
for( size_t i = 0; i < tagNames.size(); i++ )
if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
return moab::MB_FAILURE;
return moab::MB_FAILURE;
if( position + nents_to_get * tagLength > *num_tag_storage_length )
return moab::MB_FAILURE;
position = position + nents_to_get * tagLength;
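// Tag-exchange fragments follow (presumably iMOAB_SynchronizeTags and a reduce
// variant): tag indices are validated against data.tagList, ent_type selects the
// Range, and ParallelComm does the exchange; in non-MPI builds the arguments are
// folded into a dummy expression (int k = ...) just to avoid unused-variable
// warnings.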
#ifdef MOAB_HAVE_MPI
std::vector< Tag > tags;
for( int i = 0; i < *num_tag; i++ )
if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
return moab::MB_FAILURE;
tags.push_back( data.tagList[tag_indices[i]] );
if( *ent_type == 0 )
else if( *ent_type == 1 )
return moab::MB_FAILURE;
int k = *pid + *num_tag + *tag_indices + *ent_type;
#ifdef MOAB_HAVE_MPI
if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
return moab::MB_FAILURE;
if( *ent_type == 0 )
else if( *ent_type == 1 )
return moab::MB_FAILURE;
int k = *pid + *tag_index + *ent_type;
int* num_adjacent_elements,
if( *num_adjacent_elements < (int)adjs.size() )
return moab::MB_FAILURE;
*num_adjacent_elements = (int)adjs.size();
for( int i = 0; i < *num_adjacent_elements; i++ )
return moab::MB_FAILURE;
int nverts = *coords_len / *dim;
int* num_nodes_per_element,
EntityType mbtype = (EntityType)( *type );
for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
array[j] = connectivity[j] + firstVertex - 1;
Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
int set_no = *block_ID;
const void* setno_ptr = &set_no;
if( MB_FAILURE == rval || sets.empty() )
block_set = sets[0];
if( NULL != num_global_verts )
if( NULL != num_global_elems )
#ifdef MOAB_HAVE_MPI
return moab::MB_FAILURE;
std::cout << " can't get par part tag.\n";
return moab::MB_FAILURE;
int rank = pco->rank();
if( num_ghost_layers && *num_ghost_layers <= 0 )
constexpr int addl_ents = 0;
for( int i = 2; i <= *num_ghost_layers; i++ )
assert( join != nullptr );
assert( receivingGroup != nullptr );
assert( rcompid != nullptr );
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
MPI_Group recvGroup =
    ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
MPI_Comm sender = pco->comm();
MPI_Group senderGroup;
ierr = MPI_Comm_group( sender, &senderGroup );
if( ierr != 0 ) return moab::MB_FAILURE;
int sender_rank = -1;
MPI_Comm_rank( sender, &sender_rank );
std::vector< int > number_elems_per_part;
if( owned.size() == 0 )
int local_owned_elem = (int)owned.size();
int rank = pco->rank();
number_elems_per_part.resize( size );
number_elems_per_part[rank] = local_owned_elem;
#if( MPI_VERSION >= 2 )
ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
std::vector< int > all_tmp( size );
ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
number_elems_per_part = all_tmp;
return moab::MB_FAILURE;
MPI_Group_free( &senderGroup );
assert( join != nullptr );
assert( sendingGroup != nullptr );
assert( scompid != nullptr );
MPI_Comm receive = pco->comm();
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
MPI_Group sendGroup =
    ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
MPI_Group receiverGroup;
int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
int receiver_rank = -1;
MPI_Comm_rank( receive, &receiver_rank );
std::vector< int > pack_array;
int current_receiver = cgraph->receiver( receiver_rank );
std::vector< int > senders_local;
while( n < pack_array.size() )
if( current_receiver == pack_array[n] )
for( int j = 0; j < pack_array[n + 1]; j++ )
senders_local.push_back( pack_array[n + 2 + j] );
n = n + 2 + pack_array[n + 1];
std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
          << senders_local.size() << " tasks: ";
for( int k = 0; k < (int)senders_local.size(); k++ )
std::cout << " " << senders_local[k];
if( senders_local.empty() )
std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
if( (int)senders_local.size() >= 2 )
std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
std::cout << "after merging: new verts: " << new_verts.size() << "\n";
if( NULL != densePartTag && MB_SUCCESS == rval )
std::vector< int > vals;
int rank = pco->rank();
vals.resize( local_verts.size(), rank );
std::cout << " can't get par part tag.\n";
return moab::MB_FAILURE;
int rank = pco->rank();
MPI_Group_free( &receiverGroup );
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
if( mt == data.pgraph.end() )
return moab::MB_FAILURE;
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper != NULL )
if( 0 != cover_set )
std::string tag_name( tag_storage_name );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
for( size_t i = 0; i < tagNames.size(); i++ )
if( MB_SUCCESS != rval || NULL == tagHandle )
std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";
MB_CHK_SET_ERR( rval, "can't get tag handle" );
tagHandles.push_back( tagHandle );
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
if( mt == data.pgraph.end() )
return moab::MB_FAILURE;
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
if( 0 != cover_set )
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper != NULL )
std::string tag_name( tag_storage_name );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
for( size_t i = 0; i < tagNames.size(); i++ )
if( MB_SUCCESS != rval || NULL == tagHandle )
std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";
MB_CHK_SET_ERR( rval, "can't get tag handle" );
tagHandles.push_back( tagHandle );
std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
          << " and file set = " << ( data.file_set ) << "\n";
std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
if( mt == data.pgraph.end() ) return moab::MB_FAILURE;
mt->second->release_send_buffers();
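// iMOAB_ComputeCommGraph fragments follow. Both components resolve the Fortran/C
// joint communicator and groups, skip the computation if a ParCommGraph already
// links comp1 and comp2, and otherwise build one per direction. Each entity is
// reduced to an integer marker (its GLOBAL_ID, or per-element DoF ids when the type
// is spectral), markers are routed to task (marker % numProcs) via the crystal
// router, the two sorted streams are matched, and the matches are shipped back to
// both components to establish the communication pattern.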
int localRank = 0, numProcs = 1;
bool isFortran = false;
if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
MPI_Comm global = ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
MPI_Comm_rank( global, &localRank );
MPI_Comm_size( global, &numProcs );
bool already_exists = false;
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp2 );
if( mt != data.pgraph.end() ) already_exists = true;
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp1 );
if( mt != data.pgraph.end() ) already_exists = true;
if( already_exists )
std::cout << " parcomgraph already existing between components " << *comp1 << " and " << *comp2
          << ". Do not compute again\n";
if( *pid1 >= 0 ) cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
int lenTagType1 = 1;
if( 1 == *type1 || 1 == *type2 )
std::vector< int > valuesComp1;
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data1.tempestData.remapper != NULL )
Range ents_of_interest;
valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
else if( *type1 == 2 )
valuesComp1.resize( ents_of_interest.size() );
else if( *type1 == 3 )
valuesComp1.resize( ents_of_interest.size() );
std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
TLcomp1.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp1.get_n();
TLcomp1.vi_wr[2 * n] = to_proc;
TLcomp1.vi_wr[2 * n + 1] = marker;
std::stringstream ff1;
ff1 << "TLcomp1_" << localRank << ".txt";
sort_buffer.buffer_init( TLcomp1.get_n() );
TLcomp1.sort( 1, &sort_buffer );
sort_buffer.reset();
std::vector< int > valuesComp2;
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data2.tempestData.remapper != NULL )
Range ents_of_interest;
valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
else if( *type2 == 2 )
valuesComp2.resize( ents_of_interest.size() );
else if( *type2 == 3 )
valuesComp2.resize( ents_of_interest.size() );
std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
TLcomp2.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp2.get_n();
TLcomp2.vi_wr[2 * n] = to_proc;
TLcomp2.vi_wr[2 * n + 1] = marker;
std::stringstream ff2;
ff2 << "TLcomp2_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLcomp2.get_n() );
TLcomp2.sort( 1, &sort_buffer );
sort_buffer.reset();
int n1 = TLcomp1.get_n();
int n2 = TLcomp2.get_n();
int indexInTLComp1 = 0;
int indexInTLComp2 = 0;
if( n1 > 0 && n2 > 0 )
while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )
int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
if( currentValue1 < currentValue2 )
if( currentValue1 > currentValue2 )
while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
for( int i1 = 0; i1 < size1; i1++ )
for( int i2 = 0; i2 < size2; i2++ )
int n = TLBackToComp1.get_n();
TLBackToComp1.vi_wr[3 * n] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
n = TLBackToComp2.get_n();
TLBackToComp2.vi_wr[3 * n] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
indexInTLComp1 += size1;
indexInTLComp2 += size2;
std::stringstream f1;
f1 << "TLBack1_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
TLBackToComp1.sort( 1, &sort_buffer );
sort_buffer.reset();
std::stringstream f2;
f2 << "TLBack2_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
TLBackToComp2.sort( 2, &sort_buffer );
sort_buffer.reset();
std::vector< Tag > tagsList;
if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE;
tagsList.push_back( tag );
if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
double tol = 1.0e-9;
std::cout << " can't get par part tag.\n";
return moab::MB_FAILURE;
int rank = pco->rank();
#ifdef MOAB_HAVE_TEMPESTREMAP
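// iMOAB_CoverageGraph follows. After an intersection is computed, each target task
// needs only part of the source coverage mesh; this routine reads every intersection
// cell's parent source id and original sending processor from tags, accumulates the
// needed ids per sender in idsFromProcs, and rewires the existing ParCommGraph so
// later tag migration ships only the covering subset.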
ErrCode iMOAB_CoverageGraph( MPI_Comm* join,
std::vector< int > srcSenders;
std::vector< int > receivers;
int default_context_id = -1;
bool is_fortran_context = false;
default_context_id = *migr_id;
is_fortran_context = context.appDatas[*pid_src].is_fortran || is_fortran_context;
srcSenders = sendGraph->senders();
std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
if( *pid_migr >= 0 )
is_fortran_context = context.appDatas[*pid_migr].is_fortran || is_fortran_context;
default_context_id = *src_id;
srcSenders = recvGraph->senders();
std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
int currentRankInJointComm = -1;
ierr = MPI_Comm_rank( global, &currentRankInJointComm );CHK_MPI_ERR( ierr );
if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) != receivers.end() )
if( *pid_intx >= (int)context.appDatas.size() ) return moab::MB_FAILURE;
Tag parentTag, orgSendProcTag;
if( !parentTag ) return moab::MB_FAILURE;
if( !orgSendProcTag ) return moab::MB_FAILURE;
std::map< int, std::set< int > > idsFromProcs;
int gidCell, origProc;
if( origProc < 0 ) continue;
std::set< int >& setInts = idsFromProcs[origProc];
setInts.insert( gidCell );
assert( *pid_intx >= 0 );
int gidCell, origProc;
std::set< int >& setInts = idsFromProcs[origProc];
setInts.insert( gidCell );
std::ofstream dbfile;
std::stringstream outf;
outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
dbfile.open( outf.str().c_str() );
dbfile << "Writing this to a file.\n";
dbfile << " map size:" << idsFromProcs.size()
for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
std::set< int >& setIds = mt->second;
dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
dbfile << " " << valueID;
if( counter % 10 == 0 ) dbfile << "\n";
if( NULL != recvGraph )
assert( *pid_intx >= 0 );
for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
int procToSendTo = mit->first;
std::set< int >& idSet = mit->second;
for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
int n = TLcovIDs.get_n();
TLcovIDs.vi_wr[2 * n] = procToSendTo;
TLcovIDs.vi_wr[2 * n + 1] = *sit;
pc.crystal_router()->gs_transfer( 1, TLcovIDs, 0 );
if( NULL != sendGraph )
assert( prefix && strlen( prefix ) );
std::string prefix_str( prefix );
if( NULL != cgraph )
std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
          << " context: " << *context_id << "\n";
#ifdef MOAB_HAVE_TEMPESTREMAP
#ifdef MOAB_HAVE_NETCDF
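// iMOAB_LoadMappingWeightsFromFile follows (TempestRemap + NetCDF builds). It creates
// the TempestOnlineMap lazily, reads the named map file, partitions the weight matrix
// by rows (target) or columns (source) according to col_or_row, and uses the owned
// DoF ids to set up migration against the coupler application pid_cpl.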
ErrCode iMOAB_LoadMappingWeightsFromFile(
bool row_based_partition = true;
if( *col_or_row == 1 ) row_based_partition = false;
TempestMapAppData& tdata = data_intx.tempestData;
if( tdata.remapper == NULL )
#ifdef MOAB_HAVE_MPI
tdata.remapper->meshValidate = true;
tdata.remapper->constructEdgeMap = true;
tdata.remapper->initialize( false );
tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
assert( weightMap != NULL );
int lenTagType1 = 1;
std::vector< int > dofValues;
dofValues.resize( ents_of_interest.size() * lenTagType1 );
ndofPerEl = lenTagType1;
else if( *type == 2 )
dofValues.resize( ents_of_interest.size() );
else if( *type == 3 )
dofValues.resize( ents_of_interest.size() );
std::vector< int > orderDofs( dofValues.begin(), dofValues.end() );
std::sort( orderDofs.begin(), orderDofs.end() );
orderDofs.erase( std::unique( orderDofs.begin(), orderDofs.end() ), orderDofs.end() );
if( row_based_partition )
tdata.pid_dest = pid_cpl;
tdata.pid_src = pid_cpl;
std::vector< int > tmp_owned_ids;
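// iMOAB_WriteMappingWeightsToFile follows: the named weight map is written to a
// NetCDF map file along with global attributes; the metadata string recorded by
// iMOAB_ComputeScalarProjectionWeights (remap options, then source and target
// "method:order:dofname", ";"-separated) is split back into the remap_options,
// methodorder_b, and methodorder_a attributes.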
ErrCode iMOAB_WriteMappingWeightsToFile(
assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
assert( remap_weights_filename && strlen( remap_weights_filename ) );
TempestMapAppData& tdata = data_intx.tempestData;
assert( tdata.remapper != NULL );
assert( weightMap != NULL );
std::string filename = std::string( remap_weights_filename );
std::string metadataStr = data_intx.metadataMap[std::string( solution_weights_identifier )];
std::map< std::string, std::string > attrMap;
attrMap["title"] = "MOAB-TempestRemap Online Regridding Weight Generator";
attrMap["normalization"] = "ovarea";
attrMap["map_aPb"] = filename;
const std::string delim = ";";
size_t pos = 0, index = 0;
std::vector< std::string > stringAttr( 3 );
while( ( pos = metadataStr.find( delim ) ) != std::string::npos )
std::string token1 = metadataStr.substr( 0, pos );
if( token1.size() > 0 || index == 0 ) stringAttr[index++] = token1;
metadataStr.erase( 0, pos + delim.length() );
stringAttr[index] = metadataStr;
assert( index == 2 );
attrMap["remap_options"] = stringAttr[0];
attrMap["methodorder_b"] = stringAttr[1];
attrMap["methodorder_a"] = stringAttr[2];
attrMap["concave_a"] = "false";
attrMap["concave_b"] = "false";
attrMap["bubble"] = "true";
#ifdef MOAB_HAVE_MPI
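// The fragments below appear to be iMOAB_MigrateMapMesh. It follows the same
// marker-matching scheme as iMOAB_ComputeCommGraph, except that the second
// component's markers come from the loaded TempestOnlineMap (its row or column DoF
// ids, depending on direction), so the mesh is migrated onto the tasks that actually
// own map entries.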
MPI_Comm* jointcomm,
assert( jointcomm );
bool is_fortran = false;
if( *pid1 >= 0 ) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran;
if( *pid2 >= 0 ) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran;
if( *pid3 >= 0 ) is_fortran = context.appDatas[*pid3].is_fortran || is_fortran;
MPI_Comm joint_communicator =
    ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm );
MPI_Group group_first = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA );
MPI_Group group_second = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB );
int localRank = 0, numProcs = 1;
MPI_Comm_rank( joint_communicator, &localRank );
MPI_Comm_size( joint_communicator, &numProcs );
if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 );
if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 );
int lenTagType1 = 1;
std::vector< int > valuesComp1;
Range ents_of_interest;
valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
else if( *type == 2 )
valuesComp1.resize( ents_of_interest.size() );
else if( *type == 3 )
valuesComp1.resize( ents_of_interest.size() );
std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
TLcomp1.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp1.get_n();
TLcomp1.vi_wr[2 * n] = to_proc;
TLcomp1.vi_wr[2 * n + 1] = marker;
pc.crystal_router()->gs_transfer( 1, TLcomp1, 0 );
std::stringstream ff1;
ff1 << "TLcomp1_" << localRank << ".txt";
sort_buffer.buffer_init( TLcomp1.get_n() );
TLcomp1.sort( 1, &sort_buffer );
sort_buffer.reset();
std::vector< int > valuesComp2;
TempestMapAppData& tdata = data2.tempestData;
assert( tdata.weightMaps.size() == 1 );
weightMap = tdata.weightMaps.begin()->second;
if( *direction == 1 )
else if( *direction == 2 )
std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
TLcomp2.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp2.get_n();
TLcomp2.vi_wr[2 * n] = to_proc;
TLcomp2.vi_wr[2 * n + 1] = marker;
pc.crystal_router()->gs_transfer( 1, TLcomp2, 0 );
std::stringstream ff2;
ff2 << "TLcomp2_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLcomp2.get_n() );
TLcomp2.sort( 1, &sort_buffer );
sort_buffer.reset();
int n1 = TLcomp1.get_n();
int n2 = TLcomp2.get_n();
int indexInTLComp1 = 0;
int indexInTLComp2 = 0;
if( n1 > 0 && n2 > 0 )
while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )
int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
if( currentValue1 < currentValue2 )
if( currentValue1 > currentValue2 )
while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
for( int i1 = 0; i1 < size1; i1++ )
for( int i2 = 0; i2 < size2; i2++ )
int n = TLBackToComp1.get_n();
TLBackToComp1.vi_wr[3 * n] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
n = TLBackToComp2.get_n();
TLBackToComp2.vi_wr[3 * n] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
indexInTLComp1 += size1;
indexInTLComp2 += size2;
pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );
pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
std::stringstream f1;
f1 << "TLBack1_" << localRank << ".txt";
rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval );
int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 +
pc.crystal_router()->gs_transfer( 1, TLv, 0 );
if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 );
Range primary_ents3;
std::vector< int > values_entities;
if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
assert( *pid2 >= 0 );
TempestMapAppData& tdata = dataIntx.tempestData;
if( 1 == *direction )
tdata.pid_src = pid3;
tdata.pid_dest = pid3;
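// iMOAB_SetMapGhostLayers records the requested source/target ghost-layer counts in
// the application's TempestMapAppData; they are consumed later by
// ConstructCoveringSet during iMOAB_ComputeMeshIntersectionOnSphere.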
ErrCode iMOAB_SetMapGhostLayers( iMOAB_AppID pid, int* n_src_ghost_layers, int* n_tgt_ghost_layers )
data.tempestData.num_src_ghost_layers = *n_src_ghost_layers;
data.tempestData.num_tgt_ghost_layers = *n_tgt_ghost_layers;
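// iMOAB_ComputeMeshIntersectionOnSphere fragments follow: both meshes are validated
// (optionally repaired for convexity), their sphere radii are compared, the
// TempestRemap remapper is initialized, a covering set is built on the source side,
// and the overlap mesh is computed with a kd-tree search; source, target, and
// overlap areas are then compared as a sanity check.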
constexpr bool validate = true;
constexpr bool enforceConvexity = true;
constexpr bool use_kdtree_search = true;
constexpr bool gnomonic = true;
constexpr double defaultradius = 1.0;
constexpr double boxeps = 1.e-10;
const double epsrel = ReferenceTolerance;
double radius_source = 1.0;
double radius_target = 1.0;
#ifdef MOAB_HAVE_MPI
TempestMapAppData& tdata = data_intx.tempestData;
bool is_parallel = false, is_root = true;
#ifdef MOAB_HAVE_MPI
rank = pco_intx->rank();
is_parallel = ( pco_intx->size() > 1 );
is_root = ( rank == 0 );
outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
rval = ComputeSphereRadius( pid_src, &radius_source );MB_CHK_ERR( rval );
rval = ComputeSphereRadius( pid_tgt, &radius_target );MB_CHK_ERR( rval );
outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
                        radius_target );
if( fabs( radius_source - radius_target ) > 1e-10 )
#ifdef MOAB_HAVE_TEMPESTREMAP
if( enforceConvexity )
if( enforceConvexity )
outputFormatter.printf( 0, "The source set contains %d vertices and %d elements \n", rintxverts.size(),
                        rintxelems.size() );
outputFormatter.printf( 0, "The target set contains %d vertices and %d elements \n", bintxverts.size(),
                        bintxelems.size() );
tdata.pid_src = pid_src;
tdata.pid_dest = pid_tgt;
#ifdef MOAB_HAVE_MPI
tdata.remapper->meshValidate = validate;
tdata.remapper->constructEdgeMap = true;
tdata.remapper->initialize( false );
rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false, gnomonic, tdata.num_src_ghost_layers );MB_CHK_ERR( rval );
rval = tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false );MB_CHK_ERR( rval );
double local_areas[3], global_areas[3];
local_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius );
local_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius );
local_areas[2] = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source );
#ifdef MOAB_HAVE_MPI
global_areas[0] = global_areas[1] = global_areas[2] = 0.0;
MPI_Reduce( &local_areas[0], &global_areas[0], 3, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
global_areas[0] = local_areas[0];
global_areas[1] = local_areas[1];
global_areas[2] = local_areas[2];
outputFormatter.printf( 0,
                        "initial area: source mesh = %12.14f, target mesh = %12.14f, "
                        "overlap mesh = %12.14f\n",
                        global_areas[0], global_areas[1], global_areas[2] );
outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
                        fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
                        fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
double radius_source = 1.0;
double radius_target = 1.0;
const double epsrel = ReferenceTolerance;
const double boxeps = 1.e-8;
#ifdef MOAB_HAVE_MPI
TempestMapAppData& tdata = data_intx.tempestData;
#ifdef MOAB_HAVE_MPI
ComputeSphereRadius( pid_src, &radius_source );
ComputeSphereRadius( pid_tgt, &radius_target );
std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
          << " elements \n";
std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
          << " elements \n";
tdata.pid_src = pid_src;
tdata.pid_dest = pid_tgt;
#ifdef MOAB_HAVE_MPI
tdata.remapper->meshValidate = true;
tdata.remapper->constructEdgeMap = true;
tdata.remapper->initialize( false );
if( fabs( radius_source - radius_target ) > 1e-10 )
rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
#ifdef MOAB_HAVE_MPI
rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
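// iMOAB_ComputeScalarProjectionWeights follows: a named TempestOnlineMap is created
// for the intersection application and the discretization choices (methods, orders,
// DoF tag names, monotonicity, bubble/volumetric/inverse-distance and conservation
// flags) are forwarded to TempestRemap's offline map generator; CheckMap can be run
// afterwards to validate the weights.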
ErrCode iMOAB_ComputeScalarProjectionWeights(
int* disc_order_source,
int* disc_order_target,
int* fMonotoneTypeID,
int* fInverseDistanceMap,
int* fNoConservation,
assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
assert( disc_method_source && strlen( disc_method_source ) );
assert( disc_method_target && strlen( disc_method_target ) );
assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
TempestMapAppData& tdata = data_intx.tempestData;
tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
assert( weightMap != NULL );
GenerateOfflineMapAlgorithmOptions mapOptions;
mapOptions.nPin = *disc_order_source;
mapOptions.nPout = *disc_order_target;
mapOptions.fSourceConcave = false;
mapOptions.fTargetConcave = false;
mapOptions.strMethod = "";
if( fv_method ) mapOptions.strMethod += std::string( fv_method ) + ";";
if( fMonotoneTypeID )
switch( *fMonotoneTypeID )
mapOptions.fMonotone = false;
mapOptions.strMethod += "mono3;";
mapOptions.strMethod += "mono2;";
mapOptions.fMonotone = true;
mapOptions.fMonotone = false;
mapOptions.fNoBubble = ( fNoBubble ? *fNoBubble : false );
mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
mapOptions.fNoCorrectAreas = false;
mapOptions.fNoCheck = true;
if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
std::string metadataStr = mapOptions.strMethod + ";" + std::string( disc_method_source ) + ":" +
                          std::to_string( *disc_order_source ) + ":" + std::string( source_solution_tag_dof_name ) +
                          ";" + std::string( disc_method_target ) + ":" + std::to_string( *disc_order_target ) +
                          ":" + std::string( target_solution_tag_dof_name );
data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
std::string( disc_method_source ),
std::string( disc_method_target ),
std::string( source_solution_tag_dof_name ),
std::string( target_solution_tag_dof_name )
if( fValidate && *fValidate )
const double dNormalTolerance = 1.0E-8;
const double dStrictTolerance = 1.0E-12;
weightMap->CheckMap( true, true, ( fMonotoneTypeID && *fMonotoneTypeID ), dNormalTolerance, dStrictTolerance );
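// iMOAB_ApplyScalarProjectionWeights follows: the ":"-separated source and target
// tag lists are resolved to handles (both lists must be the same length), the named
// weight map applies its sparse matrix per variable (with the caasType filter
// selected by filter_type), and the remaining fragments are verbose-mode debug dumps
// of coverage/target data to .txt, .h5m, and .vtk files.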
ErrCode iMOAB_ApplyScalarProjectionWeights(
assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
TempestMapAppData& tdata = data_intx.tempestData;
if( !tdata.weightMaps.count( std::string( solution_weights_identifier ) ) )
std::vector< std::string > srcNames;
std::vector< std::string > tgtNames;
std::vector< Tag > srcTagHandles;
std::vector< Tag > tgtTagHandles;
std::string separator( ":" );
std::string src_name( source_solution_tag_name );
std::string tgt_name( target_solution_tag_name );
if( srcNames.size() != tgtNames.size() )
std::cout << " error in parsing source and target tag names. \n";
return moab::MB_FAILURE;
for( size_t i = 0; i < srcNames.size(); i++ )
if( MB_SUCCESS != rval || NULL == tagHandle )
return moab::MB_FAILURE;
srcTagHandles.push_back( tagHandle );
if( MB_SUCCESS != rval || NULL == tagHandle )
return moab::MB_FAILURE;
tgtTagHandles.push_back( tagHandle );
std::vector< double > solSTagVals;
std::vector< double > solTTagVals;
solSTagVals.resize( covSrcEnts.size(), 0. );
solTTagVals.resize( tgtEnts.size(), 0. );
switch( *filter_type )
for( size_t i = 0; i < srcTagHandles.size(); i++ )
rval = weightMap->ApplyWeights( srcTagHandles[i], tgtTagHandles[i], false, caasType );MB_CHK_ERR( rval );
Tag ssolnTag = srcTagHandles[ivar];
std::stringstream sstr;
sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
std::ofstream output_file( sstr.str().c_str() );
for( unsigned i = 0; i < sents.size(); ++i )
std::vector< double > locsolSTagVals( 16 );
for( unsigned j = 0; j < 16; ++j )
output_file << locsolSTagVals[j] << " ";
output_file.flush();
output_file.close();
std::stringstream sstr;
sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
std::stringstream sstr;
sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
std::stringstream sstr;
sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
std::stringstream sstr;
sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
std::stringstream sstr;
sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
std::ofstream output_file( sstr.str().c_str() );
for( unsigned i = 0; i < solSTagVals.size(); ++i )
output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
            << solSTagVals[i] << "\n";
output_file.flush();
output_file.close();