#include <H5Tpublic.h>
#ifdef MOAB_HAVE_TEMPESTREMAP
#include "STLStringHelper.h"
#ifdef MOAB_HAVE_TEMPESTREMAP
struct TempestMapAppData
std::map< std::string, moab::TempestOnlineMap* > weightMaps;
std::map< int, ParCommGraph* > pgraph;
#ifdef MOAB_HAVE_TEMPESTREMAP
TempestMapAppData tempestData;
std::map< std::string, std::string > metadataMap;
std::vector< ParallelComm* > pcomms;
for( int i = 0; i < 4; i++ )
MPI_Initialized( &flagInit );
std::string name( app_name );
std::cout << " application " << name << " already registered \n";
return moab::MB_FAILURE;
MPI_Comm_rank( *comm, &rankHere );
if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
std::cout << " convention for external application is to have its id positive \n";
return moab::MB_FAILURE;
std::cout << " external application with comp id " << *compid << " is already registered\n";
return moab::MB_FAILURE;
int index = pco->get_id();
assert( index == *pid );
context.pcomms.push_back( pco );
app_data.name = name;
#ifdef MOAB_HAVE_TEMPESTREMAP
app_data.tempestData.remapper = NULL;
assert( app_name != nullptr );
std::string name( app_name );
ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
rankHere = pco->rank();
std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
          << " is de-registered now \n";
fileents.insert( fileSet );
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper ) delete data.tempestData.remapper;
if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
std::map< int, ParCommGraph* >& pargs = data.pgraph;
for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
if( pco ) delete pco;
if( !adj_ents_left.empty() )
vertices = subtract( vertices, conn_verts );
std::map< std::string, int >::iterator mit;
int pidx = mit->second;
std::map< int, int >::iterator mit1;
int pidx = mit1->second;
std::string& separator,
std::vector< std::string >& list_tag_names )
while( ( pos = input_names.find( separator ) ) != std::string::npos )
token = input_names.substr( 0, pos );
if( !token.empty() ) list_tag_names.push_back( token );
input_names.erase( 0, pos + separator.length() );
if( !input_names.empty() )
list_tag_names.push_back( input_names );
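// Convention for the helper above: several tag names arrive as one string
// joined by a separator (typically ":"); e.g. an input such as "a:b:c"
// (illustrative) splits into three names. The loop peels off one token per
// iteration, and whatever remains after the last separator is pushed as the
// final name.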
int* num_global_vertices,
int* num_global_elements,
IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
#ifdef MOAB_HAVE_HDF5
std::string filen( filename );
if( num_global_vertices ) *num_global_vertices = 0;
if( num_global_elements ) *num_global_elements = 0;
if( num_dimension ) *num_dimension = 0;
if( num_parts ) *num_parts = 0;
unsigned long max_id;
file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
return moab::MB_FAILURE;
fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
return moab::MB_FAILURE;
if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
edges += ent_d->count;
faces += ent_d->count;
faces += ent_d->count;
faces += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
regions += ent_d->count;
if( num_parts ) *num_parts = data->numEntSets[0];
if( num_dimension ) *num_dimension = 1;
if( num_global_elements ) *num_global_elements = edges;
if( num_dimension ) *num_dimension = 2;
if( num_global_elements ) *num_global_elements = faces;
if( num_dimension ) *num_dimension = 3;
if( num_global_elements ) *num_global_elements = regions;
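// The header scan above tallies counts by dimension (edges, faces, regions);
// the reported mesh dimension is that of the highest-dimension entities
// present, and the global element count reported matches that dimension.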
std::cout << filename
          << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
             "formats other than a h5m file.\n";
if( num_global_vertices ) *num_global_vertices = 0;
if( num_global_elements ) *num_global_elements = 0;
if( num_dimension ) *num_dimension = 0;
if( num_parts ) *num_parts = 0;
int* num_ghost_layers )
IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
std::ostringstream newopts;
if( read_options ) newopts << read_options;
std::string opts( ( read_options ? read_options : "" ) );
std::string pcid( "PARALLEL_COMM=" );
std::size_t found = opts.find( pcid );
if( found != std::string::npos )
std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
return moab::MB_FAILURE;
std::string filen( filename );
std::string::size_type idx = filen.rfind( '.' );
if( idx != std::string::npos )
std::string extension = filen.substr( idx + 1 );
if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
newopts << ";;PARALLEL_COMM=" << *pid;
if( *num_ghost_layers >= 1 )
std::string pcid2( "PARALLEL_GHOSTS=" );
std::size_t found2 = opts.find( pcid2 );
if( found2 != std::string::npos )
std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed number of layers \n";
newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
std::ostringstream outfile;
int rank = context.pcomms[*pid]->rank();
int nprocs = context.pcomms[*pid]->size();
outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
outfile << "TaskMesh_n1.0.h5m";
IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
std::ostringstream newopts;
std::string write_opts( ( write_options ? write_options : "" ) );
std::string pcid( "PARALLEL_COMM=" );
std::size_t found = write_opts.find( pcid );
if( found != std::string::npos )
std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
return moab::MB_FAILURE;
std::string pw( "PARALLEL=WRITE_PART" );
found = write_opts.find( pw );
if( found != std::string::npos )
newopts << "PARALLEL_COMM=" << *pid << ";";
if( write_options ) newopts << write_options;
std::vector< Tag > copyTagList = data.tagList;
std::string gid_name_tag( "GLOBAL_ID" );
if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
copyTagList.push_back( gid );
std::string pp_name_tag( "PARALLEL_PARTITION" );
if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
if( ptag ) copyTagList.push_back( ptag );
IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
std::ostringstream file_name;
int rank = 0, size = 1;
rank = context.pcomms[*pid]->rank();
file_name << prefix << "_" << size << "_" << rank << ".h5m";
int local[2], global[2];
MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
int* num_visible_vertices,
int* num_visible_elements,
int* num_visible_blocks,
int* num_visible_surfaceBC,
int* num_visible_vertexBC )
if( num_visible_elements )
num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
if( num_visible_vertices )
num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
if( num_visible_blocks )
num_visible_blocks[0] = num_visible_blocks[2];
num_visible_blocks[1] = 0;
if( num_visible_surfaceBC )
num_visible_surfaceBC[2] = 0;
for( int i = 0; i < numNeuSets; i++ )
Range adjPrimaryEnts;
num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
num_visible_surfaceBC[1] = 0;
if( num_visible_vertexBC )
num_visible_vertexBC[2] = 0;
for( int i = 0; i < numDiriSets; i++ )
num_visible_vertexBC[2] += (int)verts.size();
num_visible_vertexBC[0] = num_visible_vertexBC[2];
num_visible_vertexBC[1] = 0;
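// Convention for the 3-entry output arrays filled above:
// [0] = owned entities, [1] = ghost entities, [2] = total visible,
// so owned = total - ghost (see the vertex computation).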
IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
assert( vertices_length && *vertices_length );
assert( visible_global_rank_ID );
if( i != *vertices_length )
return moab::MB_FAILURE;
if( (int)verts.size() != *vertices_length )
return moab::MB_FAILURE;
visible_global_rank_ID[i] = 0;
if( *coords_length != 3 * (int)verts.size() )
return moab::MB_FAILURE;
if( *block_length != (int)matSets.size() )
return moab::MB_FAILURE;
for( unsigned i = 0; i < matSets.size(); i++ )
matIdx[global_block_IDs[i]] = i;
int* vertices_per_element,
int* num_elements_in_block )
assert( global_block_ID );
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
return moab::MB_FAILURE;
return moab::MB_FAILURE;
*vertices_per_element = num_verts;
*num_elements_in_block = (int)blo_elems.size();
int* num_visible_elements,
#ifdef MOAB_HAVE_MPI
#ifdef MOAB_HAVE_MPI
return moab::MB_FAILURE;
if( -1 >= *num_visible_elements )
return moab::MB_FAILURE;
block_IDs[index] = valMatTag;
int* connectivity_length,
int* element_connectivity )
assert( global_block_ID );
assert( connectivity_length );
std::map< int, int >& matMap = data.matIndex;
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
std::vector< EntityHandle > elems;
return moab::MB_FAILURE;
std::vector< EntityHandle > vconnect;
if( *connectivity_length != (int)vconnect.size() )
return moab::MB_FAILURE;
for( int i = 0; i < *connectivity_length; i++ )
return moab::MB_FAILURE;
element_connectivity[i] = inx;
int* connectivity_length,
int* element_connectivity )
assert( elem_index );
assert( connectivity_length );
assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
if( *connectivity_length < num_nodes )
return moab::MB_FAILURE;
for( int i = 0; i < num_nodes; i++ )
return moab::MB_FAILURE;
element_connectivity[i] = index;
*connectivity_length = num_nodes;
int* num_elements_in_block,
int* element_ownership )
assert( global_block_ID );
assert( num_elements_in_block );
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
return moab::MB_FAILURE;
if( *num_elements_in_block != (int)elems.size() )
return moab::MB_FAILURE;
#ifdef MOAB_HAVE_MPI
#ifdef MOAB_HAVE_MPI
element_ownership[i] = 0;
int* num_elements_in_block,
assert( global_block_ID );
assert( num_elements_in_block );
std::map< int, int >& matMap = data.matIndex;
std::map< int, int >::iterator it = matMap.find( *global_block_ID );
if( it == matMap.end() )
return moab::MB_FAILURE;
int blockIndex = matMap[*global_block_ID];
return moab::MB_FAILURE;
if( *num_elements_in_block != (int)elems.size() )
return moab::MB_FAILURE;
for( int i = 0; i < *num_elements_in_block; i++ )
if( -1 == local_element_ID[i] )
return moab::MB_FAILURE;
int* surface_BC_length,
int* reference_surface_ID,
int* boundary_condition_value )
for( int i = 0; i < numNeuSets; i++ )
Range adjPrimaryEnts;
if( -1 == local_element_ID[index] )
return moab::MB_FAILURE;
boundary_condition_value[index] = neuVal;
if( index != *surface_BC_length )
return moab::MB_FAILURE;
int* vertex_BC_length,
int* boundary_condition_value )
for( int i = 0; i < numDiriSets; i++ )
if( -1 == local_vertex_ID[index] )
return moab::MB_FAILURE;
boundary_condition_value[index] = diriVal;
if( *vertex_BC_length != index )
return moab::MB_FAILURE;
int* components_per_entity,
if( *tag_type < 0 || *tag_type > 5 )
return moab::MB_FAILURE;
void* defaultVal = NULL;
int* defInt = new int[*components_per_entity];
double* defDouble = new double[*components_per_entity];
for( int i = 0; i < *components_per_entity; i++ )
defDouble[i] = -1e+10;
defaultVal = defInt;
defaultVal = defDouble;
defaultVal = defHandle;
defaultVal = defInt;
defaultVal = defDouble;
defaultVal = defHandle;
return moab::MB_FAILURE;
std::string tag_name( tag_storage_name );
std::vector< std::string > tagNames;
std::string separator( ":" );
int already_defined_tags = 0;
for( size_t i = 0; i < tagNames.size(); i++ )
std::map< std::string, Tag >& mTags = data.tagMap;
std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
if( mit == mTags.end() )
mTags[tagNames[i]] = tagHandle;
*tag_index = (int)data.tagList.size();
data.tagList.push_back( tagHandle );
already_defined_tags++;
data.tagMap[tagNames[i]] = tagHandle;
*tag_index = (int)data.tagList.size();
data.tagList.push_back( tagHandle );
rval = moab::MB_FAILURE;
#ifdef MOAB_HAVE_MPI
rankHere = pco->rank();
if( !rankHere && already_defined_tags )
std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
          << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
          << " tags \n";
int* num_tag_storage_length,
int* tag_storage_data )
std::string tag_name( tag_storage_name );
if( data.tagMap.find( tag_name ) == data.tagMap.end() )
return moab::MB_FAILURE;
return moab::MB_FAILURE;
if( *ent_type == 0 )
int nents_to_be_set = *num_tag_storage_length / tagLength;
if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 )
return moab::MB_FAILURE;
int* num_tag_storage_length,
int* tag_storage_data )
std::string tag_name( tag_storage_name );
if( data.tagMap.find( tag_name ) == data.tagMap.end() )
return moab::MB_FAILURE;
return moab::MB_FAILURE;
if( *ent_type == 0 )
int nents_to_get = *num_tag_storage_length / tagLength;
if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 )
return moab::MB_FAILURE;
int* num_tag_storage_length,
double* tag_storage_data )
std::string tag_names( tag_storage_names );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
Range* ents_to_set = NULL;
if( *ent_type == 0 )
else if( *ent_type == 1 )
int nents_to_be_set = (int)( *ents_to_set ).size();
for( size_t i = 0; i < tagNames.size(); i++ )
if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
return moab::MB_FAILURE;
return moab::MB_FAILURE;
if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
return moab::MB_FAILURE;
position = position + tagLength * nents_to_be_set;
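// Layout of tag_storage_data assumed above: values are serialized per tag
// rather than interleaved per entity; tag i occupies the contiguous slice
// [position, position + tagLength * nents_to_be_set) before position advances.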
int* num_tag_storage_length,
double* tag_storage_data,
std::string tag_names( tag_storage_names );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
Range* ents_to_set = NULL;
if( *ent_type == 0 )
else if( *ent_type == 1 )
int nents_to_be_set = (int)( *ents_to_set ).size();
std::vector< int > gids;
gids.resize( nents_to_be_set );
std::map< int, EntityHandle > eh_by_gid;
eh_by_gid[gids[i]] = *it;
std::vector< int > tagLengths( tagNames.size() );
std::vector< Tag > tagList;
size_t total_tag_len = 0;
for( size_t i = 0; i < tagNames.size(); i++ )
if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
tagList.push_back( tag );
total_tag_len += tagLength;
tagLengths[i] = tagLength;
MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
#ifdef MOAB_HAVE_MPI
unsigned num_procs = pco->size();
if( num_procs > 1 ) serial = false;
for( int i = 0; i < nents_to_be_set; i++ )
int gid = globalIds[i];
std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
if( mapIt == eh_by_gid.end() ) continue;
int indexInTagValues = 0;
for( size_t j = 0; j < tagList.size(); j++ )
indexInTagValues += i * tagLengths[j];
indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j];
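// Serial path: the caller's arrays may order entities differently than the
// local Range, so values are matched through global IDs. For entity i, the
// block of tag j starts after all earlier tags' blocks; the two index updates
// above skip i rows into tag j's block and then past its remaining rows.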
#ifdef MOAB_HAVE_MPI
int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() );
TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals );
int indexInRealLocal = 0;
for( int i = 0; i < nbLocalVals; i++ )
int marker = globalIds[i];
int to_proc = marker % num_procs;
int n = TLsend.get_n();
TLsend.vi_wr[2 * n] = to_proc;
TLsend.vi_wr[2 * n + 1] = marker;
int indexInTagValues = 0;
for( size_t j = 0; j < tagList.size(); j++ )
indexInTagValues += i * tagLengths[j];
for( int k = 0; k < tagLengths[j]; k++ )
TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
for( int i = 0; i < nents_to_be_set; i++ )
int marker = gids[i];
int to_proc = marker % num_procs;
int n = TLreq.get_n();
TLreq.vi_wr[2 * n] = to_proc;
TLreq.vi_wr[2 * n + 1] = marker;
sort_buffer.buffer_init( TLreq.get_n() );
TLreq.sort( 1, &sort_buffer );
sort_buffer.reset();
sort_buffer.buffer_init( TLsend.get_n() );
TLsend.sort( 1, &sort_buffer );
sort_buffer.reset();
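// Parallel path: a rendezvous through the crystal router. Each global ID
// (marker) is routed to task marker % num_procs, so the task that holds the
// caller-supplied values (TLsend) and the task that owns the entity (TLreq)
// meet at the same rendezvous rank; sorting both tuple lists by marker
// prepares them for the linear merge below.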
TLBack.initialize( 3, 0, 0, total_tag_len, 0 );
int n1 = TLreq.get_n();
int n2 = TLsend.get_n();
int indexInTLreq = 0;
int indexInTLsend = 0;
if( n1 > 0 && n2 > 0 )
while( indexInTLreq < n1 && indexInTLsend < n2 )
int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
if( currentValue1 < currentValue2 )
if( currentValue1 > currentValue2 )
while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
for( int i1 = 0; i1 < size1; i1++ )
for( int i2 = 0; i2 < size2; i2++ )
int n = TLBack.get_n();
TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )];
TLBack.vi_wr[3 * n + 1] = currentValue1;
TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )];
for( size_t k = 0; k < total_tag_len; k++ )
TLBack.vr_rd[total_tag_len * n + k] = TLsend.vr_rd[total_tag_len * indexInTLsend + k];
indexInTLreq += size1;
indexInTLsend += size2;
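// The loop above is a merge-join over two marker-sorted tuple lists: runs of
// equal markers are paired (size1 x size2 combinations), and each reply tuple
// in TLBack records the requester task, the marker, the sender task, and the
// associated tag values to ship back.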
n1 = TLBack.get_n();
double* ptrVal = &TLBack.vr_rd[0];
for( int i = 0; i < n1; i++ )
int gid = TLBack.vi_rd[3 * i + 1];
std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
if( mapIt == eh_by_gid.end() ) continue;
for( size_t j = 0; j < tagList.size(); j++ )
ptrVal += tagLengths[j];
int* num_tag_storage_length,
double* tag_storage_data )
std::string tag_names( tag_storage_names );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
Range* ents_to_get = NULL;
if( *ent_type == 0 )
else if( *ent_type == 1 )
int nents_to_get = (int)ents_to_get->size();
for( size_t i = 0; i < tagNames.size(); i++ )
if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
return moab::MB_FAILURE;
return moab::MB_FAILURE;
if( position + nents_to_get * tagLength > *num_tag_storage_length )
return moab::MB_FAILURE;
position = position + nents_to_get * tagLength;
#ifdef MOAB_HAVE_MPI
std::vector< Tag > tags;
for( int i = 0; i < *num_tag; i++ )
if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
return moab::MB_FAILURE;
tags.push_back( data.tagList[tag_indices[i]] );
if( *ent_type == 0 )
else if( *ent_type == 1 )
return moab::MB_FAILURE;
int k = *pid + *num_tag + *tag_indices + *ent_type;
#ifdef MOAB_HAVE_MPI
if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
return moab::MB_FAILURE;
if( *ent_type == 0 )
else if( *ent_type == 1 )
return moab::MB_FAILURE;
int k = *pid + *tag_index + *ent_type;
int* num_adjacent_elements,
if( *num_adjacent_elements < (int)adjs.size() )
return moab::MB_FAILURE;
*num_adjacent_elements = (int)adjs.size();
for( int i = 0; i < *num_adjacent_elements; i++ )
return moab::MB_FAILURE;
int nverts = *coords_len / *dim;
int* num_nodes_per_element,
EntityType mbtype = (EntityType)( *type );
for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
array[j] = connectivity[j] + firstVertex - 1;
Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
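// Note: the caller passes 1-based local vertex indices in connectivity, hence
// the "+ firstVertex - 1" shift onto the contiguous vertex handle range that
// starts at firstVertex.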
int set_no = *block_ID;
const void* setno_ptr = &set_no;
if( MB_FAILURE == rval || sets.empty() )
block_set = sets[0];
if( NULL != num_global_verts )
if( NULL != num_global_elems )
#ifdef MOAB_HAVE_MPI
return moab::MB_FAILURE;
std::cout << " can't get par part tag.\n";
return moab::MB_FAILURE;
int rank = pco->rank();
if( num_ghost_layers && *num_ghost_layers <= 0 )
constexpr int addl_ents = 0;
for( int i = 2; i <= *num_ghost_layers; i++ )
ErrCode iMOAB_SendMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* receivingGroup, int* rcompid, int* method )
assert( join != nullptr );
assert( receivingGroup != nullptr );
assert( rcompid != nullptr );
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
MPI_Group recvGroup =
    ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
MPI_Comm sender = pco->comm();
MPI_Group senderGroup;
ierr = MPI_Comm_group( sender, &senderGroup );
if( ierr != 0 ) return moab::MB_FAILURE;
int sender_rank = -1;
MPI_Comm_rank( sender, &sender_rank );
std::vector< int > number_elems_per_part;
if( owned.size() == 0 )
int local_owned_elem = (int)owned.size();
int rank = pco->rank();
number_elems_per_part.resize( size );
number_elems_per_part[rank] = local_owned_elem;
#if( MPI_VERSION >= 2 )
ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
std::vector< int > all_tmp( size );
ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
number_elems_per_part = all_tmp;
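// With MPI-2 or newer the gather happens in place over number_elems_per_part;
// the fallback gathers into a scratch vector (all_tmp) and copies back. Either
// way, every sender task ends up with the element count of all parts.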
return moab::MB_FAILURE;
MPI_Group_free( &senderGroup );
ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* sendingGroup, int* scompid )
assert( join != nullptr );
assert( sendingGroup != nullptr );
assert( scompid != nullptr );
MPI_Comm receive = pco->comm();
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
MPI_Group sendGroup =
    ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
MPI_Group receiverGroup;
int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
int receiver_rank = -1;
MPI_Comm_rank( receive, &receiver_rank );
std::vector< int > pack_array;
int current_receiver = cgraph->receiver( receiver_rank );
std::vector< int > senders_local;
while( n < pack_array.size() )
if( current_receiver == pack_array[n] )
for( int j = 0; j < pack_array[n + 1]; j++ )
senders_local.push_back( pack_array[n + 2 + j] );
n = n + 2 + pack_array[n + 1];
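// pack_array layout, as decoded above: a flat stream of blocks
// [receiver_rank, num_senders, sender_0, ..., sender_(num_senders-1)];
// each receiver scans for its own rank and collects its sender list.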
std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
          << senders_local.size() << " tasks: ";
for( int k = 0; k < (int)senders_local.size(); k++ )
std::cout << " " << senders_local[k];
if( senders_local.empty() )
std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
if( (int)senders_local.size() >= 2 )
std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
std::cout << "after merging: new verts: " << new_verts.size() << "\n";
if( NULL != densePartTag && MB_SUCCESS == rval )
std::vector< int > vals;
int rank = pco->rank();
vals.resize( local_verts.size(), rank );
std::cout << " can't get par part tag.\n";
return moab::MB_FAILURE;
int rank = pco->rank();
MPI_Group_free( &receiverGroup );
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
if( mt == data.pgraph.end() )
return moab::MB_FAILURE;
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper != NULL )
if( 0 != cover_set )
std::string tag_name( tag_storage_name );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
for( size_t i = 0; i < tagNames.size(); i++ )
if( MB_SUCCESS != rval || NULL == tagHandle )
std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";
MB_CHK_SET_ERR( rval, "can't get tag handle" );
tagHandles.push_back( tagHandle );
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
if( mt == data.pgraph.end() )
return moab::MB_FAILURE;
MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
if( 0 != cover_set )
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data.tempestData.remapper != NULL )
std::string tag_name( tag_storage_name );
std::vector< std::string > tagNames;
std::vector< Tag > tagHandles;
std::string separator( ":" );
for( size_t i = 0; i < tagNames.size(); i++ )
if( MB_SUCCESS != rval || NULL == tagHandle )
std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";
MB_CHK_SET_ERR( rval, "can't get tag handle" );
tagHandles.push_back( tagHandle );
std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
          << " and file set = " << ( data.file_set ) << "\n";
std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
if( mt == data.pgraph.end() ) return moab::MB_FAILURE;
mt->second->release_send_buffers();
int localRank = 0, numProcs = 1;
bool isFortran = false;
if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
MPI_Comm global = ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
MPI_Comm_rank( global, &localRank );
MPI_Comm_size( global, &numProcs );
bool already_exists = false;
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp2 );
if( mt != data.pgraph.end() ) already_exists = true;
std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp1 );
if( mt != data.pgraph.end() ) already_exists = true;
if( already_exists )
std::cout << " parcomgraph already existing between components " << *comp1 << " and " << *comp2
          << ". Do not compute again\n";
if( *pid1 >= 0 ) cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
int lenTagType1 = 1;
if( 1 == *type1 || 1 == *type2 )
std::vector< int > valuesComp1;
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data1.tempestData.remapper != NULL )
Range ents_of_interest;
valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
else if( *type1 == 2 )
valuesComp1.resize( ents_of_interest.size() );
else if( *type1 == 3 )
valuesComp1.resize( ents_of_interest.size() );
std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
TLcomp1.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp1.get_n();
TLcomp1.vi_wr[2 * n] = to_proc;
TLcomp1.vi_wr[2 * n + 1] = marker;
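// Rendezvous step: each unique marker (global ID) from the first component is
// routed to rank marker % numProcs of the joint communicator; the second
// component does the same below, so tuples carrying the same ID land on the
// same task, where they can be matched.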
pc.crystal_router()->gs_transfer( 1, TLcomp1, 0 );
std::stringstream ff1;
ff1 << "TLcomp1_" << localRank << ".txt";
sort_buffer.buffer_init( TLcomp1.get_n() );
TLcomp1.sort( 1, &sort_buffer );
sort_buffer.reset();
std::vector< int > valuesComp2;
#ifdef MOAB_HAVE_TEMPESTREMAP
if( data2.tempestData.remapper != NULL )
Range ents_of_interest;
valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
else if( *type2 == 2 )
valuesComp2.resize( ents_of_interest.size() );
else if( *type2 == 3 )
valuesComp2.resize( ents_of_interest.size() );
std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
TLcomp2.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp2.get_n();
TLcomp2.vi_wr[2 * n] = to_proc;
TLcomp2.vi_wr[2 * n + 1] = marker;
pc.crystal_router()->gs_transfer( 1, TLcomp2, 0 );
std::stringstream ff2;
ff2 << "TLcomp2_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLcomp2.get_n() );
TLcomp2.sort( 1, &sort_buffer );
sort_buffer.reset();
int n1 = TLcomp1.get_n();
int n2 = TLcomp2.get_n();
int indexInTLComp1 = 0;
int indexInTLComp2 = 0;
if( n1 > 0 && n2 > 0 )
while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )
int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
if( currentValue1 < currentValue2 )
if( currentValue1 > currentValue2 )
while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
for( int i1 = 0; i1 < size1; i1++ )
for( int i2 = 0; i2 < size2; i2++ )
int n = TLBackToComp1.get_n();
TLBackToComp1.vi_wr[3 * n] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
n = TLBackToComp2.get_n();
TLBackToComp2.vi_wr[3 * n] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
indexInTLComp1 += size1;
indexInTLComp2 += size2;
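// Each matched (task, ID) pair is now routed back to its contributor:
// TLBackToComp1 returns to the component-1 task that supplied the ID and
// TLBackToComp2 to the component-2 task, so both sides learn the full
// correspondence needed to build the communication graph.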
pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );
pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
std::stringstream f1;
f1 << "TLBack1_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
TLBackToComp1.sort( 1, &sort_buffer );
sort_buffer.reset();
std::stringstream f2;
f2 << "TLBack2_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
TLBackToComp2.sort( 2, &sort_buffer );
sort_buffer.reset();
std::vector< Tag > tagsList;
if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE;
tagsList.push_back( tag );
if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
double tol = 1.0e-9;
std::cout << " can't get par part tag.\n";
return moab::MB_FAILURE;
int rank = pco->rank();
#ifdef MOAB_HAVE_TEMPESTREMAP
ErrCode iMOAB_CoverageGraph( MPI_Comm* join,
std::vector< int > srcSenders;
std::vector< int > receivers;
int default_context_id = -1;
bool is_fortran_context = false;
default_context_id = *migr_id;
is_fortran_context = context.appDatas[*pid_src].is_fortran || is_fortran_context;
srcSenders = sendGraph->senders();
std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
if( *pid_migr >= 0 )
is_fortran_context = context.appDatas[*pid_migr].is_fortran || is_fortran_context;
default_context_id = *src_id;
srcSenders = recvGraph->senders();
std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
int currentRankInJointComm = -1;
ierr = MPI_Comm_rank( global, &currentRankInJointComm );CHK_MPI_ERR( ierr );
if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) != receivers.end() )
if( *pid_intx >= (int)context.appDatas.size() ) return moab::MB_FAILURE;
Tag parentTag, orgSendProcTag;
if( !parentTag ) return moab::MB_FAILURE;
if( !orgSendProcTag ) return moab::MB_FAILURE;
std::map< int, std::set< int > > idsFromProcs;
int gidCell, origProc;
if( origProc < 0 ) continue;
std::set< int >& setInts = idsFromProcs[origProc];
setInts.insert( gidCell );
assert( *pid_intx >= 0 );
int gidCell, origProc;
std::set< int >& setInts = idsFromProcs[origProc];
setInts.insert( gidCell );
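// Coverage bookkeeping: for every cell in the coverage (or intersection) set,
// the receiver records the cell's global ID under the rank that originally
// sent it, so idsFromProcs[origProc] holds exactly the IDs that origProc must
// supply for this rank's intersection work.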
std::ofstream dbfile;
std::stringstream outf;
outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
dbfile.open( outf.str().c_str() );
dbfile << "Writing this to a file.\n";
dbfile << " map size:" << idsFromProcs.size() << std::endl;
for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
std::set< int >& setIds = mt->second;
dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
dbfile << " " << valueID;
if( counter % 10 == 0 ) dbfile << "\n";
if( NULL != recvGraph )
assert( *pid_intx >= 0 );
for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
int procToSendTo = mit->first;
std::set< int >& idSet = mit->second;
for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
int n = TLcovIDs.get_n();
TLcovIDs.vi_wr[2 * n] = procToSendTo;
TLcovIDs.vi_wr[2 * n + 1] = *sit;
pc.crystal_router()->gs_transfer( 1, TLcovIDs, 0 );
if( NULL != sendGraph )
assert( prefix && strlen( prefix ) );
std::string prefix_str( prefix );
if( NULL != cgraph )
std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
          << " context: " << *context_id << "\n";
#ifdef MOAB_HAVE_TEMPESTREMAP
#ifdef MOAB_HAVE_NETCDF
ErrCode iMOAB_LoadMappingWeightsFromFile(
bool row_based_partition = true;
if( *col_or_row == 1 ) row_based_partition = false;
TempestMapAppData& tdata = data_intx.tempestData;
if( tdata.remapper == NULL )
#ifdef MOAB_HAVE_MPI
tdata.remapper->meshValidate = true;
tdata.remapper->constructEdgeMap = true;
tdata.remapper->initialize( false );
tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
assert( weightMap != NULL );
int lenTagType1 = 1;
std::vector< int > dofValues;
dofValues.resize( ents_of_interest.size() * lenTagType1 );
ndofPerEl = lenTagType1;
else if( *type == 2 )
dofValues.resize( ents_of_interest.size() );
else if( *type == 3 )
dofValues.resize( ents_of_interest.size() );
std::vector< int > orderDofs( dofValues.begin(), dofValues.end() );
std::sort( orderDofs.begin(), orderDofs.end() );
orderDofs.erase( std::unique( orderDofs.begin(), orderDofs.end() ), orderDofs.end() );
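// sort + unique + erase is the standard C++ dedup idiom: afterwards orderDofs
// holds each locally seen DoF ID exactly once, in ascending order, ready for
// the row- or column-based partition chosen below.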
if( row_based_partition )
tdata.pid_dest = pid_cpl;
tdata.pid_src = pid_cpl;
std::vector< int > tmp_owned_ids;
ErrCode iMOAB_WriteMappingWeightsToFile(
assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
assert( remap_weights_filename && strlen( remap_weights_filename ) );
TempestMapAppData& tdata = data_intx.tempestData;
assert( tdata.remapper != NULL );
assert( weightMap != NULL );
std::string filename = std::string( remap_weights_filename );
std::string metadataStr = data_intx.metadataMap[std::string( solution_weights_identifier )];
std::map< std::string, std::string > attrMap;
attrMap["title"] = "MOAB-TempestRemap Online Regridding Weight Generator";
attrMap["normalization"] = "ovarea";
attrMap["map_aPb"] = filename;
const std::string delim = ";";
size_t pos = 0, index = 0;
std::vector< std::string > stringAttr( 3 );
while( ( pos = metadataStr.find( delim ) ) != std::string::npos )
std::string token1 = metadataStr.substr( 0, pos );
if( token1.size() > 0 || index == 0 ) stringAttr[index++] = token1;
metadataStr.erase( 0, pos + delim.length() );
stringAttr[index] = metadataStr;
assert( index == 2 );
attrMap["remap_options"] = stringAttr[0];
attrMap["methodorder_b"] = stringAttr[1];
attrMap["methodorder_a"] = stringAttr[2];
attrMap["concave_a"] = "false";
attrMap["concave_b"] = "false";
attrMap["bubble"] = "true";
#ifdef MOAB_HAVE_MPI
MPI_Comm* jointcomm,
assert( jointcomm );
bool is_fortran = false;
if( *pid1 >= 0 ) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran;
if( *pid2 >= 0 ) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran;
if( *pid3 >= 0 ) is_fortran = context.appDatas[*pid3].is_fortran || is_fortran;
MPI_Comm joint_communicator =
    ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm );
MPI_Group group_first = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA );
MPI_Group group_second = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB );
int localRank = 0, numProcs = 1;
MPI_Comm_rank( joint_communicator, &localRank );
MPI_Comm_size( joint_communicator, &numProcs );
if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 );
if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 );
int lenTagType1 = 1;
std::vector< int > valuesComp1;
Range ents_of_interest;
valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
else if( *type == 2 )
valuesComp1.resize( ents_of_interest.size() );
else if( *type == 3 )
valuesComp1.resize( ents_of_interest.size() );
std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
TLcomp1.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp1.get_n();
TLcomp1.vi_wr[2 * n] = to_proc;
TLcomp1.vi_wr[2 * n + 1] = marker;
pc.crystal_router()->gs_transfer( 1, TLcomp1, 0 );
std::stringstream ff1;
ff1 << "TLcomp1_" << localRank << ".txt";
sort_buffer.buffer_init( TLcomp1.get_n() );
TLcomp1.sort( 1, &sort_buffer );
sort_buffer.reset();
std::vector< int > valuesComp2;
TempestMapAppData& tdata = data2.tempestData;
assert( tdata.weightMaps.size() == 1 );
weightMap = tdata.weightMaps.begin()->second;
if( *direction == 1 )
else if( *direction == 2 )
std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
TLcomp2.resize( uniq.size() );
for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
int to_proc = marker % numProcs;
int n = TLcomp2.get_n();
TLcomp2.vi_wr[2 * n] = to_proc;
TLcomp2.vi_wr[2 * n + 1] = marker;
pc.crystal_router()->gs_transfer( 1, TLcomp2, 0 );
std::stringstream ff2;
ff2 << "TLcomp2_" << localRank << ".txt";
sort_buffer.buffer_reserve( TLcomp2.get_n() );
TLcomp2.sort( 1, &sort_buffer );
sort_buffer.reset();
int n1 = TLcomp1.get_n();
int n2 = TLcomp2.get_n();
int indexInTLComp1 = 0;
int indexInTLComp2 = 0;
if( n1 > 0 && n2 > 0 )
while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )
int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
if( currentValue1 < currentValue2 )
if( currentValue1 > currentValue2 )
while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
for( int i1 = 0; i1 < size1; i1++ )
for( int i2 = 0; i2 < size2; i2++ )
int n = TLBackToComp1.get_n();
TLBackToComp1.vi_wr[3 * n] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
n = TLBackToComp2.get_n();
TLBackToComp2.vi_wr[3 * n] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;
TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
indexInTLComp1 += size1;
indexInTLComp2 += size2;
pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );
pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
std::stringstream f1;
f1 << "TLBack1_" << localRank << ".txt";
rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval );
int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 +
pc.crystal_router()->gs_transfer( 1, TLv, 0 );
if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 );
Range primary_ents3;
std::vector< int > values_entities;
if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
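// For type 1 (spectral elements) the DoF tag holds a square block of values
// per cell, so lenTagType1 is a perfect square and the per-edge DoF count
// ndofPerEl is recovered as sqrt( lenTagType1 ) above.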
assert( *pid2 >= 0 );
TempestMapAppData& tdata = dataIntx.tempestData;
if( 1 == *direction )
tdata.pid_src = pid3;
tdata.pid_dest = pid3;
constexpr bool validate = true;
constexpr bool enforceConvexity = true;
constexpr bool use_kdtree_search = true;
constexpr bool gnomonic = true;
constexpr double defaultradius = 1.0;
constexpr double boxeps = 1.e-10;
const double epsrel = ReferenceTolerance;
double radius_source = 1.0;
double radius_target = 1.0;
#ifdef MOAB_HAVE_MPI
TempestMapAppData& tdata = data_intx.tempestData;
bool is_parallel = false, is_root = true;
#ifdef MOAB_HAVE_MPI
rank = pco_intx->rank();
is_parallel = ( pco_intx->size() > 1 );
is_root = ( rank == 0 );
outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
rval = ComputeSphereRadius( pid_src, &radius_source );MB_CHK_ERR( rval );
rval = ComputeSphereRadius( pid_tgt, &radius_target );MB_CHK_ERR( rval );
outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
                        radius_target );
if( fabs( radius_source - radius_target ) > 1e-10 )
#ifdef MOAB_HAVE_TEMPESTREMAP
if( enforceConvexity )
if( enforceConvexity )
outputFormatter.printf( 0, "The source set contains %d vertices and %d elements \n", rintxverts.size(),
                        rintxelems.size() );
outputFormatter.printf( 0, "The target set contains %d vertices and %d elements \n", bintxverts.size(),
                        bintxelems.size() );
tdata.pid_src = pid_src;
tdata.pid_dest = pid_tgt;
#ifdef MOAB_HAVE_MPI
tdata.remapper->meshValidate = validate;
tdata.remapper->constructEdgeMap = true;
tdata.remapper->initialize( false );
#ifdef MOAB_HAVE_MPI
outputFormatter.printf( 0, "Generating %d ghost layers for the source mesh\n", data_src.num_ghost_layers );
#ifdef MOAB_HAVE_MPI
outputFormatter.printf( 0, "Generating %d ghost layers for the target mesh\n", data_tgt.num_ghost_layers );
rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false, gnomonic, data_src.num_ghost_layers );MB_CHK_ERR( rval );
double local_areas[3], global_areas[3];
local_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius );
local_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius );
local_areas[2] = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source );
#ifdef MOAB_HAVE_MPI
global_areas[0] = global_areas[1] = global_areas[2] = 0.0;
MPI_Reduce( &local_areas[0], &global_areas[0], 3, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
global_areas[0] = local_areas[0];
global_areas[1] = local_areas[1];
global_areas[2] = local_areas[2];
outputFormatter.printf( 0,
                        "initial area: source mesh = %12.14f, target mesh = %12.14f, "
                        "overlap mesh = %12.14f\n",
                        global_areas[0], global_areas[1], global_areas[2] );
outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
                        fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
                        fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
double radius_source = 1.0;
double radius_target = 1.0;
const double epsrel = ReferenceTolerance;
const double boxeps = 1.e-8;
#ifdef MOAB_HAVE_MPI
TempestMapAppData& tdata = data_intx.tempestData;
#ifdef MOAB_HAVE_MPI
ComputeSphereRadius( pid_src, &radius_source );
ComputeSphereRadius( pid_tgt, &radius_target );
std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
          << " elements \n";
std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
          << " elements \n";
tdata.pid_src = pid_src;
tdata.pid_dest = pid_tgt;
#ifdef MOAB_HAVE_MPI
tdata.remapper->meshValidate = true;
tdata.remapper->constructEdgeMap = true;
tdata.remapper->initialize( false );
if( fabs( radius_source - radius_target ) > 1e-10 )
rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
#ifdef MOAB_HAVE_MPI
rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
ErrCode iMOAB_ComputeScalarProjectionWeights(
int* disc_order_source,
int* disc_order_target,
int* fMonotoneTypeID,
int* fInverseDistanceMap,
int* fNoConservation,
assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
assert( disc_method_source && strlen( disc_method_source ) );
assert( disc_method_target && strlen( disc_method_target ) );
assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
TempestMapAppData& tdata = data_intx.tempestData;
tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
assert( weightMap != NULL );
GenerateOfflineMapAlgorithmOptions mapOptions;
mapOptions.nPin = *disc_order_source;
mapOptions.nPout = *disc_order_target;
mapOptions.fSourceConcave = false;
mapOptions.fTargetConcave = false;
mapOptions.strMethod = "";
if( fv_method ) mapOptions.strMethod += std::string( fv_method ) + ";";
if( fMonotoneTypeID )
switch( *fMonotoneTypeID )
mapOptions.fMonotone = false;
mapOptions.strMethod += "mono3;";
mapOptions.strMethod += "mono2;";
mapOptions.fMonotone = true;
mapOptions.fMonotone = false;
mapOptions.fNoBubble = ( fNoBubble ? *fNoBubble : false );
mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
mapOptions.fNoCorrectAreas = false;
mapOptions.fNoCheck = true;
if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
std::string metadataStr = mapOptions.strMethod + ";" + std::string( disc_method_source ) + ":" +
                          std::to_string( *disc_order_source ) + ":" + std::string( source_solution_tag_dof_name ) +
                          ";" + std::string( disc_method_target ) + ":" + std::to_string( *disc_order_target ) +
                          ":" + std::string( target_solution_tag_dof_name );
data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
std::string( disc_method_source ),
std::string( disc_method_target ),
std::string( source_solution_tag_dof_name ),
std::string( target_solution_tag_dof_name )
if( fValidate && *fValidate )
const double dNormalTolerance = 1.0E-8;
const double dStrictTolerance = 1.0E-12;
weightMap->CheckMap( true, true, ( fMonotoneTypeID && *fMonotoneTypeID ), dNormalTolerance, dStrictTolerance );
ErrCode iMOAB_ApplyScalarProjectionWeights(
assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
TempestMapAppData& tdata = data_intx.tempestData;
if( !tdata.weightMaps.count( std::string( solution_weights_identifier ) ) )
std::vector< std::string > srcNames;
std::vector< std::string > tgtNames;
std::vector< Tag > srcTagHandles;
std::vector< Tag > tgtTagHandles;
std::string separator( ":" );
std::string src_name( source_solution_tag_name );
std::string tgt_name( target_solution_tag_name );
if( srcNames.size() != tgtNames.size() )
std::cout << " error in parsing source and target tag names. \n";
return moab::MB_FAILURE;
for( size_t i = 0; i < srcNames.size(); i++ )
if( MB_SUCCESS != rval || NULL == tagHandle )
return moab::MB_FAILURE;
srcTagHandles.push_back( tagHandle );
if( MB_SUCCESS != rval || NULL == tagHandle )
return moab::MB_FAILURE;
tgtTagHandles.push_back( tagHandle );
std::vector< double > solSTagVals;
std::vector< double > solTTagVals;
solSTagVals.resize( covSrcEnts.size(), 0. );
solTTagVals.resize( tgtEnts.size(), 0. );
switch( *filter_type )
for( size_t i = 0; i < srcTagHandles.size(); i++ )
rval = weightMap->ApplyWeights( srcTagHandles[i], tgtTagHandles[i], false, caasType );MB_CHK_ERR( rval );
Tag ssolnTag = srcTagHandles[ivar];
std::stringstream sstr;
sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
std::ofstream output_file( sstr.str().c_str() );
for( unsigned i = 0; i < sents.size(); ++i )
std::vector< double > locsolSTagVals( 16 );
for( unsigned j = 0; j < 16; ++j )
output_file << locsolSTagVals[j] << " ";
output_file.flush();
output_file.close();
std::stringstream sstr;
sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
std::stringstream sstr;
sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
std::stringstream sstr;
sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
std::stringstream sstr;
sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
std::stringstream sstr;
sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
std::ofstream output_file( sstr.str().c_str() );
for( unsigned i = 0; i < solSTagVals.size(); ++i )
output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
            << solSTagVals[i] << "\n";
output_file.flush();
output_file.close();