Mesh Oriented datABase (version 5.5.1)
An array-based unstructured mesh library
iMOAB.cpp
1 /** \file iMOAB.cpp 2  */ 3  4 #include "moab/MOABConfig.h" 5 #include "moab/Core.hpp" 6  7 #ifdef MOAB_HAVE_MPI 8 #include "moab_mpi.h" 9 #include "moab/ParallelComm.hpp" 10 #include "moab/ParCommGraph.hpp" 11 #include "moab/ParallelMergeMesh.hpp" 12 #include "moab/IntxMesh/IntxUtils.hpp" 13 #endif 14 #include "DebugOutput.hpp" 15 #include "moab/iMOAB.h" 16  17 /* this is needed because of direct access to hdf5/mhdf */ 18 #ifdef MOAB_HAVE_HDF5 19 #include "mhdf.h" 20 #include <H5Tpublic.h> 21 #endif 22  23 #include "moab/CartVect.hpp" 24 #include "MBTagConventions.hpp" 25 #include "moab/MeshTopoUtil.hpp" 26 #include "moab/ReadUtilIface.hpp" 27 #include "moab/MergeMesh.hpp" 28  29 #ifdef MOAB_HAVE_TEMPESTREMAP 30 #include "STLStringHelper.h" 31 #include "moab/IntxMesh/IntxUtils.hpp" 32  33 #include "moab/Remapping/TempestRemapper.hpp" 34 #include "moab/Remapping/TempestOnlineMap.hpp" 35 #endif 36  37 // C++ includes 38 #include <cassert> 39 #include <sstream> 40 #include <iostream> 41  42 using namespace moab; 43  44 // #define VERBOSE 45  46 // global variables ; should they be organized in a structure, for easier references? 47 // or how do we keep them global? 48  49 #ifdef __cplusplus 50 extern "C" { 51 #endif 52  53 #ifdef MOAB_HAVE_TEMPESTREMAP 54 struct TempestMapAppData 55 { 56  moab::TempestRemapper* remapper; 57  std::map< std::string, moab::TempestOnlineMap* > weightMaps; 58  iMOAB_AppID pid_src; 59  iMOAB_AppID pid_dest; 60  int num_src_ghost_layers; // number of ghost layers 61  int num_tgt_ghost_layers; // number of ghost layers 62 }; 63 #endif 64  65 struct appData 66 { 67  EntityHandle file_set; // primary file set containing all entities of interest 68  int global_id; // external component id, unique for application 69  std::string name; // name of the application 70  Range all_verts; // local vertices would be all_verts if no ghosting was required 71  Range local_verts; // it could include shared, but not owned at the interface 72  Range owned_verts; // owned_verts <= local_verts <= all_verts 73  Range ghost_vertices; // locally ghosted from other processors 74  Range primary_elems; // all primary entities (owned + ghosted) 75  Range owned_elems; // only owned entities 76  Range ghost_elems; // only ghosted entities (filtered) 77  int dimension; // 2 or 3, dimension of primary elements (redundant?) 78  long num_global_elements; // reunion of all elements in primary_elements; either from hdf5 79  // reading or from reduce 80  long num_global_vertices; // reunion of all nodes, after sharing is resolved; it could be 81  // determined from hdf5 reading 82  int num_ghost_layers; // number of ghost layers 83  Range mat_sets; 84  std::map< int, int > matIndex; // map from global block id to index in mat_sets 85  Range neu_sets; 86  Range diri_sets; 87  std::map< std::string, Tag > tagMap; 88  std::vector< Tag > tagList; 89  bool point_cloud; 90  bool is_fortran; 91  92 #ifdef MOAB_HAVE_MPI 93  ParallelComm* pcomm; 94  // constructor for this ParCommGraph takes the joint comm and the MPI groups for each 95  // application 96  std::map< int, ParCommGraph* > pgraph; // map from context () to the parcommgraph* 97 #endif 98  99 #ifdef MOAB_HAVE_TEMPESTREMAP 100  EntityHandle secondary_file_set; // secondary file set (typically a child set like covering mesh) 101  TempestMapAppData tempestData; 102  std::map< std::string, std::string > metadataMap; 103 #endif 104 }; 105  106 struct GlobalContext 107 { 108  // are there reasons to have multiple moab inits? Is ref count needed? 
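The comments on appData above document the containment relations owned_verts <= local_verts <= all_verts, with ghost_vertices holding the remainder. A minimal standalone sketch (not part of iMOAB.cpp) that checks those invariants with the same moab::Range subtract() helper used later in this file:

#include "moab/Range.hpp"

// Returns true when the documented relations hold:
// owned <= local <= all, and ghost == all minus local.
static bool verify_vertex_ranges( const moab::Range& owned,
                                  const moab::Range& local,
                                  const moab::Range& ghost,
                                  const moab::Range& all )
{
    using moab::subtract;
    const bool owned_in_local = subtract( owned, local ).empty();
    const bool local_in_all   = subtract( local, all ).empty();
    const moab::Range not_local = subtract( all, local );
    const bool ghost_matches =
        subtract( ghost, not_local ).empty() && subtract( not_local, ghost ).empty();
    return owned_in_local && local_in_all && ghost_matches;
}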
109  Interface* MBI; 110  // we should also have the default tags stored, initialized 111  Tag material_tag, neumann_tag, dirichlet_tag, 112  globalID_tag; // material, neumann, dirichlet, globalID 113  int refCountMB; 114  int iArgc; 115  iMOAB_String* iArgv; 116  117  std::map< std::string, int > appIdMap; // from app string (uppercase) to app id 118  std::map< int, appData > appDatas; // the same order as pcomms 119  int globalrank, worldprocs; 120  bool MPI_initialized; 121  122  GlobalContext() 123  { 124  MBI = 0; 125  refCountMB = 0; 126  } 127 }; 128  129 static struct GlobalContext context; 130  131 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv ) 132 { 133  if( argc ) IMOAB_CHECKPOINTER( argv, 1 ); 134  135  context.iArgc = argc; 136  context.iArgv = argv; // shallow copy 137  138  if( 0 == context.refCountMB ) 139  { 140  context.MBI = new( std::nothrow ) moab::Core; 141  // retrieve the default tags 142  const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME, 143  DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME }; 144  // materials (blocks), surface-BC (neumann), vertex-BC (Dirichlet), global-id 145  Tag gtags[4]; 146  for( int i = 0; i < 4; i++ ) 147  { 148  MB_CHK_ERR( 149  context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY ) ); 150  } 151  // now set the relevant (standard) tags for future use 152  context.material_tag = gtags[0]; 153  context.neumann_tag = gtags[1]; 154  context.dirichlet_tag = gtags[2]; 155  context.globalID_tag = gtags[3]; 156  157  context.MPI_initialized = false; 158 #ifdef MOAB_HAVE_MPI 159  int flagInit; 160  MPI_Initialized( &flagInit ); 161  162  if( flagInit && !context.MPI_initialized ) 163  { 164  MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs ); 165  MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank ); 166  context.MPI_initialized = true; 167  } 168 #endif 169  } 170  171  context.refCountMB++; 172  return moab::MB_SUCCESS; 173 } 174  175 ErrCode iMOAB_InitializeFortran() 176 { 177  return iMOAB_Initialize( 0, 0 ); 178 } 179  180 ErrCode iMOAB_Finalize() 181 { 182  context.refCountMB--; 183  184  if( 0 == context.refCountMB ) 185  { 186  delete context.MBI; 187  } 188  189  return MB_SUCCESS; 190 } 191  192 // A string to integer hash function that is fast and robust 193 // Based on the Fowler–Noll–Vo (or FNV) algorithm 194 static int apphash( const iMOAB_String str, int identifier = 0 ) 195 { 196  // A minimal perfect hash function (MPHF) 197  std::string appstr( str ); 198  // Fowler–Noll–Vo (or FNV) is a non-cryptographic hash function created 199  // by Glenn Fowler, Landon Curt Noll, and Kiem-Phong Vo. 
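iMOAB_Initialize() and iMOAB_Finalize() above are reference counted through refCountMB: the moab::Core instance is created on the first call and deleted only when the count returns to zero. A minimal usage sketch, assuming only that an ErrCode of 0 means moab::MB_SUCCESS:

#include "moab/iMOAB.h"

int main( int argc, char** argv )
{
    // first call creates the moab::Core instance and caches the standard tags
    if( iMOAB_Initialize( argc, argv ) != 0 ) return 1;  // 0 == moab::MB_SUCCESS
    // a second component in the same executable may initialize again;
    // only the reference count changes
    if( iMOAB_Initialize( argc, argv ) != 0 ) return 1;

    // ... register applications, load meshes, exchange tags ...

    iMOAB_Finalize();  // count drops to 1, the MOAB instance is kept alive
    iMOAB_Finalize();  // count drops to 0, the moab::Core instance is deleted
    return 0;
}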
200  // Ref: https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function 201  unsigned int h = 2166136261u; // FNV offset basis 202  for( char c : appstr ) 203  { 204  h ^= static_cast< unsigned char >( c ); 205  h *= 16777619u; // FNV prime 206  } 207  // Incorporate the integer into the hash 208  if( identifier ) 209  { 210  h ^= static_cast< unsigned int >( identifier & 0xFFFFFFFF ); 211  h *= 16777619u; // FNV prime again 212  } 213  return static_cast< int >( h & 0x7FFFFFFF ); // Mask to ensure positive value 214 } 215  216 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name, 217 #ifdef MOAB_HAVE_MPI 218  MPI_Comm* comm, 219 #endif 220  int* compid, 221  iMOAB_AppID pid ) 222 { 223  IMOAB_CHECKPOINTER( app_name, 1 ); 224 #ifdef MOAB_HAVE_MPI 225  IMOAB_CHECKPOINTER( comm, 2 ); 226  IMOAB_CHECKPOINTER( compid, 3 ); 227 #else 228  IMOAB_CHECKPOINTER( compid, 2 ); 229 #endif 230  231  // will create a parallel comm for this application too, so there will be a 232  // mapping from *pid to file set and to parallel comm instances 233  std::string name( app_name ); 234  235  if( context.appIdMap.find( name ) != context.appIdMap.end() ) 236  { 237  std::cout << " application " << name << " already registered \n"; 238  return moab::MB_FAILURE; 239  } 240  241  // trivial hash: just use the id that the user provided and assume it is unique 242  // *pid = *compid; 243  // hash: compute a unique hash as a combination of the appname and the component id 244  *pid = apphash( app_name, *compid ); 245  // store map of application name and the ID we just generated 246  context.appIdMap[name] = *pid; 247  int rankHere = 0; 248 #ifdef MOAB_HAVE_MPI 249  MPI_Comm_rank( *comm, &rankHere ); 250 #endif 251  if( !rankHere ) 252  std::cout << " application " << name << " with ID = " << *pid << " and external id: " << *compid 253  << " is registered now \n"; 254  if( *compid <= 0 ) 255  { 256  std::cout << " convention for external application is to have its id positive \n"; 257  return moab::MB_FAILURE; 258  } 259  260  // create now the file set that will be used for loading the model in 261  EntityHandle file_set; 262  ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval ); 263  264  appData app_data; 265  app_data.file_set = file_set; 266  app_data.global_id = *compid; // will be used mostly for par comm graph 267  app_data.name = name; // save the name of application 268  269 #ifdef MOAB_HAVE_TEMPESTREMAP 270  app_data.tempestData.remapper = nullptr; // Only allocate as needed 271  app_data.tempestData.num_src_ghost_layers = 0; 272  app_data.tempestData.num_tgt_ghost_layers = 0; 273 #endif 274  275  // set some default values 276  app_data.num_ghost_layers = 0; 277  app_data.point_cloud = false; 278  app_data.is_fortran = false; 279 #ifdef MOAB_HAVE_TEMPESTREMAP 280  app_data.secondary_file_set = app_data.file_set; 281 #endif 282  283 #ifdef MOAB_HAVE_MPI 284  if( *comm ) app_data.pcomm = new ParallelComm( context.MBI, *comm ); 285 #endif 286  context.appDatas[*pid] = app_data; // it will correspond to app_FileSets[*pid] will be the file set of interest 287  return moab::MB_SUCCESS; 288 } 289  290 ErrCode iMOAB_RegisterApplicationFortran( const iMOAB_String app_name, 291 #ifdef MOAB_HAVE_MPI 292  int* comm, 293 #endif 294  int* compid, 295  iMOAB_AppID pid ) 296 { 297  IMOAB_CHECKPOINTER( app_name, 1 ); 298 #ifdef MOAB_HAVE_MPI 299  IMOAB_CHECKPOINTER( comm, 2 ); 300  IMOAB_CHECKPOINTER( compid, 3 ); 301 #else 302  IMOAB_CHECKPOINTER( compid, 2 
); 303 #endif 304  305  ErrCode err; 306  assert( app_name != nullptr ); 307  std::string name( app_name ); 308  309 #ifdef MOAB_HAVE_MPI 310  MPI_Comm ccomm; 311  if( comm ) 312  { 313  // Convert from Fortran communicator to a C communicator 314  // NOTE: see transfer of handles at 315  // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm 316  ccomm = MPI_Comm_f2c( (MPI_Fint)*comm ); 317  } 318 #endif 319  320  // now call C style registration function: 321  err = iMOAB_RegisterApplication( app_name, 322 #ifdef MOAB_HAVE_MPI 323  &ccomm, 324 #endif 325  compid, pid ); 326  327  // Now that we have created the application context, store that 328  // the application being registered is from a Fortran context 329  context.appDatas[*pid].is_fortran = true; 330  331  return err; 332 } 333  334 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid ) 335 { 336  // the file set , parallel comm are all in vectors indexed by *pid 337  // assume we did not delete anything yet 338  // *pid will not be reused if we register another application 339  auto appIterator = context.appDatas.find( *pid ); 340  // ensure that the application exists before we attempt to delete it 341  if( appIterator == context.appDatas.end() ) return MB_FAILURE; 342  343  // we found the application 344  appData& data = appIterator->second; 345  int rankHere = 0; 346 #ifdef MOAB_HAVE_MPI 347  rankHere = data.pcomm->rank(); 348 #endif 349  if( !rankHere ) 350  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name 351  << " is de-registered now \n"; 352  353  EntityHandle fileSet = data.file_set; 354  // get all entities part of the file set 355  Range fileents; 356  MB_CHK_ERR( context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true ) ); 357  fileents.insert( fileSet ); 358  MB_CHK_ERR( context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents ) ); // append all mesh sets 359  360 #ifdef MOAB_HAVE_TEMPESTREMAP 361  if( data.tempestData.remapper ) delete data.tempestData.remapper; 362  if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear(); 363 #endif 364  365 #ifdef MOAB_HAVE_MPI 366  // NOTE: we could get the pco also with the following workflow. 367  // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid); 368  369  auto& pargs = data.pgraph; 370  // free the parallel comm graphs associated with this app 371  for( auto mt = pargs.begin(); mt != pargs.end(); ++mt ) 372  { 373  ParCommGraph* pgr = mt->second; 374  if( pgr != nullptr ) 375  { 376  delete pgr; 377  pgr = nullptr; 378  } 379  } 380  // now free the ParallelComm resources 381  if( data.pcomm ) 382  { 383  delete data.pcomm; 384  data.pcomm = nullptr; 385  } 386 #endif 387  388  // delete first all except vertices 389  Range vertices = fileents.subset_by_type( MBVERTEX ); 390  Range noverts = subtract( fileents, vertices ); 391  392  MB_CHK_ERR( context.MBI->delete_entities( noverts ) ); 393  // now retrieve connected elements that still exist (maybe in other sets, pids?) 
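The registration functions above are normally used in a fixed order: initialize, register, load, work, deregister, finalize. A minimal driver sketch for an MPI-enabled build (MOAB_HAVE_MPI); the component id, file name, and read options are placeholders:

#include "moab/iMOAB.h"
#include "moab_mpi.h"

int main( int argc, char** argv )
{
    MPI_Init( &argc, &argv );
    iMOAB_Initialize( argc, argv );

    MPI_Comm comm = MPI_COMM_WORLD;
    int compid    = 10;                  // external component id; convention is a positive value
    int appID     = -1;
    iMOAB_AppID pid = &appID;            // receives the hashed application id
    char appname[]  = "EXAMPLE_APP";     // placeholder application name
    iMOAB_RegisterApplication( appname, &comm, &compid, pid );

    int nghlay      = 0;                 // no ghost layers requested
    char meshfile[] = "input_mesh.h5m";  // placeholder path
    char readopts[] = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
    iMOAB_LoadMesh( pid, meshfile, readopts, &nghlay );

    // ... query mesh info, define and exchange tag data ...

    iMOAB_DeregisterApplication( pid );
    iMOAB_Finalize();
    MPI_Finalize();
    return 0;
}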
394  Range adj_ents_left; 395  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION ) ); 396  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION ) ); 397  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION ) ); 398  399  if( !adj_ents_left.empty() ) 400  { 401  Range conn_verts; 402  MB_CHK_ERR( context.MBI->get_connectivity( adj_ents_left, conn_verts ) ); 403  vertices = subtract( vertices, conn_verts ); 404  } 405  406  MB_CHK_ERR( context.MBI->delete_entities( vertices ) ); 407  408  for( auto mit = context.appIdMap.begin(); mit != context.appIdMap.end(); ++mit ) 409  { 410  if( *pid == mit->second ) 411  { 412 #ifdef MOAB_HAVE_MPI 413  if( data.pcomm ) 414  { 415  delete data.pcomm; 416  data.pcomm = nullptr; 417  } 418 #endif 419  context.appIdMap.erase( mit ); 420  break; 421  } 422  } 423  424  // now we can finally delete the application data itself 425  context.appDatas.erase( appIterator ); 426  427  return moab::MB_SUCCESS; 428 } 429  430 ErrCode iMOAB_DeregisterApplicationFortran( iMOAB_AppID pid ) 431 { 432  // release any Fortran specific allocations here before we pass it on 433  context.appDatas[*pid].is_fortran = false; 434  435  // release all datastructure allocations 436  return iMOAB_DeregisterApplication( pid ); 437 } 438  439 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename, 440  int* num_global_vertices, 441  int* num_global_elements, 442  int* num_dimension, 443  int* num_parts ) 444 { 445  IMOAB_CHECKPOINTER( filename, 1 ); 446  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." ); 447  448 #ifdef MOAB_HAVE_HDF5 449  std::string filen( filename ); 450  451  int edges = 0; 452  int faces = 0; 453  int regions = 0; 454  if( num_global_vertices ) *num_global_vertices = 0; 455  if( num_global_elements ) *num_global_elements = 0; 456  if( num_dimension ) *num_dimension = 0; 457  if( num_parts ) *num_parts = 0; 458  459  mhdf_FileHandle file; 460  mhdf_Status status; 461  unsigned long max_id; 462  struct mhdf_FileDesc* data; 463  464  file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status ); 465  466  if( mhdf_isError( &status ) ) 467  { 468  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) ); 469  return moab::MB_FAILURE; 470  } 471  472  data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status, 473  1 ); // will use extra set info; will get parallel partition tag info too! 
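iMOAB_ReadHeaderInfo(), whose body continues below, lets a caller size buffers from the h5m header without loading the mesh. A small usage sketch, assuming an HDF5-enabled build and a placeholder file name:

#include "moab/iMOAB.h"
#include <cstdio>

void print_header_counts()
{
    char filename[] = "input_mesh.h5m";  // placeholder path
    int nverts = 0, nelems = 0, ndim = 0, nparts = 0;
    if( iMOAB_ReadHeaderInfo( filename, &nverts, &nelems, &ndim, &nparts ) == 0 )
        std::printf( "%d vertices, %d elements of dimension %d, %d parts\n",
                     nverts, nelems, ndim, nparts );
}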
474  475  if( mhdf_isError( &status ) ) 476  { 477  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) ); 478  return moab::MB_FAILURE; 479  } 480  481  if( num_dimension ) *num_dimension = data->nodes.vals_per_ent; 482  if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count; 483  484  for( int i = 0; i < data->num_elem_desc; i++ ) 485  { 486  struct mhdf_ElemDesc* el_desc = &( data->elems[i] ); 487  struct mhdf_EntDesc* ent_d = &( el_desc->desc ); 488  489  if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) ) 490  { 491  edges += ent_d->count; 492  } 493  494  if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) ) 495  { 496  faces += ent_d->count; 497  } 498  499  if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) ) 500  { 501  faces += ent_d->count; 502  } 503  504  if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) ) 505  { 506  faces += ent_d->count; 507  } 508  509  if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) ) 510  { 511  regions += ent_d->count; 512  } 513  514  if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) ) 515  { 516  regions += ent_d->count; 517  } 518  519  if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) ) 520  { 521  regions += ent_d->count; 522  } 523  524  if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) ) 525  { 526  regions += ent_d->count; 527  } 528  529  if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) ) 530  { 531  regions += ent_d->count; 532  } 533  534  if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) ) 535  { 536  regions += ent_d->count; 537  } 538  539  if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) ) 540  { 541  regions += ent_d->count; 542  } 543  } 544  545  if( num_parts ) *num_parts = data->numEntSets[0]; 546  547  // is this required? 548  if( edges > 0 ) 549  { 550  if( num_dimension ) *num_dimension = 1; // I don't think it will ever return 1 551  if( num_global_elements ) *num_global_elements = edges; 552  } 553  554  if( faces > 0 ) 555  { 556  if( num_dimension ) *num_dimension = 2; 557  if( num_global_elements ) *num_global_elements = faces; 558  } 559  560  if( regions > 0 ) 561  { 562  if( num_dimension ) *num_dimension = 3; 563  if( num_global_elements ) *num_global_elements = regions; 564  } 565  566  mhdf_closeFile( file, &status ); 567  568  free( data ); 569  570 #else 571  std::cout << filename 572  << ": Please reconfigure with HDF5. Cannot retrieve header information for file " 573  "formats other than a h5m file.\n"; 574  if( num_global_vertices ) *num_global_vertices = 0; 575  if( num_global_elements ) *num_global_elements = 0; 576  if( num_dimension ) *num_dimension = 0; 577  if( num_parts ) *num_parts = 0; 578 #endif 579  580  return moab::MB_SUCCESS; 581 } 582  583 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid, 584  const iMOAB_String filename, 585  const iMOAB_String read_options, 586  int* num_ghost_layers ) 587 { 588  IMOAB_CHECKPOINTER( filename, 2 ); 589  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." ); 590  IMOAB_CHECKPOINTER( num_ghost_layers, 4 ); 591  592  // make sure we use the file set and pcomm associated with the *pid 593  std::ostringstream newopts; 594  if( read_options ) newopts << read_options; 595  596 #ifdef MOAB_HAVE_MPI 597  598  if( context.MPI_initialized ) 599  { 600  if( context.worldprocs > 1 ) 601  { 602  std::string opts( ( read_options ? 
read_options : "" ) ); 603  std::string pcid( "PARALLEL_COMM=" ); 604  std::size_t found = opts.find( pcid ); 605  606  if( found != std::string::npos ) 607  { 608  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n"; 609  return moab::MB_FAILURE; 610  } 611  612  // in serial, apply PARALLEL_COMM option only for h5m files; it does not work for .g 613  // files (used in test_remapping) 614  std::string filen( filename ); 615  std::string::size_type idx = filen.rfind( '.' ); 616  617  if( idx != std::string::npos ) 618  { 619  ParallelComm* pco = context.appDatas[*pid].pcomm; 620  std::string extension = filen.substr( idx + 1 ); 621  if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) ) 622  newopts << ";;PARALLEL_COMM=" << pco->get_id(); 623  } 624  625  if( *num_ghost_layers >= 1 ) 626  { 627  // if we want ghosts, we will want additional entities, the last .1 628  // because the addl ents can be edges, faces that are part of the neumann sets 629  std::string pcid2( "PARALLEL_GHOSTS=" ); 630  std::size_t found2 = opts.find( pcid2 ); 631  632  if( found2 != std::string::npos ) 633  { 634  std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed " 635  "number of layers \n"; 636  } 637  else 638  { 639  // dimension of primary entities is 3 here, but it could be 2 for climate 640  // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for 641  // example: ";PARALLEL_GHOSTS=2.0.1" 642  newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3"; 643  } 644  } 645  } 646  } 647 #else 648  IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." ); 649 #endif 650  651  // Now let us actually load the MOAB file with the appropriate read options 652  ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval ); 653  654 #ifdef VERBOSE 655  // some debugging stuff 656  std::ostringstream outfile; 657 #ifdef MOAB_HAVE_MPI 658  ParallelComm* pco = context.appDatas[*pid].pcomm; 659  int rank = pco->rank(); 660  int nprocs = pco->size(); 661  outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m"; 662 #else 663  outfile << "TaskMesh_n1.0.h5m"; 664 #endif 665  // the mesh contains ghosts too, but they are not part of mat/neumann set 666  // write in serial the file, to see what tags are missing 667  rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval ); // everything on current task, written in serial 668 #endif 669  670  // Update ghost layer information 671  context.appDatas[*pid].num_ghost_layers = *num_ghost_layers; 672  673  // Update mesh information 674  return iMOAB_UpdateMeshInfo( pid ); 675 } 676  677 static ErrCode internal_WriteMesh( iMOAB_AppID pid, 678  const iMOAB_String filename, 679  const iMOAB_String write_options, 680  bool primary_set = true ) 681 { 682  IMOAB_CHECKPOINTER( filename, 2 ); 683  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." ); 684  685  appData& data = context.appDatas[*pid]; 686  EntityHandle fileSet = data.file_set; 687  688  std::ostringstream newopts; 689 #ifdef MOAB_HAVE_MPI 690  std::string write_opts( ( write_options ? 
write_options : "" ) ); 691  std::string pcid( "PARALLEL_COMM=" ); 692  693  if( write_opts.find( pcid ) != std::string::npos ) 694  { 695  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n"; 696  return moab::MB_FAILURE; 697  } 698  699  // if write in parallel, add pc option, to be sure about which ParallelComm instance is used 700  std::string pw( "PARALLEL=WRITE_PART" ); 701  if( write_opts.find( pw ) != std::string::npos ) 702  { 703  ParallelComm* pco = data.pcomm; 704  newopts << "PARALLEL_COMM=" << pco->get_id() << ";"; 705  } 706  707 #endif 708  709 #ifdef MOAB_HAVE_TEMPESTREMAP 710  if( !primary_set ) 711  { 712  if( data.tempestData.remapper != nullptr ) 713  fileSet = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh ); 714  else if( data.file_set != data.secondary_file_set ) 715  fileSet = data.secondary_file_set; 716  else 717  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid secondary file set handle" ); 718  } 719 #endif 720  721  // append user write options to the one we have built 722  if( write_options ) newopts << write_options; 723  724  std::vector< Tag > copyTagList = data.tagList; 725  // append Global ID and Parallel Partition 726  std::string gid_name_tag( "GLOBAL_ID" ); 727  728  // export global id tag, we need it always 729  if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() ) 730  { 731  Tag gid = context.MBI->globalId_tag(); 732  copyTagList.push_back( gid ); 733  } 734  // also Parallel_Partition PARALLEL_PARTITION 735  std::string pp_name_tag( "PARALLEL_PARTITION" ); 736  737  // write parallel part tag too, if it exists 738  if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() ) 739  { 740  Tag ptag; 741  context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag ); 742  if( ptag ) copyTagList.push_back( ptag ); 743  } 744  745  // Now let us actually write the file to disk with appropriate options 746  // MB_CHK_ERR( context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, copyTagList.data(), 747  // copyTagList.size() ) ); 748  MB_CHK_ERR( context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1 ) ); 749  750  return moab::MB_SUCCESS; 751 } 752  753 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options ) 754 { 755  return internal_WriteMesh( pid, filename, write_options ); 756 } 757  758 ErrCode iMOAB_WriteLocalMesh( iMOAB_AppID pid, iMOAB_String prefix ) 759 { 760  IMOAB_CHECKPOINTER( prefix, 2 ); 761  IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." 
); 762  763  std::ostringstream file_name; 764  int rank = 0, size = 1; 765 #ifdef MOAB_HAVE_MPI 766  ParallelComm* pcomm = context.appDatas[*pid].pcomm; 767  rank = pcomm->rank(); 768  size = pcomm->size(); 769 #endif 770  file_name << prefix << "_" << size << "_" << rank << ".h5m"; 771  // Now let us actually write the file to disk with appropriate options 772  ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval ); 773  774  return moab::MB_SUCCESS; 775 } 776  777 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid ) 778 { 779  // this will include ghost elements info 780  appData& data = context.appDatas[*pid]; 781  EntityHandle fileSet = data.file_set; 782  783  // first clear all data ranges; this can be called after ghosting 784  data.all_verts.clear(); 785  data.primary_elems.clear(); 786  data.local_verts.clear(); 787  data.owned_verts.clear(); 788  data.ghost_vertices.clear(); 789  data.owned_elems.clear(); 790  data.ghost_elems.clear(); 791  data.mat_sets.clear(); 792  data.neu_sets.clear(); 793  data.diri_sets.clear(); 794  795  // Let us get all the vertex entities 796  ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval ); // recursive 797  798  // Let us check first entities of dimension = 3 799  data.dimension = 3; 800  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive 801  802  if( data.primary_elems.empty() ) 803  { 804  // Now 3-D elements. Let us check entities of dimension = 2 805  data.dimension = 2; 806  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive 807  808  if( data.primary_elems.empty() ) 809  { 810  // Now 3-D/2-D elements. 
Let us check entities of dimension = 1 811  data.dimension = 1; 812  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive 813  814  if( data.primary_elems.empty() ) 815  { 816  // no elements of dimension 1 or 2 or 3; it could happen for point clouds 817  data.dimension = 0; 818  } 819  } 820  } 821  822  // check if the current mesh is just a point cloud 823  data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 ); 824  825 #ifdef MOAB_HAVE_MPI 826  827  if( context.MPI_initialized ) 828  { 829  ParallelComm* pco = context.appDatas[*pid].pcomm; 830  831  // filter ghost vertices, from local 832  rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval ); 833  834  // Store handles for all ghosted entities 835  data.ghost_vertices = subtract( data.all_verts, data.local_verts ); 836  837  // filter ghost elements, from local 838  rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval ); 839  840  data.ghost_elems = subtract( data.primary_elems, data.owned_elems ); 841  // now update global number of primary cells and global number of vertices 842  // determine first number of owned vertices 843  // Get local owned vertices 844  rval = pco->filter_pstatus( data.all_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &data.owned_verts );MB_CHK_ERR( rval ); 845  int local[2], global[2]; 846  local[0] = data.owned_verts.size(); 847  local[1] = data.owned_elems.size(); 848  MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() ); 849  rval = iMOAB_SetGlobalInfo( pid, &( global[0] ), &( global[1] ) );MB_CHK_ERR( rval ); 850  } 851  else 852  { 853  data.local_verts = data.all_verts; 854  data.owned_elems = data.primary_elems; 855  } 856  857 #else 858  859  data.local_verts = data.all_verts; 860  data.owned_elems = data.primary_elems; 861  862 #endif 863  864  // Get the references for some standard internal tags such as material blocks, BCs, etc 865  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1, 866  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval ); 867  868  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1, 869  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval ); 870  871  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1, 872  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval ); 873  874  return moab::MB_SUCCESS; 875 } 876  877 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid, 878  int* num_visible_vertices, 879  int* num_visible_elements, 880  int* num_visible_blocks, 881  int* num_visible_surfaceBC, 882  int* num_visible_vertexBC ) 883 { 884  ErrorCode rval; 885  appData& data = context.appDatas[*pid]; 886  EntityHandle fileSet = data.file_set; 887  888  // this will include ghost elements 889  // first clear all data ranges; this can be called after ghosting 890  if( num_visible_elements ) 891  { 892  num_visible_elements[2] = static_cast< int >( data.primary_elems.size() ); 893  // separate ghost and local/owned primary elements 894  num_visible_elements[0] = static_cast< int >( data.owned_elems.size() ); 895  num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() ); 896  } 897  if( num_visible_vertices ) 898  { 899  num_visible_vertices[2] = static_cast< int >( 
data.all_verts.size() ); 900  num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() ); 901  // local are those that are not ghosts; they may include shared, not owned vertices 902  num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1]; 903  } 904  905  if( num_visible_blocks ) 906  { 907  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1, 908  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval ); 909  910  num_visible_blocks[2] = data.mat_sets.size(); 911  num_visible_blocks[0] = num_visible_blocks[2]; 912  num_visible_blocks[1] = 0; 913  } 914  915  if( num_visible_surfaceBC ) 916  { 917  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1, 918  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval ); 919  920  num_visible_surfaceBC[2] = 0; 921  // count how many faces are in each neu set, and how many regions are 922  // adjacent to them; 923  int numNeuSets = (int)data.neu_sets.size(); 924  925  for( int i = 0; i < numNeuSets; i++ ) 926  { 927  Range subents; 928  EntityHandle nset = data.neu_sets[i]; 929  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval ); 930  931  for( Range::iterator it = subents.begin(); it != subents.end(); ++it ) 932  { 933  EntityHandle subent = *it; 934  Range adjPrimaryEnts; 935  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval ); 936  937  num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size(); 938  } 939  } 940  941  num_visible_surfaceBC[0] = num_visible_surfaceBC[2]; 942  num_visible_surfaceBC[1] = 0; 943  } 944  945  if( num_visible_vertexBC ) 946  { 947  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1, 948  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval ); 949  950  num_visible_vertexBC[2] = 0; 951  int numDiriSets = (int)data.diri_sets.size(); 952  953  for( int i = 0; i < numDiriSets; i++ ) 954  { 955  Range verts; 956  EntityHandle diset = data.diri_sets[i]; 957  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval ); 958  959  num_visible_vertexBC[2] += (int)verts.size(); 960  } 961  962  num_visible_vertexBC[0] = num_visible_vertexBC[2]; 963  num_visible_vertexBC[1] = 0; 964  } 965  966  return moab::MB_SUCCESS; 967 } 968  969 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID ) 970 { 971  IMOAB_CHECKPOINTER( vertices_length, 2 ); 972  IMOAB_CHECKPOINTER( global_vertex_ID, 3 ); 973  974  const Range& verts = context.appDatas[*pid].all_verts; 975  // check for problems with array length 976  IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" ); 977  978  // global id tag is context.globalID_tag 979  return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID ); 980 } 981  982 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID ) 983 { 984  assert( vertices_length && *vertices_length ); 985  assert( visible_global_rank_ID ); 986  987  Range& verts = context.appDatas[*pid].all_verts; 988  int i = 0; 989 #ifdef MOAB_HAVE_MPI 990  ParallelComm* pco = context.appDatas[*pid].pcomm; 991  992  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ ) 993  { 994  ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] 
);MB_CHK_ERR( rval ); 995  } 996  997  if( i != *vertices_length ) 998  { 999  return moab::MB_FAILURE; 1000  } // warning array allocation problem 1001  1002 #else 1003  1004  /* everything owned by proc 0 */ 1005  if( (int)verts.size() != *vertices_length ) 1006  { 1007  return moab::MB_FAILURE; 1008  } // warning array allocation problem 1009  1010  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ ) 1011  { 1012  visible_global_rank_ID[i] = 0; 1013  } // all vertices are owned by processor 0, as this is serial run 1014  1015 #endif 1016  1017  return moab::MB_SUCCESS; 1018 } 1019  1020 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates ) 1021 { 1022  Range& verts = context.appDatas[*pid].all_verts; 1023  1024  // interleaved coordinates, so that means deep copy anyway 1025  if( *coords_length != 3 * (int)verts.size() ) 1026  { 1027  return moab::MB_FAILURE; 1028  } 1029  1030  ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval ); 1031  1032  return moab::MB_SUCCESS; 1033 } 1034  1035 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs ) 1036 { 1037  // local id blocks? they are counted from 0 to number of visible blocks ... 1038  // will actually return material set tag value for global 1039  Range& matSets = context.appDatas[*pid].mat_sets; 1040  1041  if( *block_length != (int)matSets.size() ) 1042  { 1043  return moab::MB_FAILURE; 1044  } 1045  1046  // return material set tag gtags[0 is material set tag 1047  ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval ); 1048  1049  // populate map with index 1050  std::map< int, int >& matIdx = context.appDatas[*pid].matIndex; 1051  for( unsigned i = 0; i < matSets.size(); i++ ) 1052  { 1053  matIdx[global_block_IDs[i]] = i; 1054  } 1055  1056  return moab::MB_SUCCESS; 1057 } 1058  1059 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid, 1060  iMOAB_GlobalID* global_block_ID, 1061  int* vertices_per_element, 1062  int* num_elements_in_block ) 1063 { 1064  assert( global_block_ID ); 1065  1066  std::map< int, int >& matMap = context.appDatas[*pid].matIndex; 1067  std::map< int, int >::iterator it = matMap.find( *global_block_ID ); 1068  1069  if( it == matMap.end() ) 1070  { 1071  return moab::MB_FAILURE; 1072  } // error in finding block with id 1073  1074  int blockIndex = matMap[*global_block_ID]; 1075  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex]; 1076  Range blo_elems; 1077  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems ); 1078  1079  if( MB_SUCCESS != rval || blo_elems.empty() ) 1080  { 1081  return moab::MB_FAILURE; 1082  } 1083  1084  EntityType type = context.MBI->type_from_handle( blo_elems[0] ); 1085  1086  if( !blo_elems.all_of_type( type ) ) 1087  { 1088  return moab::MB_FAILURE; 1089  } // not all of same type 1090  1091  const EntityHandle* conn = nullptr; 1092  int num_verts = 0; 1093  rval = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval ); 1094  1095  *vertices_per_element = num_verts; 1096  *num_elements_in_block = (int)blo_elems.size(); 1097  1098  return moab::MB_SUCCESS; 1099 } 1100  1101 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid, 1102  int* num_visible_elements, 1103  iMOAB_GlobalID* element_global_IDs, 1104  int* ranks, 1105  iMOAB_GlobalID* block_IDs ) 1106 { 1107  appData& data = context.appDatas[*pid]; 1108 #ifdef 
MOAB_HAVE_MPI 1109  ParallelComm* pco = context.appDatas[*pid].pcomm; 1110 #endif 1111  1112  ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval ); 1113  1114  int i = 0; 1115  1116  for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i ) 1117  { 1118 #ifdef MOAB_HAVE_MPI 1119  rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval ); 1120  1121 #else 1122  /* everything owned by task 0 */ 1123  ranks[i] = 0; 1124 #endif 1125  } 1126  1127  for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit ) 1128  { 1129  EntityHandle matMeshSet = *mit; 1130  Range elems; 1131  rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval ); 1132  1133  int valMatTag; 1134  rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval ); 1135  1136  for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit ) 1137  { 1138  EntityHandle eh = *eit; 1139  int index = data.primary_elems.index( eh ); 1140  1141  if( -1 == index ) 1142  { 1143  return moab::MB_FAILURE; 1144  } 1145  1146  if( -1 >= *num_visible_elements ) 1147  { 1148  return moab::MB_FAILURE; 1149  } 1150  1151  block_IDs[index] = valMatTag; 1152  } 1153  } 1154  1155  return moab::MB_SUCCESS; 1156 } 1157  1158 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid, 1159  iMOAB_GlobalID* global_block_ID, 1160  int* connectivity_length, 1161  int* element_connectivity ) 1162 { 1163  assert( global_block_ID ); // ensure global block ID argument is not null 1164  assert( connectivity_length ); // ensure connectivity length argument is not null 1165  1166  appData& data = context.appDatas[*pid]; 1167  std::map< int, int >& matMap = data.matIndex; 1168  std::map< int, int >::iterator it = matMap.find( *global_block_ID ); 1169  1170  if( it == matMap.end() ) 1171  { 1172  return moab::MB_FAILURE; 1173  } // error in finding block with id 1174  1175  int blockIndex = matMap[*global_block_ID]; 1176  EntityHandle matMeshSet = data.mat_sets[blockIndex]; 1177  std::vector< EntityHandle > elems; 1178  1179  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval ); 1180  1181  if( elems.empty() ) 1182  { 1183  return moab::MB_FAILURE; 1184  } 1185  1186  std::vector< EntityHandle > vconnect; 1187  rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval ); 1188  1189  if( *connectivity_length != (int)vconnect.size() ) 1190  { 1191  return moab::MB_FAILURE; 1192  } // mismatched sizes 1193  1194  for( int i = 0; i < *connectivity_length; i++ ) 1195  { 1196  int inx = data.all_verts.index( vconnect[i] ); 1197  1198  if( -1 == inx ) 1199  { 1200  return moab::MB_FAILURE; 1201  } // error, vertex not in local range 1202  1203  element_connectivity[i] = inx; 1204  } 1205  1206  return moab::MB_SUCCESS; 1207 } 1208  1209 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid, 1210  iMOAB_LocalID* elem_index, 1211  int* connectivity_length, 1212  int* element_connectivity ) 1213 { 1214  assert( elem_index ); // ensure element index argument is not null 1215  assert( connectivity_length ); // ensure connectivity length argument is not null 1216  1217  appData& data = context.appDatas[*pid]; 1218  assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) ); 1219  1220  int num_nodes; 1221  const EntityHandle* conn; 1222  1223  EntityHandle eh = 
data.primary_elems[*elem_index]; 1224  1225  ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval ); 1226  1227  if( *connectivity_length < num_nodes ) 1228  { 1229  return moab::MB_FAILURE; 1230  } // wrong number of vertices 1231  1232  for( int i = 0; i < num_nodes; i++ ) 1233  { 1234  int index = data.all_verts.index( conn[i] ); 1235  1236  if( -1 == index ) 1237  { 1238  return moab::MB_FAILURE; 1239  } 1240  1241  element_connectivity[i] = index; 1242  } 1243  1244  *connectivity_length = num_nodes; 1245  1246  return moab::MB_SUCCESS; 1247 } 1248  1249 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid, 1250  iMOAB_GlobalID* global_block_ID, 1251  int* num_elements_in_block, 1252  int* element_ownership ) 1253 { 1254  assert( global_block_ID ); // ensure global block ID argument is not null 1255  assert( num_elements_in_block ); // ensure number of elements in block argument is not null 1256  1257  std::map< int, int >& matMap = context.appDatas[*pid].matIndex; 1258  1259  std::map< int, int >::iterator it = matMap.find( *global_block_ID ); 1260  1261  if( it == matMap.end() ) 1262  { 1263  return moab::MB_FAILURE; 1264  } // error in finding block with id 1265  1266  int blockIndex = matMap[*global_block_ID]; 1267  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex]; 1268  Range elems; 1269  1270  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval ); 1271  1272  if( elems.empty() ) 1273  { 1274  return moab::MB_FAILURE; 1275  } 1276  1277  if( *num_elements_in_block != (int)elems.size() ) 1278  { 1279  return moab::MB_FAILURE; 1280  } // bad memory allocation 1281  1282  int i = 0; 1283 #ifdef MOAB_HAVE_MPI 1284  ParallelComm* pco = context.appDatas[*pid].pcomm; 1285 #endif 1286  1287  for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ ) 1288  { 1289 #ifdef MOAB_HAVE_MPI 1290  rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval ); 1291 #else 1292  element_ownership[i] = 0; /* owned by 0 */ 1293 #endif 1294  } 1295  1296  return moab::MB_SUCCESS; 1297 } 1298  1299 ErrCode iMOAB_GetElementID( iMOAB_AppID pid, 1300  iMOAB_GlobalID* global_block_ID, 1301  int* num_elements_in_block, 1302  iMOAB_GlobalID* global_element_ID, 1303  iMOAB_LocalID* local_element_ID ) 1304 { 1305  assert( global_block_ID ); // ensure global block ID argument is not null 1306  assert( num_elements_in_block ); // ensure number of elements in block argument is not null 1307  1308  appData& data = context.appDatas[*pid]; 1309  std::map< int, int >& matMap = data.matIndex; 1310  1311  std::map< int, int >::iterator it = matMap.find( *global_block_ID ); 1312  1313  if( it == matMap.end() ) 1314  { 1315  return moab::MB_FAILURE; 1316  } // error in finding block with id 1317  1318  int blockIndex = matMap[*global_block_ID]; 1319  EntityHandle matMeshSet = data.mat_sets[blockIndex]; 1320  Range elems; 1321  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval ); 1322  1323  if( elems.empty() ) 1324  { 1325  return moab::MB_FAILURE; 1326  } 1327  1328  if( *num_elements_in_block != (int)elems.size() ) 1329  { 1330  return moab::MB_FAILURE; 1331  } // bad memory allocation 1332  1333  rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval ); 1334  1335  // check that elems are among primary_elems in data 1336  for( int i = 0; i < *num_elements_in_block; i++ ) 1337  { 1338  local_element_ID[i] = 
data.primary_elems.index( elems[i] ); 1339  1340  if( -1 == local_element_ID[i] ) 1341  { 1342  return moab::MB_FAILURE; 1343  } // error, not in local primary elements 1344  } 1345  1346  return moab::MB_SUCCESS; 1347 } 1348  1349 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid, 1350  int* surface_BC_length, 1351  iMOAB_LocalID* local_element_ID, 1352  int* reference_surface_ID, 1353  int* boundary_condition_value ) 1354 { 1355  // we have to fill bc data for neumann sets;/ 1356  ErrorCode rval; 1357  1358  // it was counted above, in GetMeshInfo 1359  appData& data = context.appDatas[*pid]; 1360  int numNeuSets = (int)data.neu_sets.size(); 1361  1362  int index = 0; // index [0, surface_BC_length) for the arrays returned 1363  1364  for( int i = 0; i < numNeuSets; i++ ) 1365  { 1366  Range subents; 1367  EntityHandle nset = data.neu_sets[i]; 1368  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval ); 1369  1370  int neuVal; 1371  rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval ); 1372  1373  for( Range::iterator it = subents.begin(); it != subents.end(); ++it ) 1374  { 1375  EntityHandle subent = *it; 1376  Range adjPrimaryEnts; 1377  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval ); 1378  1379  // get global id of the primary ents, and side number of the quad/subentity 1380  // this is moab ordering 1381  for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ ) 1382  { 1383  EntityHandle primaryEnt = *pit; 1384  // get global id 1385  /*int globalID; 1386  rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID); 1387  if (MB_SUCCESS!=rval) 1388  return moab::MB_FAILURE; 1389  global_element_ID[index] = globalID;*/ 1390  1391  // get local element id 1392  local_element_ID[index] = data.primary_elems.index( primaryEnt ); 1393  1394  if( -1 == local_element_ID[index] ) 1395  { 1396  return moab::MB_FAILURE; 1397  } // did not find the element locally 1398  1399  int side_number, sense, offset; 1400  rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval ); 1401  1402  reference_surface_ID[index] = side_number + 1; // moab is from 0 to 5, it needs 1 to 6 1403  boundary_condition_value[index] = neuVal; 1404  index++; 1405  } 1406  } 1407  } 1408  1409  if( index != *surface_BC_length ) 1410  { 1411  return moab::MB_FAILURE; 1412  } // error in array allocations 1413  1414  return moab::MB_SUCCESS; 1415 } 1416  1417 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid, 1418  int* vertex_BC_length, 1419  iMOAB_LocalID* local_vertex_ID, 1420  int* boundary_condition_value ) 1421 { 1422  ErrorCode rval; 1423  1424  // it was counted above, in GetMeshInfo 1425  appData& data = context.appDatas[*pid]; 1426  int numDiriSets = (int)data.diri_sets.size(); 1427  int index = 0; // index [0, *vertex_BC_length) for the arrays returned 1428  1429  for( int i = 0; i < numDiriSets; i++ ) 1430  { 1431  Range verts; 1432  EntityHandle diset = data.diri_sets[i]; 1433  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval ); 1434  1435  int diriVal; 1436  rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval ); 1437  1438  for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit ) 1439  { 1440  EntityHandle vt = *vit; 1441  /*int vgid; 1442  rval = context.MBI->tag_get_data(gtags[3], &vt, 
1, &vgid); 1443  if (MB_SUCCESS!=rval) 1444  return moab::MB_FAILURE; 1445  global_vertext_ID[index] = vgid;*/ 1446  local_vertex_ID[index] = data.all_verts.index( vt ); 1447  1448  if( -1 == local_vertex_ID[index] ) 1449  { 1450  return moab::MB_FAILURE; 1451  } // vertex was not found 1452  1453  boundary_condition_value[index] = diriVal; 1454  index++; 1455  } 1456  } 1457  1458  if( *vertex_BC_length != index ) 1459  { 1460  return moab::MB_FAILURE; 1461  } // array allocation issue 1462  1463  return moab::MB_SUCCESS; 1464 } 1465  1466 // Utility function 1467 static void split_tag_names( std::string input_names, 1468  std::string& separator, 1469  std::vector< std::string >& list_tag_names ) 1470 { 1471  size_t pos = 0; 1472  std::string token; 1473  while( ( pos = input_names.find( separator ) ) != std::string::npos ) 1474  { 1475  token = input_names.substr( 0, pos ); 1476  if( !token.empty() ) list_tag_names.push_back( token ); 1477  // std::cout << token << std::endl; 1478  input_names.erase( 0, pos + separator.length() ); 1479  } 1480  if( !input_names.empty() ) 1481  { 1482  // if leftover something, or if not ended with delimiter 1483  list_tag_names.push_back( input_names ); 1484  } 1485  return; 1486 } 1487  1488 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid, 1489  const iMOAB_String tag_storage_name, 1490  int* tag_type, 1491  int* components_per_entity, 1492  int* tag_index ) 1493 { 1494  // we have 6 types of tags supported so far 1495  // check if tag type is valid 1496  if( *tag_type < 0 || *tag_type > 5 ) return moab::MB_FAILURE; 1497  1498  DataType tagDataType; 1499  TagType tagType; 1500  void* defaultVal = nullptr; 1501  int* defInt = new int[*components_per_entity]; 1502  double* defDouble = new double[*components_per_entity]; 1503  EntityHandle* defHandle = new EntityHandle[*components_per_entity]; 1504  1505  for( int i = 0; i < *components_per_entity; i++ ) 1506  { 1507  defInt[i] = 0; 1508  defDouble[i] = -1e+10; 1509  defHandle[i] = static_cast< EntityHandle >( 0 ); 1510  } 1511  1512  switch( *tag_type ) 1513  { 1514  case 0: 1515  tagDataType = MB_TYPE_INTEGER; 1516  tagType = MB_TAG_DENSE; 1517  defaultVal = defInt; 1518  break; 1519  1520  case 1: 1521  tagDataType = MB_TYPE_DOUBLE; 1522  tagType = MB_TAG_DENSE; 1523  defaultVal = defDouble; 1524  break; 1525  1526  case 2: 1527  tagDataType = MB_TYPE_HANDLE; 1528  tagType = MB_TAG_DENSE; 1529  defaultVal = defHandle; 1530  break; 1531  1532  case 3: 1533  tagDataType = MB_TYPE_INTEGER; 1534  tagType = MB_TAG_SPARSE; 1535  defaultVal = defInt; 1536  break; 1537  1538  case 4: 1539  tagDataType = MB_TYPE_DOUBLE; 1540  tagType = MB_TAG_SPARSE; 1541  defaultVal = defDouble; 1542  break; 1543  1544  case 5: 1545  tagDataType = MB_TYPE_HANDLE; 1546  tagType = MB_TAG_SPARSE; 1547  defaultVal = defHandle; 1548  break; 1549  1550  default: { 1551  delete[] defInt; 1552  delete[] defDouble; 1553  delete[] defHandle; 1554  return moab::MB_FAILURE; 1555  } // error 1556  } 1557  1558  // split storage names if separated list 1559  std::string tag_name( tag_storage_name ); 1560  // first separate the names of the tags 1561  // we assume that there are separators ":" between the tag names 1562  std::vector< std::string > tagNames; 1563  std::string separator( ":" ); 1564  split_tag_names( tag_name, separator, tagNames ); 1565  1566  ErrorCode rval = moab::MB_SUCCESS; // assume success already :) 1567  appData& data = context.appDatas[*pid]; 1568  int already_defined_tags = 0; 1569  1570  Tag tagHandle; 1571  for( 
size_t i = 0; i < tagNames.size(); i++ ) 1572  { 1573  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle, 1574  tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal ); 1575  1576  if( MB_ALREADY_ALLOCATED == rval ) 1577  { 1578  std::map< std::string, Tag >& mTags = data.tagMap; 1579  std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() ); 1580  1581  if( mit == mTags.end() ) 1582  { 1583  // add it to the map 1584  mTags[tagNames[i]] = tagHandle; 1585  // push it to the list of tags, too 1586  *tag_index = (int)data.tagList.size(); 1587  data.tagList.push_back( tagHandle ); 1588  } 1589  rval = MB_SUCCESS; 1590  already_defined_tags++; 1591  } 1592  else if( MB_SUCCESS == rval ) 1593  { 1594  data.tagMap[tagNames[i]] = tagHandle; 1595  *tag_index = (int)data.tagList.size(); 1596  data.tagList.push_back( tagHandle ); 1597  } 1598  else 1599  { 1600  rval = moab::MB_FAILURE; // some tags were not created 1601  } 1602  } 1603  // we don't need default values anymore, avoid leaks 1604  int rankHere = 0; 1605 #ifdef MOAB_HAVE_MPI 1606  ParallelComm* pco = context.appDatas[*pid].pcomm; 1607  rankHere = pco->rank(); 1608 #endif 1609  if( !rankHere && already_defined_tags ) 1610  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name 1611  << " has " << already_defined_tags << " already defined tags out of " << tagNames.size() 1612  << " tags \n"; 1613  delete[] defInt; 1614  delete[] defDouble; 1615  delete[] defHandle; 1616  return rval; 1617 } 1618  1619 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid, 1620  const iMOAB_String tag_storage_name, 1621  int* num_tag_storage_length, 1622  int* ent_type, 1623  int* tag_storage_data ) 1624 { 1625  std::string tag_name( tag_storage_name ); 1626  1627  // Get the application data 1628  appData& data = context.appDatas[*pid]; 1629  // check if tag is defined 1630  if( data.tagMap.find( tag_name ) == data.tagMap.end() ) return moab::MB_FAILURE; 1631  1632  Tag tag = data.tagMap[tag_name]; 1633  1634  int tagLength = 0; 1635  MB_CHK_ERR( context.MBI->tag_get_length( tag, tagLength ) ); 1636  1637  DataType dtype; 1638  MB_CHK_ERR( context.MBI->tag_get_data_type( tag, dtype ) ); 1639  1640  if( dtype != MB_TYPE_INTEGER ) 1641  { 1642  MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." ); 1643  } 1644  1645  // set it on a subset of entities, based on type and length 1646  // if *entity_type = 0, then use vertices; else elements 1647  Range* ents_to_set = ( *ent_type == 0 ? 
&data.all_verts : &data.primary_elems ); 1648  int nents_to_be_set = *num_tag_storage_length / tagLength; 1649  1650  if( nents_to_be_set > (int)ents_to_set->size() ) 1651  { 1652  return moab::MB_FAILURE; 1653  } // to many entities to be set or too few 1654  1655  // now set the tag data 1656  MB_CHK_ERR( context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data ) ); 1657  1658  return moab::MB_SUCCESS; // no error 1659 } 1660  1661 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid, 1662  const iMOAB_String tag_storage_name, 1663  int* num_tag_storage_length, 1664  int* ent_type, 1665  int* tag_storage_data ) 1666 { 1667  ErrorCode rval; 1668  std::string tag_name( tag_storage_name ); 1669  1670  appData& data = context.appDatas[*pid]; 1671  1672  if( data.tagMap.find( tag_name ) == data.tagMap.end() ) 1673  { 1674  return moab::MB_FAILURE; 1675  } // tag not defined 1676  1677  Tag tag = data.tagMap[tag_name]; 1678  1679  int tagLength = 0; 1680  MB_CHK_ERR( context.MBI->tag_get_length( tag, tagLength ) ); 1681  1682  DataType dtype; 1683  MB_CHK_ERR( context.MBI->tag_get_data_type( tag, dtype ) ); 1684  1685  if( dtype != MB_TYPE_INTEGER ) 1686  { 1687  MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." ); 1688  } 1689  1690  // set it on a subset of entities, based on type and length 1691  // if *entity_type = 0, then use vertices; else elements 1692  Range* ents_to_get = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems ); 1693  int nents_to_get = *num_tag_storage_length / tagLength; 1694  1695  if( nents_to_get > (int)ents_to_get->size() ) 1696  { 1697  return moab::MB_FAILURE; 1698  } // to many entities to get, or too little 1699  1700  // now set the tag data 1701  rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval ); 1702  1703  return moab::MB_SUCCESS; // no error 1704 } 1705  1706 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid, 1707  const iMOAB_String tag_storage_names, 1708  int* num_tag_storage_length, 1709  int* ent_type, 1710  double* tag_storage_data ) 1711 { 1712  ErrorCode rval; 1713  std::string tag_names( tag_storage_names ); 1714  // exactly the same code as for int tag :) maybe should check the type of tag too 1715  std::vector< std::string > tagNames; 1716  std::vector< Tag > tagHandles; 1717  std::string separator( ":" ); 1718  split_tag_names( tag_names, separator, tagNames ); 1719  1720  appData& data = context.appDatas[*pid]; 1721  // set it on a subset of entities, based on type and length 1722  // if *entity_type = 0, then use vertices; else elements 1723  Range* ents_to_set = ( *ent_type == 0 ? 
&data.all_verts : &data.primary_elems ); 1724  1725  int nents_to_be_set = (int)( *ents_to_set ).size(); 1726  int position = 0; 1727  for( size_t i = 0; i < tagNames.size(); i++ ) 1728  { 1729  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() ) 1730  { 1731  return moab::MB_FAILURE; 1732  } // some tag not defined yet in the app 1733  1734  Tag tag = data.tagMap[tagNames[i]]; 1735  1736  int tagLength = 0; 1737  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval ); 1738  1739  DataType dtype; 1740  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval ); 1741  1742  if( dtype != MB_TYPE_DOUBLE ) 1743  { 1744  return moab::MB_FAILURE; 1745  } 1746  1747  // set it on the subset of entities, based on type and length 1748  if( position + tagLength * nents_to_be_set > *num_tag_storage_length ) 1749  return moab::MB_FAILURE; // too many entity values to be set 1750  1751  rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval ); 1752  // increment position to next tag 1753  position = position + tagLength * nents_to_be_set; 1754  } 1755  return moab::MB_SUCCESS; // no error 1756 } 1757  1758 ErrCode iMOAB_SetDoubleTagStorageWithGid( iMOAB_AppID pid, 1759  const iMOAB_String tag_storage_names, 1760  int* num_tag_storage_length, 1761  int* ent_type, 1762  double* tag_storage_data, 1763  int* globalIds ) 1764 { 1765  ErrorCode rval; 1766  std::string tag_names( tag_storage_names ); 1767  // exactly the same code as for int tag :) maybe should check the type of tag too 1768  std::vector< std::string > tagNames; 1769  std::vector< Tag > tagHandles; 1770  std::string separator( ":" ); 1771  split_tag_names( tag_names, separator, tagNames ); 1772  1773  appData& data = context.appDatas[*pid]; 1774  // set it on a subset of entities, based on type and length 1775  // if *entity_type = 0, then use vertices; else elements 1776  Range* ents_to_set = ( *ent_type == 0 ? 
&data.all_verts : &data.primary_elems ); 1777  int nents_to_be_set = (int)( *ents_to_set ).size(); 1778  1779  Tag gidTag = context.MBI->globalId_tag(); 1780  std::vector< int > gids; 1781  gids.resize( nents_to_be_set ); 1782  rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval ); 1783  1784  // so we will need to set the tags according to the global id passed; 1785  // so the order in tag_storage_data is the same as the order in globalIds, but the order 1786  // in local range is gids 1787  std::map< int, EntityHandle > eh_by_gid; 1788  int i = 0; 1789  for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i ) 1790  { 1791  eh_by_gid[gids[i]] = *it; 1792  } 1793  1794  std::vector< int > tagLengths( tagNames.size() ); 1795  std::vector< Tag > tagList; 1796  size_t total_tag_len = 0; 1797  for( size_t i = 0; i < tagNames.size(); i++ ) 1798  { 1799  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() ) 1800  { 1801  MB_SET_ERR( moab::MB_FAILURE, "tag missing" ); 1802  } // some tag not defined yet in the app 1803  1804  Tag tag = data.tagMap[tagNames[i]]; 1805  tagList.push_back( tag ); 1806  1807  int tagLength = 0; 1808  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval ); 1809  1810  total_tag_len += tagLength; 1811  tagLengths[i] = tagLength; 1812  DataType dtype; 1813  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval ); 1814  1815  if( dtype != MB_TYPE_DOUBLE ) 1816  { 1817  MB_SET_ERR( moab::MB_FAILURE, "tag not double type" ); 1818  } 1819  } 1820  bool serial = true; 1821 #ifdef MOAB_HAVE_MPI 1822  ParallelComm* pco = context.appDatas[*pid].pcomm; 1823  unsigned num_procs = pco->size(); 1824  if( num_procs > 1 ) serial = false; 1825 #endif 1826  1827  if( serial ) 1828  { 1829  // we do not assume anymore that the number of entities has to match 1830  // we will set only what matches, and skip entities that do not have corresponding global ids 1831  //assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 ); 1832  // tags are unrolled, we loop over global ids first, then careful about tags 1833  for( int i = 0; i < nents_to_be_set; i++ ) 1834  { 1835  int gid = globalIds[i]; 1836  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid ); 1837  if( mapIt == eh_by_gid.end() ) continue; 1838  EntityHandle eh = mapIt->second; 1839  // now loop over tags 1840  int indexInTagValues = 0; // 1841  for( size_t j = 0; j < tagList.size(); j++ ) 1842  { 1843  indexInTagValues += i * tagLengths[j]; 1844  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval ); 1845  // advance the pointer/index 1846  indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j]; // at the end of tag data 1847  } 1848  } 1849  } 1850 #ifdef MOAB_HAVE_MPI 1851  else // it can be not serial only if pco->size() > 1, parallel 1852  { 1853  // in this case, we have to use 2 crystal routers, to send data to the processor that needs it 1854  // we will create first a tuple to rendevous points, then from there send to the processor that requested it 1855  // it is a 2-hop global gather scatter 1856  // TODO: allow for tags of different length; this is wrong 1857  int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() ); // assumes all tags have the same length? 
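// --- added explanatory note: a summary of the logic that follows, no new behavior ---
// In outline, the parallel path below proceeds in three hops through the crystal router:
//  (1) TLsend: each task packs one tuple (dest = gid % num_procs, gid) plus total_tag_len doubles of
//      unrolled tag values for every entry of globalIds[], and routes it to that rendezvous rank;
//  (2) TLreq: each task posts one request tuple (dest = gid % num_procs, gid) for every entity it owns
//      locally (the gids vector filled above), routed to the same rendezvous rank;
//  (3) at the rendezvous rank both lists are sorted by global id and matched, and the matched values are
//      returned in TLBack to the requesting task, which writes them with tag_set_data via the eh_by_gid map.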
1858  // we do not expect the sizes to match 1859  //assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 ); 1860  TupleList TLsend; 1861  TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals ); // to proc, marker(gid), total_tag_len doubles 1862  TLsend.enableWriteAccess(); 1863  // the processor id that processes global_id is global_id / num_ents_per_proc 1864  1865  int indexInRealLocal = 0; 1866  for( int i = 0; i < nbLocalVals; i++ ) 1867  { 1868  // to proc, marker, element local index, index in el 1869  int marker = globalIds[i]; 1870  int to_proc = marker % num_procs; 1871  int n = TLsend.get_n(); 1872  TLsend.vi_wr[2 * n] = to_proc; // send to processor 1873  TLsend.vi_wr[2 * n + 1] = marker; 1874  int indexInTagValues = 0; 1875  // tag data collect by number of tags 1876  for( size_t j = 0; j < tagList.size(); j++ ) 1877  { 1878  indexInTagValues += i * tagLengths[j]; 1879  for( int k = 0; k < tagLengths[j]; k++ ) 1880  { 1881  TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k]; 1882  } 1883  indexInTagValues += ( nbLocalVals - i ) * tagLengths[j]; 1884  } 1885  TLsend.inc_n(); 1886  } 1887  1888  //assert( nbLocalVals * total_tag_len - indexInRealLocal == 0 ); 1889  // send now requests, basically inform the rendez-vous point who needs a particular global id 1890  // send the data to the other processors: 1891  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 ); 1892  TupleList TLreq; 1893  TLreq.initialize( 2, 0, 0, 0, nents_to_be_set ); 1894  TLreq.enableWriteAccess(); 1895  for( int i = 0; i < nents_to_be_set; i++ ) 1896  { 1897  // to proc, marker 1898  int marker = gids[i]; 1899  int to_proc = marker % num_procs; 1900  int n = TLreq.get_n(); 1901  TLreq.vi_wr[2 * n] = to_proc; // send to processor 1902  TLreq.vi_wr[2 * n + 1] = marker; 1903  // tag data collect by number of tags 1904  TLreq.inc_n(); 1905  } 1906  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 ); 1907  1908  // we know now that process TLreq.vi_wr[2 * n] needs tags for gid TLreq.vi_wr[2 * n + 1] 1909  // we should first order by global id, and then build the new TL with send to proc, global id and 1910  // tags for it 1911  // sort by global ids the tuple lists 1912  moab::TupleList::buffer sort_buffer; 1913  sort_buffer.buffer_init( TLreq.get_n() ); 1914  TLreq.sort( 1, &sort_buffer ); 1915  sort_buffer.reset(); 1916  sort_buffer.buffer_init( TLsend.get_n() ); 1917  TLsend.sort( 1, &sort_buffer ); 1918  sort_buffer.reset(); 1919  // now send the tag values to the proc that requested it 1920  // in theory, for a full partition, TLreq and TLsend should have the same size, and 1921  // each dof should have exactly one target proc. Is that true or not in general ? 1922  // how do we plan to use this? 
Is it better to store the comm graph for future 1923  1924  // start copy from comm graph settle 1925  TupleList TLBack; 1926  TLBack.initialize( 3, 0, 0, total_tag_len, 0 ); // to proc, marker, tag from proc , tag values 1927  TLBack.enableWriteAccess(); 1928  1929  int n1 = TLreq.get_n(); 1930  int n2 = TLsend.get_n(); 1931  1932  int indexInTLreq = 0; 1933  int indexInTLsend = 0; // advance both, according to the marker 1934  if( n1 > 0 && n2 > 0 ) 1935  { 1936  1937  while( indexInTLreq < n1 && indexInTLsend < n2 ) // if any is over, we are done 1938  { 1939  int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1]; 1940  int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1]; 1941  if( currentValue1 < currentValue2 ) 1942  { 1943  // we have a big problem; basically, we are saying that 1944  // dof currentValue is on one model and not on the other 1945  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n"; 1946  indexInTLreq++; 1947  continue; 1948  } 1949  if( currentValue1 > currentValue2 ) 1950  { 1951  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n"; 1952  indexInTLsend++; 1953  continue; 1954  } 1955  int size1 = 1; 1956  int size2 = 1; 1957  while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] ) 1958  size1++; 1959  while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] ) 1960  size2++; 1961  // must be found in both lists, find the start and end indices 1962  for( int i1 = 0; i1 < size1; i1++ ) 1963  { 1964  for( int i2 = 0; i2 < size2; i2++ ) 1965  { 1966  // send the info back to components 1967  int n = TLBack.get_n(); 1968  TLBack.reserve(); 1969  TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )]; // send back to the proc marker 1970  // came from, info from comp2 1971  TLBack.vi_wr[3 * n + 1] = currentValue1; // initial value (resend, just for verif ?) 
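// note (added): if the same global id shows up more than once in TLreq or TLsend, this double loop
// emits one reply tuple per (request, send) pair, so every requesting task still receives the values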
1972  TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )]; // from proc on comp2 1973  // also fill tag values 1974  for( size_t k = 0; k < total_tag_len; k++ ) 1975  { 1976  TLBack.vr_rd[total_tag_len * n + k] = 1977  TLsend.vr_rd[total_tag_len * indexInTLsend + k]; // deep copy of tag values 1978  } 1979  } 1980  } 1981  indexInTLreq += size1; 1982  indexInTLsend += size2; 1983  } 1984  } 1985  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 ); 1986  // end copy from comm graph 1987  // after we are done sending, we need to set those tag values, in a reverse process compared to send 1988  n1 = TLBack.get_n(); 1989  double* ptrVal = &TLBack.vr_rd[0]; // 1990  for( int i = 0; i < n1; i++ ) 1991  { 1992  int gid = TLBack.vi_rd[3 * i + 1]; // marker 1993  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid ); 1994  if( mapIt == eh_by_gid.end() ) continue; 1995  EntityHandle eh = mapIt->second; 1996  // now loop over tags 1997  1998  for( size_t j = 0; j < tagList.size(); j++ ) 1999  { 2000  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval ); 2001  // advance the pointer/index 2002  ptrVal += tagLengths[j]; // at the end of tag data per call 2003  } 2004  } 2005  } 2006 #endif 2007  return MB_SUCCESS; 2008 } 2009 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid, 2010  const iMOAB_String tag_storage_names, 2011  int* num_tag_storage_length, 2012  int* ent_type, 2013  double* tag_storage_data ) 2014 { 2015  ErrorCode rval; 2016  // exactly the same code, except tag type check 2017  std::string tag_names( tag_storage_names ); 2018  // exactly the same code as for int tag :) maybe should check the type of tag too 2019  std::vector< std::string > tagNames; 2020  std::vector< Tag > tagHandles; 2021  std::string separator( ":" ); 2022  split_tag_names( tag_names, separator, tagNames ); 2023  2024  appData& data = context.appDatas[*pid]; 2025  2026  // set it on a subset of entities, based on type and length 2027  Range* ents_to_get = nullptr; 2028  2029  if( *ent_type == 0 ) // vertices 2030  { 2031  ents_to_get = &data.all_verts; 2032  } 2033  else if( *ent_type == 1 ) 2034  { 2035  ents_to_get = &data.primary_elems; 2036  } 2037  int nents_to_get = (int)ents_to_get->size(); 2038  int position = 0; 2039  for( size_t i = 0; i < tagNames.size(); i++ ) 2040  { 2041  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() ) 2042  { 2043  return moab::MB_FAILURE; 2044  } // tag not defined 2045  2046  Tag tag = data.tagMap[tagNames[i]]; 2047  2048  int tagLength = 0; 2049  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval ); 2050  2051  DataType dtype; 2052  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval ); 2053  2054  if( dtype != MB_TYPE_DOUBLE ) 2055  { 2056  return moab::MB_FAILURE; 2057  } 2058  2059  if( position + nents_to_get * tagLength > *num_tag_storage_length ) 2060  return moab::MB_FAILURE; // too many entity values to get 2061  2062  rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval ); 2063  position = position + nents_to_get * tagLength; 2064  } 2065  2066  return moab::MB_SUCCESS; // no error 2067 } 2068  2069 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type ) 2070 { 2071 #ifdef MOAB_HAVE_MPI 2072  appData& data = context.appDatas[*pid]; 2073  Range ent_exchange; 2074  std::vector< Tag > tags; 2075  2076  for( int i = 0; i < *num_tag; i++ ) 2077  { 2078  if( 
tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() ) 2079  { 2080  return moab::MB_FAILURE; 2081  } // error in tag index 2082  2083  tags.push_back( data.tagList[tag_indices[i]] ); 2084  } 2085  2086  if( *ent_type == 0 ) 2087  { 2088  ent_exchange = data.all_verts; 2089  } 2090  else if( *ent_type == 1 ) 2091  { 2092  ent_exchange = data.primary_elems; 2093  } 2094  else 2095  { 2096  return moab::MB_FAILURE; 2097  } // unexpected type 2098  2099  ParallelComm* pco = context.appDatas[*pid].pcomm; 2100  2101  ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval ); 2102  2103 #else 2104  /* do nothing if serial */ 2105  // just silence the warning 2106  // do not call sync tags in serial! 2107  int k = *pid + *num_tag + *tag_indices + *ent_type; 2108  k++; 2109 #endif 2110  2111  return moab::MB_SUCCESS; 2112 } 2113  2114 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type ) 2115 { 2116 #ifdef MOAB_HAVE_MPI 2117  appData& data = context.appDatas[*pid]; 2118  Range ent_exchange; 2119  2120  if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() ) 2121  { 2122  return moab::MB_FAILURE; 2123  } // error in tag index 2124  2125  Tag tagh = data.tagList[*tag_index]; 2126  2127  if( *ent_type == 0 ) 2128  { 2129  ent_exchange = data.all_verts; 2130  } 2131  else if( *ent_type == 1 ) 2132  { 2133  ent_exchange = data.primary_elems; 2134  } 2135  else 2136  { 2137  return moab::MB_FAILURE; 2138  } // unexpected type 2139  2140  ParallelComm* pco = context.appDatas[*pid].pcomm; 2141  // we could do different MPI_Op; do not bother now, we will call from fortran 2142  ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval ); 2143  2144 #else 2145  /* do nothing if serial */ 2146  // just silence the warning 2147  // do not call sync tags in serial! 
2148  int k = *pid + *tag_index + *ent_type; 2149  k++; // just do junk, to avoid complaints 2150 #endif 2151  return moab::MB_SUCCESS; 2152 } 2153  2154 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid, 2155  iMOAB_LocalID* local_index, 2156  int* num_adjacent_elements, 2157  iMOAB_LocalID* adjacent_element_IDs ) 2158 { 2159  ErrorCode rval; 2160  2161  // one neighbor for each subentity of dimension-1 2162  MeshTopoUtil mtu( context.MBI ); 2163  appData& data = context.appDatas[*pid]; 2164  EntityHandle eh = data.primary_elems[*local_index]; 2165  Range adjs; 2166  rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval ); 2167  2168  if( *num_adjacent_elements < (int)adjs.size() ) 2169  { 2170  return moab::MB_FAILURE; 2171  } // not dimensioned correctly 2172  2173  *num_adjacent_elements = (int)adjs.size(); 2174  2175  for( int i = 0; i < *num_adjacent_elements; i++ ) 2176  { 2177  adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] ); 2178  } 2179  2180  return moab::MB_SUCCESS; 2181 } 2182  2183 #if 0 2184  2185 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs ) 2186 { 2187  return moab::MB_SUCCESS; 2188 } 2189  2190 #endif 2191  2192 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates ) 2193 { 2194  ErrorCode rval; 2195  appData& data = context.appDatas[*pid]; 2196  2197  if( !data.local_verts.empty() ) // we should have no vertices in the app 2198  { 2199  return moab::MB_FAILURE; 2200  } 2201  2202  int nverts = *coords_len / *dim; 2203  2204  rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval ); 2205  2206  rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval ); 2207  2208  // also add the vertices to the all_verts range 2209  data.all_verts.merge( data.local_verts ); 2210  return moab::MB_SUCCESS; 2211 } 2212  2213 ErrCode iMOAB_CreateElements( iMOAB_AppID pid, 2214  int* num_elem, 2215  int* type, 2216  int* num_nodes_per_element, 2217  int* connectivity, 2218  int* block_ID ) 2219 { 2220  // Create elements 2221  appData& data = context.appDatas[*pid]; 2222  2223  ReadUtilIface* read_iface; 2224  ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval ); 2225  2226  EntityType mbtype = (EntityType)( *type ); 2227  EntityHandle actual_start_handle; 2228  EntityHandle* array = nullptr; 2229  rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval ); 2230  2231  // fill up with actual connectivity from input; assume the vertices are in order, and start 2232  // vertex is the first in the current data vertex range 2233  EntityHandle firstVertex = data.local_verts[0]; 2234  2235  for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ ) 2236  { 2237  array[j] = connectivity[j] + firstVertex - 1; 2238  } // assumes connectivity uses 1 based array (from fortran, mostly) 2239  2240  Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 ); 2241  2242  rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval ); 2243  2244  data.primary_elems.merge( new_elems ); 2245  2246  // add to adjacency 2247  rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval ); 2248  // organize all new elements in block, with the given 
block ID; if the block set is not 2249  // existing, create a new mesh set; 2250  Range sets; 2251  int set_no = *block_ID; 2252  const void* setno_ptr = &set_no; 2253  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1, 2254  sets ); 2255  EntityHandle block_set; 2256  2257  if( MB_FAILURE == rval || sets.empty() ) 2258  { 2259  // create a new set, with this block ID 2260  rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval ); 2261  2262  rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval ); 2263  2264  // add the material set to file set 2265  rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval ); 2266  } 2267  else 2268  { 2269  block_set = sets[0]; 2270  } // first set is the one we want 2271  2272  /// add the new ents to the clock set 2273  rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval ); 2274  2275  return moab::MB_SUCCESS; 2276 } 2277  2278 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems ) 2279 { 2280  appData& data = context.appDatas[*pid]; 2281  data.num_global_vertices = *num_global_verts; 2282  data.num_global_elements = *num_global_elems; 2283  return moab::MB_SUCCESS; 2284 } 2285  2286 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems ) 2287 { 2288  appData& data = context.appDatas[*pid]; 2289  if( nullptr != num_global_verts ) 2290  { 2291  *num_global_verts = data.num_global_vertices; 2292  } 2293  if( nullptr != num_global_elems ) 2294  { 2295  *num_global_elems = data.num_global_elements; 2296  } 2297  2298  return moab::MB_SUCCESS; 2299 } 2300  2301 #ifdef MOAB_HAVE_MPI 2302  2303 // this makes sense only for parallel runs 2304 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker ) 2305 { 2306  appData& data = context.appDatas[*pid]; 2307  ParallelComm* pco = context.appDatas[*pid].pcomm; 2308  EntityHandle cset = data.file_set; 2309  int dum_id = 0; 2310  ErrorCode rval; 2311  if( data.primary_elems.empty() ) 2312  { 2313  // skip actual resolve, assume vertices are distributed already , 2314  // no need to share them 2315  } 2316  else 2317  { 2318  // create an integer tag for resolving ; maybe it can be a long tag in the future 2319  // (more than 2 B vertices;) 2320  2321  Tag stag; 2322  rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE, 2323  &dum_id );MB_CHK_ERR( rval ); 2324  2325  if( *num_verts > (int)data.local_verts.size() ) 2326  { 2327  return moab::MB_FAILURE; 2328  } // we are not setting the size 2329  2330  rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval ); // assumes integer tag 2331  2332  rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval ); 2333  2334  rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval ); 2335  } 2336  // provide partition tag equal to rank 2337  Tag part_tag; 2338  dum_id = -1; 2339  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag, 2340  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id ); 2341  2342  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) ) 2343  { 2344  std::cout << " can't get par part tag.\n"; 2345  return moab::MB_FAILURE; 2346  } 2347  2348  int rank = pco->rank(); 2349  rval = context.MBI->tag_set_data( part_tag, &cset, 
1, &rank );MB_CHK_ERR( rval ); 2350  2351  return moab::MB_SUCCESS; 2352 } 2353  2354 // this assumes that this was not called before 2355 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim ) 2356 { 2357  ErrorCode rval; 2358  2359  // verify we have valid ghost layers input specified. If invalid, exit quick. 2360  if( num_ghost_layers && *num_ghost_layers <= 0 ) 2361  { 2362  return moab::MB_SUCCESS; 2363  } // nothing to do 2364  2365  appData& data = context.appDatas[*pid]; 2366  ParallelComm* pco = context.appDatas[*pid].pcomm; 2367  2368  // maybe we should be passing this too; most of the time we do not need additional ents collective call 2369  constexpr int addl_ents = 0; 2370  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, 1, // get only one layer of ghost entities 2371  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval ); 2372  for( int i = 2; i <= *num_ghost_layers; i++ ) 2373  { 2374  rval = pco->correct_thin_ghost_layers();MB_CHK_ERR( rval ); // correct for thin layers 2375  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, i, // iteratively get one extra layer 2376  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval ); 2377  } 2378  2379  // Update ghost layer information 2380  data.num_ghost_layers = *num_ghost_layers; 2381  2382  // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set 2383  return iMOAB_UpdateMeshInfo( pid ); 2384 } 2385  2386 ErrCode iMOAB_SendMesh( iMOAB_AppID pid, 2387  MPI_Comm* joint_communicator, 2388  MPI_Group* receivingGroup, 2389  int* rcompid, 2390  int* method ) 2391 { 2392  assert( joint_communicator != nullptr ); 2393  assert( receivingGroup != nullptr ); 2394  assert( rcompid != nullptr ); 2395  2396  ErrorCode rval; 2397  int ierr; 2398  appData& data = context.appDatas[*pid]; 2399  ParallelComm* pco = context.appDatas[*pid].pcomm; 2400  2401  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) 2402  : *joint_communicator ); 2403  MPI_Group recvGroup = 2404  ( data.is_fortran ? 
MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup ); 2405  MPI_Comm sender = pco->comm(); // the sender comm is obtained from parallel comm in moab; no need to pass it along 2406  // first see what are the processors in each group; get the sender group too, from the sender communicator 2407  MPI_Group senderGroup; 2408  ierr = MPI_Comm_group( sender, &senderGroup ); 2409  if( ierr != 0 ) return moab::MB_FAILURE; 2410  2411  // instantiate the par comm graph 2412  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1, 2413  // int coid2) 2414  ParCommGraph* cgraph = 2415  new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid ); 2416  // we should search if we have another pcomm with the same comp ids in the list already 2417  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph 2418  context.appDatas[*pid].pgraph[*rcompid] = cgraph; 2419  2420  int sender_rank = -1; 2421  MPI_Comm_rank( sender, &sender_rank ); 2422  2423  // decide how to distribute elements to each processor 2424  // now, get the entities on local processor, and pack them into a buffer for various processors 2425  // we will do trivial partition: first get the total number of elements from "sender" 2426  std::vector< int > number_elems_per_part; 2427  // how to distribute local elements to receiving tasks? 2428  // trivial partition: compute first the total number of elements need to be sent 2429  Range owned = context.appDatas[*pid].owned_elems; 2430  if( owned.size() == 0 ) 2431  { 2432  // must be vertices that we want to send then 2433  owned = context.appDatas[*pid].local_verts; 2434  // we should have some vertices here 2435  } 2436  2437  if( *method == 0 ) // trivial partitioning, old method 2438  { 2439  int local_owned_elem = (int)owned.size(); 2440  int size = pco->size(); 2441  int rank = pco->rank(); 2442  number_elems_per_part.resize( size ); // 2443  number_elems_per_part[rank] = local_owned_elem; 2444 #if( MPI_VERSION >= 2 ) 2445  // Use "in place" option 2446  ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender ); 2447 #else 2448  { 2449  std::vector< int > all_tmp( size ); 2450  ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender ); 2451  number_elems_per_part = all_tmp; 2452  } 2453 #endif 2454  2455  if( ierr != 0 ) 2456  { 2457  return moab::MB_FAILURE; 2458  } 2459  2460  // every sender computes the trivial partition, it is cheap, and we need to send it anyway 2461  // to each sender 2462  rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval ); 2463  2464  rval = cgraph->send_graph( global );MB_CHK_ERR( rval ); 2465  } 2466  else // *method != 0, so it is either graph or geometric, parallel 2467  { 2468  // owned are the primary elements on this app 2469  rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval ); 2470  2471  // basically, send the graph to the receiver side, with unblocking send 2472  rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval ); 2473  } 2474  // pco is needed to pack, not for communication 2475  rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval ); 2476  2477  // mark for deletion 2478  MPI_Group_free( &senderGroup ); 2479  return moab::MB_SUCCESS; 2480 } 2481  2482 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* joint_communicator, MPI_Group* 
sendingGroup, int* scompid ) 2483 { 2484  assert( joint_communicator != nullptr ); 2485  assert( sendingGroup != nullptr ); 2486  assert( scompid != nullptr ); 2487  2488  ErrorCode rval; 2489  appData& data = context.appDatas[*pid]; 2490  ParallelComm* pco = context.appDatas[*pid].pcomm; 2491  MPI_Comm receive = pco->comm(); 2492  EntityHandle local_set = data.file_set; 2493  2494  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) 2495  : *joint_communicator ); 2496  MPI_Group sendGroup = 2497  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup ); 2498  2499  // first see what are the processors in each group; get the sender group too, from the sender 2500  // communicator 2501  MPI_Group receiverGroup; 2502  int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr ); 2503  2504  // instantiate the par comm graph 2505  ParCommGraph* cgraph = 2506  new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id ); 2507  // TODO we should search if we have another pcomm with the same comp ids in the list already 2508  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph 2509  context.appDatas[*pid].pgraph[*scompid] = cgraph; 2510  2511  int receiver_rank = -1; 2512  MPI_Comm_rank( receive, &receiver_rank ); 2513  2514  // first, receive from sender_rank 0, the communication graph (matrix), so each receiver 2515  // knows what data to expect 2516  std::vector< int > pack_array; 2517  rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval ); 2518  2519  // senders across for the current receiver 2520  int current_receiver = cgraph->receiver( receiver_rank ); 2521  2522  std::vector< int > senders_local; 2523  size_t n = 0; 2524  2525  while( n < pack_array.size() ) 2526  { 2527  if( current_receiver == pack_array[n] ) 2528  { 2529  for( int j = 0; j < pack_array[n + 1]; j++ ) 2530  { 2531  senders_local.push_back( pack_array[n + 2 + j] ); 2532  } 2533  2534  break; 2535  } 2536  2537  n = n + 2 + pack_array[n + 1]; 2538  } 2539  2540 #ifdef VERBOSE 2541  std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from " 2542  << senders_local.size() << " tasks: "; 2543  2544  for( int k = 0; k < (int)senders_local.size(); k++ ) 2545  { 2546  std::cout << " " << senders_local[k]; 2547  } 2548  2549  std::cout << "\n"; 2550 #endif 2551  2552  if( senders_local.empty() ) 2553  { 2554  std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n"; 2555  } 2556  rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval ); 2557  2558  // after we are done, we could merge vertices that come from different senders, but 2559  // have the same global id 2560  Tag idtag; 2561  rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval ); 2562  2563  // data.point_cloud = false; 2564  Range local_ents; 2565  rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval ); 2566  2567  // do not do merge if point cloud 2568  if( !local_ents.all_of_type( MBVERTEX ) ) 2569  { 2570  if( (int)senders_local.size() >= 2 ) // need to remove duplicate vertices 2571  // that might come from different senders 2572  { 2573  2574  Range local_verts = local_ents.subset_by_type( MBVERTEX ); 2575  Range local_elems = subtract( local_ents, local_verts ); 2576  2577  // remove from local set the 
vertices 2578  rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval ); 2579  2580 #ifdef VERBOSE 2581  std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n"; 2582 #endif 2583  MergeMesh mm( context.MBI ); 2584  2585  rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval ); 2586  2587  Range new_verts; // local elems are local entities without vertices 2588  rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval ); 2589  2590 #ifdef VERBOSE 2591  std::cout << "after merging: new verts: " << new_verts.size() << "\n"; 2592 #endif 2593  rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval ); 2594  } 2595  } 2596  else 2597  data.point_cloud = true; 2598  2599  if( !data.point_cloud ) 2600  { 2601  // still need to resolve shared entities (in this case, vertices ) 2602  rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval ); 2603  } 2604  else 2605  { 2606  // if partition tag exists, set it to current rank; just to make it visible in VisIt 2607  Tag densePartTag; 2608  rval = context.MBI->tag_get_handle( "partition", densePartTag ); 2609  if( nullptr != densePartTag && MB_SUCCESS == rval ) 2610  { 2611  Range local_verts; 2612  rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval ); 2613  std::vector< int > vals; 2614  int rank = pco->rank(); 2615  vals.resize( local_verts.size(), rank ); 2616  rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval ); 2617  } 2618  } 2619  // set the parallel partition tag 2620  Tag part_tag; 2621  int dum_id = -1; 2622  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag, 2623  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id ); 2624  2625  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) ) 2626  { 2627  std::cout << " can't get par part tag.\n"; 2628  return moab::MB_FAILURE; 2629  } 2630  2631  int rank = pco->rank(); 2632  rval = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval ); 2633  2634  // make sure that the GLOBAL_ID is defined as a tag 2635  int tagtype = 0; // dense, integer 2636  int numco = 1; // size 2637  int tagindex; // not used 2638  rval = iMOAB_DefineTagStorage( pid, "GLOBAL_ID", &tagtype, &numco, &tagindex );MB_CHK_ERR( rval ); 2639  // populate the mesh with current data info 2640  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval ); 2641  2642  // mark for deletion 2643  MPI_Group_free( &receiverGroup ); 2644  2645  return moab::MB_SUCCESS; 2646 } 2647  2648 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid, 2649  const iMOAB_String tag_storage_name, 2650  MPI_Comm* joint_communicator, 2651  int* context_id ) 2652 { 2653  appData& data = context.appDatas[*pid]; 2654  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id ); 2655  if( mt == data.pgraph.end() ) 2656  { 2657  std::cout << " no par com graph for context_id:" << *context_id << " available contexts:"; 2658  for( auto mit = data.pgraph.begin(); mit != data.pgraph.end(); mit++ ) 2659  std::cout << " " << mit->first; 2660  std::cout << "\n"; 2661  return moab::MB_FAILURE; 2662  } 2663  2664  ParCommGraph* cgraph = mt->second; 2665  ParallelComm* pco = context.appDatas[*pid].pcomm; 2666  MPI_Comm global = ( data.is_fortran ? 
MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) 2667  : *joint_communicator ); 2668  Range owned = ( data.point_cloud ? data.local_verts : data.owned_elems ); 2669  2670 #ifdef MOAB_HAVE_TEMPESTREMAP 2671  if( data.tempestData.remapper != nullptr ) // this is the case this is part of intx;; 2672  { 2673  EntityHandle cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh ); 2674  MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) ); 2675  } 2676 #else 2677  // now, in case we are sending from intx between ocn and atm, we involve coverage set 2678  // how do I know if this receiver already participated in an intersection driven by coupler? 2679  // also, what if this was the "source" mesh in intx? 2680  // in that case, the elements might have been instantiated in the coverage set locally, the 2681  // "owned" range can be different the elements are now in tempestRemap coverage_set 2682  EntityHandle cover_set = cgraph->get_cover_set(); // this will be non null only for intx app ? 2683  if( 0 != cover_set ) MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) ); 2684 #endif 2685  2686  std::string tag_name( tag_storage_name ); 2687  2688  // basically, we assume everything is defined already on the tag, 2689  // and we can get the tags just by its name 2690  // we assume that there are separators ":" between the tag names 2691  std::vector< std::string > tagNames; 2692  std::vector< Tag > tagHandles; 2693  std::string separator( ":" ); 2694  split_tag_names( tag_name, separator, tagNames ); 2695  for( size_t i = 0; i < tagNames.size(); i++ ) 2696  { 2697  Tag tagHandle; 2698  ErrorCode rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle ); 2699  if( MB_SUCCESS != rval || nullptr == tagHandle ) 2700  { 2701  MB_CHK_SET_ERR( moab::MB_FAILURE, 2702  "can't get tag handle with name: " << tagNames[i].c_str() << " at index " << i ); 2703  } 2704  tagHandles.push_back( tagHandle ); 2705  } 2706  2707  // pco is needed to pack, and for moab instance, not for communication! 2708  // still use nonblocking communication, over the joint comm 2709  MB_CHK_ERR( cgraph->send_tag_values( global, pco, owned, tagHandles ) ); 2710  // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities 2711  2712  return moab::MB_SUCCESS; 2713 } 2714  2715 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid, 2716  const iMOAB_String tag_storage_name, 2717  MPI_Comm* joint_communicator, 2718  int* context_id ) 2719 { 2720  appData& data = context.appDatas[*pid]; 2721  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id ); 2722  if( mt == data.pgraph.end() ) return moab::MB_FAILURE; 2723  2724  ParCommGraph* cgraph = mt->second; 2725  ParallelComm* pco = context.appDatas[*pid].pcomm; 2726  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) 2727  : *joint_communicator ); 2728  Range owned = ( data.point_cloud ? data.local_verts : data.owned_elems ); 2729  2730  // how do I know if this receiver already participated in an intersection driven by coupler? 2731  // also, what if this was the "source" mesh in intx? 
2732  // in that case, the elements might have been instantiated in the coverage set locally, the 2733  // "owned" range can be different; the elements are now in the tempestRemap coverage_set 2734  EntityHandle cover_set = cgraph->get_cover_set(); 2735  if( 0 != cover_set ) MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) ); 2736  2737  // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph, e.g. 2738  // after atm ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the cover set 2739  // from the intx remapper 2740 #ifdef MOAB_HAVE_TEMPESTREMAP 2741  if( data.tempestData.remapper != nullptr ) // this is the case when this is part of intx 2742  { 2743  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh ); 2744  MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) ); 2745  } 2746 #endif 2747  2748  std::string tag_name( tag_storage_name ); 2749  // we assume that there are separators ":" between the tag names 2750  std::vector< std::string > tagNames; 2751  std::vector< Tag > tagHandles; 2752  std::string separator( ":" ); 2753  split_tag_names( tag_name, separator, tagNames ); 2754  for( size_t i = 0; i < tagNames.size(); i++ ) 2755  { 2756  Tag tagHandle; 2757  ErrorCode rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle ); 2758  if( MB_SUCCESS != rval || nullptr == tagHandle ) 2759  { 2760  MB_CHK_SET_ERR( moab::MB_FAILURE, 2761  " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i ); 2762  } 2763  tagHandles.push_back( tagHandle ); 2764  } 2765  2766 #ifdef VERBOSE 2767  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name 2768  << " and file set = " << ( data.file_set ) << "\n"; 2769 #endif 2770  // pco is needed for packing and for the moab instance, not for communication! 2771  // still use nonblocking communication 2772  MB_CHK_SET_ERR( cgraph->receive_tag_values( global, pco, owned, tagHandles ), "failed to receive tag values" ); 2773  2774 #ifdef VERBOSE 2775  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n"; 2776 #endif 2777  2778  return moab::MB_SUCCESS; 2779 } 2780  2781 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id ) 2782 { 2783  // first we need to find the pgraph that holds the information we need 2784  // this will be called on the sender side only 2785  appData& data = context.appDatas[*pid]; 2786  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id ); 2787  if( mt == data.pgraph.end() ) return moab::MB_FAILURE; // error 2788  2789  mt->second->release_send_buffers(); 2790  return moab::MB_SUCCESS; 2791 } 2792  2793 //#define VERBOSE 2794 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1, 2795  iMOAB_AppID pid2, 2796  MPI_Comm* joint_communicator, 2797  MPI_Group* group1, 2798  MPI_Group* group2, 2799  int* type1, 2800  int* type2, 2801  int* comp1, 2802  int* comp2 ) 2803 { 2804  assert( joint_communicator ); 2805  assert( group1 ); 2806  assert( group2 ); 2807  ErrorCode rval = MB_SUCCESS; 2808  int localRank = 0, numProcs = 1; 2809  2810  bool isFortran = false; 2811  if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran; 2812  if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran; 2813  2814  MPI_Comm global = 2815  ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator ); 2816  MPI_Group srcGroup = ( isFortran ?
MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 ); 2817  MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 ); 2818  2819  MPI_Comm_rank( global, &localRank ); 2820  MPI_Comm_size( global, &numProcs ); 2821  // instantiate the par comm graph 2822  2823  // we should search if we have another pcomm with the same comp ids in the list already 2824  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph 2825  ParCommGraph* cgraph = nullptr; 2826  ParCommGraph* cgraph_rev = nullptr; 2827  if( *pid1 >= 0 ) 2828  { 2829  appData& data = context.appDatas[*pid1]; 2830  auto mt = data.pgraph.find( *comp2 ); 2831  if( mt != data.pgraph.end() ) data.pgraph.erase( mt ); 2832  // now let us compute the new communication graph 2833  cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 ); 2834  context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp 2835  } 2836  if( *pid2 >= 0 ) 2837  { 2838  appData& data = context.appDatas[*pid2]; 2839  auto mt = data.pgraph.find( *comp1 ); 2840  if( mt != data.pgraph.end() ) data.pgraph.erase( mt ); 2841  // now let us compute the new communication graph 2842  cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 ); 2843  context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev; // from 2 to 1 2844  } 2845  2846  // each model has a list of global ids that will need to be sent by gs to rendezvous the other 2847  // model on the joint comm 2848  TupleList TLcomp1; 2849  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker 2850  TupleList TLcomp2; 2851  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker 2852  // will push_back a new tuple, if needed 2853  2854  TLcomp1.enableWriteAccess(); 2855  2856  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID 2857  Tag gdsTag; 2858  // find the values on first cell 2859  int lenTagType1 = 1; 2860  if( 1 == *type1 || 1 == *type2 ) 2861  { 2862  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval ); 2863  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16 2864  } 2865  Tag gidTag = context.MBI->globalId_tag(); 2866  2867  std::vector< int > valuesComp1; 2868  // populate first tuple 2869  if( *pid1 >= 0 ) 2870  { 2871  appData& data1 = context.appDatas[*pid1]; 2872  EntityHandle fset1 = data1.file_set; 2873  // in case of tempest remap, get the coverage set 2874 #ifdef MOAB_HAVE_TEMPESTREMAP 2875  if( data1.tempestData.remapper != nullptr ) // this is the case this is part of intx; 2876  fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh ); 2877 #endif 2878  Range ents_of_interest; 2879  if( *type1 == 1 ) 2880  { 2881  assert( gdsTag ); 2882  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval ); 2883  valuesComp1.resize( ents_of_interest.size() * lenTagType1 ); 2884  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); 2885  } 2886  else if( *type1 == 2 ) 2887  { 2888  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval ); 2889  valuesComp1.resize( ents_of_interest.size() ); 2890  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids 2891  } 2892  else if( *type1 == 3 ) // for FV meshes, just get the global id of cell 2893  { 2894  rval = context.MBI->get_entities_by_dimension( 
fset1, 2, ents_of_interest );MB_CHK_ERR( rval ); 2895  valuesComp1.resize( ents_of_interest.size() ); 2896  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids 2897  } 2898  else 2899  { 2900  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3 2901  } 2902  // now fill the tuple list with info and markers 2903  // because we will send only the ids, order and compress the list 2904  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() ); 2905  TLcomp1.resize( uniq.size() ); 2906  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ ) 2907  { 2908  // to proc, marker, element local index, index in el 2909  int marker = *sit; 2910  int to_proc = marker % numProcs; 2911  int n = TLcomp1.get_n(); 2912  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor 2913  TLcomp1.vi_wr[2 * n + 1] = marker; 2914  TLcomp1.inc_n(); 2915  } 2916  } 2917  2918  ProcConfig pc( global ); // proc config does the crystal router 2919  pc.crystal_router()->gs_transfer( 1, TLcomp1, 2920  0 ); // communication towards joint tasks, with markers 2921  // sort by value (key 1) 2922 #ifdef VERBOSE 2923  std::stringstream ff1; 2924  ff1 << "TLcomp1_" << localRank << ".txt"; 2925  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append! 2926 #endif 2927  moab::TupleList::buffer sort_buffer; 2928  sort_buffer.buffer_init( TLcomp1.get_n() ); 2929  TLcomp1.sort( 1, &sort_buffer ); 2930  sort_buffer.reset(); 2931 #ifdef VERBOSE 2932  // after sorting 2933  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append! 2934 #endif 2935  // do the same, for the other component, number2, with type2 2936  // start copy 2937  TLcomp2.enableWriteAccess(); 2938  // populate second tuple 2939  std::vector< int > valuesComp2; 2940  if( *pid2 >= 0 ) 2941  { 2942  appData& data2 = context.appDatas[*pid2]; 2943  EntityHandle fset2 = data2.file_set; 2944  // in case of tempest remap, get the coverage set 2945 #ifdef MOAB_HAVE_TEMPESTREMAP 2946  if( data2.tempestData.remapper != nullptr ) // this is the case this is part of intx; 2947  fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh ); 2948 #endif 2949  2950  Range ents_of_interest; 2951  if( *type2 == 1 ) 2952  { 2953  assert( gdsTag ); 2954  rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval ); 2955  valuesComp2.resize( ents_of_interest.size() * lenTagType1 ); 2956  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); 2957  } 2958  else if( *type2 == 2 ) 2959  { 2960  rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval ); 2961  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here 2962  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids 2963  } 2964  else if( *type2 == 3 ) 2965  { 2966  rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval ); 2967  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here 2968  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids 2969  } 2970  else 2971  { 2972  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 2973  } 2974  // now fill the tuple list with info and markers 2975  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() ); 2976  TLcomp2.resize( uniq.size() ); 2977  for( std::set< int 
>::iterator sit = uniq.begin(); sit != uniq.end(); sit++ ) 2978  { 2979  // to proc, marker, element local index, index in el 2980  int marker = *sit; 2981  int to_proc = marker % numProcs; 2982  int n = TLcomp2.get_n(); 2983  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor 2984  TLcomp2.vi_wr[2 * n + 1] = marker; 2985  TLcomp2.inc_n(); 2986  } 2987  } 2988  pc.crystal_router()->gs_transfer( 1, TLcomp2, 2989  0 ); // communication towards joint tasks, with markers 2990  // sort by value (key 1) 2991 #ifdef VERBOSE 2992  std::stringstream ff2; 2993  ff2 << "TLcomp2_" << localRank << ".txt"; 2994  TLcomp2.print_to_file( ff2.str().c_str() ); 2995 #endif 2996  sort_buffer.buffer_reserve( TLcomp2.get_n() ); 2997  TLcomp2.sort( 1, &sort_buffer ); 2998  sort_buffer.reset(); 2999  // end copy 3000 #ifdef VERBOSE 3001  TLcomp2.print_to_file( ff2.str().c_str() ); 3002 #endif 3003  // need to send back the info, from the rendezvous point, for each of the values 3004  /* so go over each value, on local process in joint communicator 3005  3006  now have to send back the info needed for communication; 3007  loop in in sync over both TLComp1 and TLComp2, in local process; 3008  So, build new tuple lists, to send synchronous communication 3009  populate them at the same time, based on marker, that is indexed 3010  */ 3011  3012  TupleList TLBackToComp1; 3013  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2, 3014  TLBackToComp1.enableWriteAccess(); 3015  3016  TupleList TLBackToComp2; 3017  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc, 3018  TLBackToComp2.enableWriteAccess(); 3019  3020  int n1 = TLcomp1.get_n(); 3021  int n2 = TLcomp2.get_n(); 3022  3023  int indexInTLComp1 = 0; 3024  int indexInTLComp2 = 0; // advance both, according to the marker 3025  if( n1 > 0 && n2 > 0 ) 3026  { 3027  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done 3028  { 3029  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1]; 3030  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1]; 3031  if( currentValue1 < currentValue2 ) 3032  { 3033  // we have a big problem; basically, we are saying that 3034  // dof currentValue is on one model and not on the other 3035  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n"; 3036  indexInTLComp1++; 3037  continue; 3038  } 3039  if( currentValue1 > currentValue2 ) 3040  { 3041  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n"; 3042  indexInTLComp2++; 3043  continue; 3044  } 3045  int size1 = 1; 3046  int size2 = 1; 3047  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] ) 3048  size1++; 3049  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] ) 3050  size2++; 3051  // must be found in both lists, find the start and end indices 3052  for( int i1 = 0; i1 < size1; i1++ ) 3053  { 3054  for( int i2 = 0; i2 < size2; i2++ ) 3055  { 3056  // send the info back to components 3057  int n = TLBackToComp1.get_n(); 3058  TLBackToComp1.reserve(); 3059  TLBackToComp1.vi_wr[3 * n] = 3060  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker 3061  // came from, info from comp2 3062  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?) 
3063  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2 3064  n = TLBackToComp2.get_n(); 3065  TLBackToComp2.reserve(); 3066  TLBackToComp2.vi_wr[3 * n] = 3067  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original 3068  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?) 3069  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1 3070  // what if there are repeated markers in TLcomp2? increase just index2 3071  } 3072  } 3073  indexInTLComp1 += size1; 3074  indexInTLComp2 += size2; 3075  } 3076  } 3077  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about 3078  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 ); 3079  3080  if( *pid1 >= 0 ) 3081  { 3082  // we are on original comp 1 tasks 3083  // before ordering 3084  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the 3085  // processors it communicates with 3086 #ifdef VERBOSE 3087  std::stringstream f1; 3088  f1 << "TLBack1_" << localRank << ".txt"; 3089  TLBackToComp1.print_to_file( f1.str().c_str() ); 3090 #endif 3091  sort_buffer.buffer_reserve( TLBackToComp1.get_n() ); 3092  TLBackToComp1.sort( 1, &sort_buffer ); 3093  sort_buffer.reset(); 3094 #ifdef VERBOSE 3095  TLBackToComp1.print_to_file( f1.str().c_str() ); 3096 #endif 3097  // so we are now on pid1, we know now each marker were it has to go 3098  // add a new method to ParCommGraph, to set up the involved_IDs_map 3099  cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 ); 3100  } 3101  if( *pid2 >= 0 ) 3102  { 3103  // we are on original comp 1 tasks 3104  // before ordering 3105  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the 3106  // processors it communicates with 3107 #ifdef VERBOSE 3108  std::stringstream f2; 3109  f2 << "TLBack2_" << localRank << ".txt"; 3110  TLBackToComp2.print_to_file( f2.str().c_str() ); 3111 #endif 3112  sort_buffer.buffer_reserve( TLBackToComp2.get_n() ); 3113  TLBackToComp2.sort( 2, &sort_buffer ); 3114  sort_buffer.reset(); 3115 #ifdef VERBOSE 3116  TLBackToComp2.print_to_file( f2.str().c_str() ); 3117 #endif 3118  cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 ); 3119  } 3120  return moab::MB_SUCCESS; 3121 } 3122  3123 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid ) 3124 { 3125  appData& data = context.appDatas[*pid]; 3126  ParallelComm* pco = context.appDatas[*pid].pcomm; 3127  // collapse vertices and transform cells into triangles/quads /polys 3128  // tags we care about: area, frac, global id 3129  std::vector< Tag > tagsList; 3130  Tag tag; 3131  ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag ); 3132  if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE; // fatal error, abort 3133  tagsList.push_back( tag ); 3134  rval = context.MBI->tag_get_handle( "area", tag ); 3135  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag ); 3136  rval = context.MBI->tag_get_handle( "frac", tag ); 3137  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag ); 3138  double tol = 1.0e-9; 3139  rval = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval ); 3140  3141  // clean material sets of cells that were deleted 3142  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1, 3143  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval ); 
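// added note: remove_duplicate_vertices above may have deleted cells that are still listed in the first
// material set, so the block below empties that set and refills it with the 2D cells left in data.file_set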
3144  3145  if( !data.mat_sets.empty() ) 3146  { 3147  EntityHandle matSet = data.mat_sets[0]; 3148  Range elems; 3149  rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval ); 3150  rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval ); 3151  // put back only cells from data.file_set 3152  elems.clear(); 3153  rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval ); 3154  rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval ); 3155  } 3156  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval ); 3157  3158  ParallelMergeMesh pmm( pco, tol ); 3159  rval = pmm.merge( data.file_set, 3160  /* do not do local merge*/ false, 3161  /* 2d cells*/ 2 );MB_CHK_ERR( rval ); 3162  3163  // assign global ids only for vertices, cells have them fine 3164  rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval ); 3165  3166  // set the partition tag on the file set 3167  Tag part_tag; 3168  int dum_id = -1; 3169  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag, 3170  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id ); 3171  3172  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) ) 3173  { 3174  std::cout << " can't get par part tag.\n"; 3175  return moab::MB_FAILURE; 3176  } 3177  3178  int rank = pco->rank(); 3179  rval = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval ); 3180  3181  return moab::MB_SUCCESS; 3182 } 3183  3184 #ifdef MOAB_HAVE_TEMPESTREMAP 3185  3186 // this call must be collective on the joint communicator 3187 // intersection tasks on coupler will need to send to the components tasks the list of 3188 // id elements that are relevant: they intersected some of the target elements (which are not needed 3189 // here) 3190 // in the intersection 3191 ErrCode iMOAB_CoverageGraph( MPI_Comm* joint_communicator, 3192  iMOAB_AppID pid_src, 3193  iMOAB_AppID pid_migr, 3194  iMOAB_AppID pid_intx, 3195  int* source_id, 3196  int* migration_id, 3197  int* context_id ) 3198 { 3199  // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this exchange 3200  std::vector< int > srcSenders, receivers; 3201  ParCommGraph* sendGraph = nullptr; 3202  bool is_fortran_context = false; 3203  3204  if( pid_src && *pid_src > 0 && context.appDatas.find( *pid_src ) == context.appDatas.end() ) 3205  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid source application ID specified: " << *pid_src ); 3206  if( pid_migr && *pid_migr > 0 && context.appDatas.find( *pid_migr ) == context.appDatas.end() ) 3207  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid migration/coverage application ID specified: " << *pid_migr ); 3208  if( pid_intx && *pid_intx > 0 && context.appDatas.find( *pid_intx ) == context.appDatas.end() ) 3209  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid intersection application ID specified: " << *pid_intx ); 3210  3211  // First, find the original graph between PE sets 3212  // And based on this one, we will build the newly modified one for coverage 3213  if( *pid_src >= 0 ) 3214  { 3215  appData& dataSrc = context.appDatas[*pid_src]; 3216  int default_context_id = *migration_id; // the other one 3217  assert( dataSrc.global_id == *source_id ); 3218  is_fortran_context = dataSrc.is_fortran || is_fortran_context; 3219  if( dataSrc.pgraph.find( default_context_id ) != dataSrc.pgraph.end() ) 3220  sendGraph = dataSrc.pgraph[default_context_id]; // maybe check if it does not exist 
3221  else 3222  MB_CHK_SET_ERR( moab::MB_FAILURE, "Could not find source ParCommGraph with default migration context" ); 3223  3224  // report the sender and receiver tasks in the joint comm 3225  srcSenders = sendGraph->senders(); 3226  receivers = sendGraph->receivers(); 3227 #ifdef VERBOSE 3228  std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl; 3229 #endif 3230  } 3231  3232  ParCommGraph* recvGraph = nullptr; // will be non null on receiver tasks (intx tasks) 3233  if( *pid_migr >= 0 ) 3234  { 3235  appData& dataMigr = context.appDatas[*pid_migr]; 3236  is_fortran_context = dataMigr.is_fortran || is_fortran_context; 3237  // find the original one 3238  int default_context_id = *source_id; 3239  assert( dataMigr.global_id == *migration_id ); 3240  if( dataMigr.pgraph.find( default_context_id ) != dataMigr.pgraph.end() ) 3241  recvGraph = dataMigr.pgraph[default_context_id]; 3242  else 3243  MB_CHK_SET_ERR( moab::MB_FAILURE, 3244  "Could not find coverage receiver ParCommGraph with default migration context" ); 3245  // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view 3246  srcSenders = recvGraph->senders(); 3247  receivers = recvGraph->receivers(); 3248 #ifdef VERBOSE 3249  std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl; 3250 #endif 3251  } 3252  3253  if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context; 3254  3255  // loop over pid_intx elements, to see what original processors in joint comm have sent the 3256  // coverage mesh; If we are on intx tasks, send coverage info towards original component tasks, 3257  // about needed cells 3258  TupleList TLcovIDs; 3259  TLcovIDs.initialize( 2, 0, 0, 0, 0 ); // to proc, GLOBAL ID; estimate about 100 IDs to be sent 3260  // will push_back a new tuple, if needed 3261  TLcovIDs.enableWriteAccess(); 3262  // the crystal router will send ID cell to the original source, on the component task 3263  // if we are on intx tasks, loop over all intx elements and 3264  3265  MPI_Comm global = ( is_fortran_context ? 
MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) 3266  : *joint_communicator ); 3267  int currentRankInJointComm = -1;CHK_MPI_ERR( MPI_Comm_rank( global, &currentRankInJointComm ) ); 3268  3269  // Global id of the coverage source elements 3270  Tag gidTag = context.MBI->globalId_tag(); 3271  if( !gidTag ) return moab::MB_TAG_NOT_FOUND; // fatal error, abort 3272  3273  // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we 3274  // need to send information towards component tasks 3275  if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) != 3276  receivers.end() ) // we are on receivers tasks, we can request intx info 3277  { 3278  appData& dataIntx = context.appDatas[*pid_intx]; 3279  EntityHandle intx_set = dataIntx.file_set; 3280  EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh ); 3281  3282  Tag orgSendProcTag; 3283  MB_CHK_ERR( context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag ) ); 3284  if( !orgSendProcTag ) return moab::MB_TAG_NOT_FOUND; // fatal error, abort 3285  3286  Range intx_cells; 3287  // get all entities from the intersection set, and look at their SourceParent 3288  MB_CHK_ERR( context.MBI->get_entities_by_dimension( intx_set, 2, intx_cells ) ); 3289  // we did not compute the intersection (perhaps it was loaded from disk) 3290  // so then just get the cells from covering mesh (no need to filter participating elements only) 3291  3292  // send that info back to enhance parCommGraph cache 3293  std::map< int, std::set< int > > idsFromProcs; 3294  if( intx_cells.size() ) 3295  { 3296  Tag parentTag; 3297  // global id of the source element corresponding to intersection cell 3298  MB_CHK_ERR( context.MBI->tag_get_handle( "SourceParent", parentTag ) ); 3299  if( !parentTag ) return moab::MB_TAG_NOT_FOUND; // fatal error, abort 3300  3301  for( auto it = intx_cells.begin(); it != intx_cells.end(); ++it ) 3302  { 3303  EntityHandle intx_cell = *it; 3304  3305  int origProc; // look at receivers 3306  MB_CHK_ERR( context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc ) ); 3307  3308  // we have augmented the overlap set with ghost cells ; in that case, the 3309  // orig_sending_processor is not set so it will be -1; 3310  // if( origProc < 0 || currentRankInJointComm == origProc ) continue; 3311  if( origProc < 0 ) continue; 3312  3313  int gidCell; // get the global id of the cell for association 3314  MB_CHK_ERR( context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell ) ); 3315  idsFromProcs[origProc].insert( gidCell ); 3316  // printf( "%d: active-intx: adding %d for proc %d\n", currentRankInJointComm, gidCell, origProc ); 3317  } 3318  } 3319  3320  // NOTE: if we have no intx cells, we fall back to coverage set anyway 3321  // In either case, let us iterate over the coverage mesh and compute 3322  // the right connections needed for the coverage graph 3323  { 3324  // get all entities from the source covering set 3325  Range cover_cells; 3326  MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, cover_cells ) ); 3327  3328  // get all cells from coverage set 3329  Tag gidTag = context.MBI->globalId_tag(); 3330  3331  // look at their orig_sending_processor and associate to global ID 3332  for( auto it = cover_cells.begin(); it != cover_cells.end(); ++it ) 3333  { 3334  const EntityHandle cov_cell = *it; 3335  3336  int origProc; // look at receivers 3337  MB_CHK_ERR( context.MBI->tag_get_data( 
orgSendProcTag, &cov_cell, 1, &origProc ) ); 3338  3339  // we have augmented the overlap set with ghost cells ; in that case, the 3340  // orig_sending_processor is not set so it will be -1; 3341  if( origProc < 0 ) continue; 3342  3343  int gidCell; // get the global id of the cell for association 3344  MB_CHK_ERR( context.MBI->tag_get_data( gidTag, &cov_cell, 1, &gidCell ) ); 3345  assert( gidCell > 0 ); 3346  idsFromProcs[origProc].insert( gidCell ); 3347  } 3348  } 3349  3350 #ifdef VERBOSE 3351  std::ofstream dbfile; 3352  std::stringstream outf; 3353  outf << "idsFromProc_0" << currentRankInJointComm << ".txt"; 3354  dbfile.open( outf.str().c_str() ); 3355  dbfile << "Writing this to a file.\n"; 3356  3357  dbfile << " map size:" << idsFromProcs.size() 3358  << std::endl; // on the receiver side, these show how much data to receive 3359  // from the sender (how many ids, and how much tag data later; we need to size up the 3360  // receiver buffer) arrange in tuples , use map iterators to send the ids 3361  for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ ) 3362  { 3363  std::set< int >& setIds = mt->second; 3364  dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n"; 3365  int counter = 0; 3366  for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ ) 3367  { 3368  int valueID = *st; 3369  dbfile << " " << valueID; 3370  counter++; 3371  if( counter % 10 == 0 ) dbfile << "\n"; 3372  } 3373  dbfile << "\n"; 3374  } 3375  dbfile.close(); 3376 #endif 3377  3378  if( nullptr != recvGraph ) 3379  { 3380  ParCommGraph* newRecvGraph = new ParCommGraph( *recvGraph ); // just copy 3381  newRecvGraph->set_context_id( *context_id ); 3382  newRecvGraph->SetReceivingAfterCoverage( idsFromProcs ); 3383  // this par comm graph will need to use the coverage set 3384  // so we are for sure on intx pes (the receiver is the coupler mesh) 3385  newRecvGraph->set_cover_set( cover_set ); 3386  if( context.appDatas[*pid_migr].pgraph.find( *context_id ) == context.appDatas[*pid_migr].pgraph.end() ) 3387  context.appDatas[*pid_migr].pgraph[*context_id] = newRecvGraph; 3388  else // possible memory leak if context_id is same 3389  MB_CHK_SET_ERR( moab::MB_FAILURE, "ParCommGraph for context_id=" 3390  << *context_id << " already exists. 
Check the workflow" ); 3391  } 3392  for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); ++mit ) 3393  { 3394  int procToSendTo = mit->first; 3395  std::set< int >& idSet = mit->second; 3396  for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); ++sit ) 3397  { 3398  int n = TLcovIDs.get_n(); 3399  TLcovIDs.reserve(); 3400  TLcovIDs.vi_wr[2 * n] = procToSendTo; // send to processor 3401  TLcovIDs.vi_wr[2 * n + 1] = *sit; // global id needs index in the local_verts range 3402  } 3403  } 3404  } 3405  3406  ProcConfig pc( global ); // proc config does the crystal router 3407  // communication towards component tasks, with what ids are needed 3408  // for each task from receiver 3409  pc.crystal_router()->gs_transfer( 1, TLcovIDs, 0 ); 3410  3411  // a test to know if we are on the sender tasks (original component, in this case, atmosphere) 3412  if( nullptr != sendGraph ) 3413  { 3414  appData& dataSrc = context.appDatas[*pid_src]; 3415  // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each receiver task 3416  ParCommGraph* newSendGraph = new ParCommGraph( *sendGraph ); // just copy 3417  newSendGraph->set_context_id( *context_id ); 3418  dataSrc.pgraph[*context_id] = newSendGraph; 3419  MB_CHK_ERR( newSendGraph->settle_send_graph( TLcovIDs ) ); 3420  } 3421  return moab::MB_SUCCESS; // success 3422 } 3423  3424 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix ) 3425 { 3426  assert( prefix && strlen( prefix ) ); 3427  3428  ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id]; 3429  std::string prefix_str( prefix ); 3430  3431  if( nullptr != cgraph ) 3432  cgraph->dump_comm_information( prefix_str, *is_sender ); 3433  else 3434  { 3435  std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name 3436  << " context: " << *context_id << "\n"; 3437  } 3438  return moab::MB_SUCCESS; 3439 } 3440  3441 #endif // #ifdef MOAB_HAVE_TEMPESTREMAP 3442  3443 #endif // #ifdef MOAB_HAVE_MPI 3444  3445 #ifdef MOAB_HAVE_TEMPESTREMAP 3446  3447 #ifdef MOAB_HAVE_NETCDF 3448  3449 static ErrCode set_aream_from_trivial_distribution( iMOAB_AppID pid, int N, std::vector< double >& trvArea ) 3450 { 3451  // this needs several rounds of communication, because the mesh is distributed differently than trivially 3452  // get all the cells from the pid_source 3453  // get global ids of the cells 3454  // number of local cells: [n/size] nL = [n/size] 3455  // last one extraN = n%size; nL += extraN 3456  // start id = [ rank*nL + 1; endId = (rank+1)*nL 3457  // lst one (rank = =size-1; endId = na 3458  // the last cells get the rest 3459  // construct global ids that correspond to trvArea [, rank * 3460  appData& data = context.appDatas[*pid]; 3461  int size = 1, rank = 0; 3462 #ifdef MOAB_HAVE_MPI 3463  ParallelComm* pcomm = context.appDatas[*pid].pcomm; 3464  size = pcomm->size(); 3465  rank = pcomm->rank(); 3466 #endif 3467  3468  /// The "aream" tag should be created already; error out if not 3469  /// NOTE: This is a bad assumption 3470  /// TODO: Fix it. 
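    // Worked example of the trivial distribution assumed below (for illustration): with N = 10
    // global cells on size = 4 tasks, nL = N / size = 2, so task 0 owns global ids 1-2, task 1
    // owns 3-4, task 2 owns 5-6, and the last task keeps the remainder, ids 7-10. A global id g
    // is therefore requested from task (g - 1) / nL (ids past the last full block go to the last
    // task) and read from that task's slice of trvArea at local offset g - 1 - owner_rank * nL.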
3471  Tag areaTag; 3472  MB_CHK_ERR( context.MBI->tag_get_handle( "aream", areaTag ) ); 3473  3474  // start copy 3475  const Range& ents_to_set = data.primary_elems; 3476  size_t nents_to_be_set = ents_to_set.size(); 3477  3478  Tag gidTag = context.MBI->globalId_tag(); 3479  std::vector< int > globalIds( nents_to_be_set ); 3480  MB_CHK_ERR( context.MBI->tag_get_data( gidTag, ents_to_set, &globalIds[0] ) ); 3481  3482  const bool serial = ( size == 1 ); 3483  if( serial ) 3484  { 3485  // we do not assume anymore that the number of entities has to match 3486  // we will set only what matches, and skip entities that do not have corresponding global ids 3487  //assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 ); 3488  // tags are unrolled, we loop over global ids first, then careful about tags 3489  for( size_t i = 0; i < nents_to_be_set; i++ ) 3490  { 3491  int gid = globalIds[i]; 3492  int indexInVal = 3493  gid - 1; // assume the values are in order of global id, starting from 1 to number of cells 3494  assert( indexInVal < N ); 3495  EntityHandle eh = ents_to_set[i]; 3496  MB_CHK_ERR( context.MBI->tag_set_data( areaTag, &eh, 1, &trvArea[indexInVal] ) ); 3497  } 3498  } 3499 #ifdef MOAB_HAVE_MPI 3500  else // it can be not serial only if size > 1, parallel 3501  { 3502  int nL = N / size; // how many global ids per task, except the last one 3503  3504  // in this case, we have to use 2 crystal routers, to send data to the processor that needs it 3505  // we will create first a tuple to rendevous points, then from there send to the processor that requested it 3506  // it is a 2-hop global gather scatter 3507  // TODO: allow for tags of different length; this is wrong 3508  //assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 ); 3509  TupleList TLreq; 3510  TLreq.initialize( 3, 0, 0, 0, nents_to_be_set ); // to proc, marker(gid), 3511  TLreq.enableWriteAccess(); 3512  // the processor id that processes global_id is global_id / num_ents_per_proc 3513  3514  // send requests to processor that has the global id 3515  for( size_t i = 0; i < nents_to_be_set; i++ ) 3516  { 3517  // to proc, marker, element local index, index in el 3518  int marker = globalIds[i]; 3519  int to_proc = ( marker - 1 ) / nL; // proc from 0 to size - 1 3520  3521  if( to_proc == size ) to_proc = size - 1; // the last array is the longest 3522  int n = TLreq.get_n(); 3523  TLreq.vi_wr[3 * n] = to_proc; // send to processor 3524  TLreq.vi_wr[3 * n + 1] = marker; 3525  TLreq.vi_wr[3 * n + 2] = i; // local index for this global id 3526  TLreq.inc_n(); 3527  } 3528  3529  // send now requests, basically inform the rendez-vous point who needs a particular global id 3530  // send the data to the other processors: 3531  ( pcomm->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 ); 3532  3533  int sizeBack = TLreq.get_n(); 3534  // start copy from comm graph settle 3535  TupleList TLBack; 3536  TLBack.initialize( 3, 0, 0, 1, sizeBack ); // to proc, marker, tag from proc , tag values 3537  TLBack.enableWriteAccess(); 3538  for( int i = 0; i < sizeBack; i++ ) 3539  { 3540  int from_proc = TLreq.vi_wr[3 * i]; // send to processor 3541  TLBack.vi_wr[3 * i] = from_proc; 3542  int marker = TLreq.vi_wr[3 * i + 1]; // the actual global id 3543  TLBack.vi_wr[3 * i + 1] = marker; 3544  TLBack.vi_wr[3 * i + 2] = TLreq.vi_wr[3 * i + 2]; // the original index this came from 3545  // this marker should give an idea of what index is actually needed for value 3546  int index = marker - 1 - rank 
* nL; 3547  TLBack.vr_wr[i] = trvArea[index]; // !!! big assumptions about indices 3548  TLBack.inc_n(); 3549  } 3550  3551  ( pcomm->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 ); 3552  3553  int n1 = TLBack.get_n(); // should be the number of nents_to_be_set 3554  for( int i = 0; i < n1; i++ ) 3555  { 3556  // int gid = TLBack.vi_rd[3 * i + 1]; // marker 3557  int origIndex = TLBack.vi_rd[3 * i + 2]; 3558  EntityHandle eh = ents_to_set[origIndex]; 3559  MB_CHK_ERR( context.MBI->tag_set_data( areaTag, &eh, 1, &TLBack.vr_rd[i] ) ); 3560  } 3561  } 3562 #endif 3563  // end copy 3564  3565  return MB_SUCCESS; 3566 } 3567  3568 ErrCode iMOAB_LoadMappingWeightsFromFile( 3569  iMOAB_AppID pid_source, 3570  iMOAB_AppID pid_target, 3571  iMOAB_AppID pid_intersection, 3572  int* srctype, 3573  int* tgttype, 3574  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */ 3575  const iMOAB_String remap_weights_filename ) 3576 { 3577  assert( srctype && tgttype ); 3578  3579  // get the local degrees of freedom, from the pid_cpl and type of mesh 3580  // Get the source and target data and pcomm objects 3581  appData& data_source = context.appDatas[*pid_source]; 3582  appData& data_target = context.appDatas[*pid_target]; 3583  appData& data_intx = context.appDatas[*pid_intersection]; 3584  TempestMapAppData& tdata = data_intx.tempestData; 3585  3586  // check if the remapped context is null; we need to fix that, if so 3587  if( tdata.remapper == nullptr ) 3588  { 3589  // user has not called the coverage mesh computation routine -- so explicitly call it now 3590  // this check supports the traditional workflow of directly computing mesh intersection 3591  // and letting this routine compute coverage mesh as needed 3592  MB_CHK_ERR( iMOAB_ComputeCoverageMesh( pid_source, pid_target, pid_intersection ) ); 3593  } 3594  3595  // Setup loading of weights onto TempestOnlineMap 3596  // Set the context for the remapping weights computation 3597  tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper ); 3598  3599  // Now allocate and initialize the remapper object 3600  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )]; 3601  assert( weightMap != nullptr ); 3602  3603  EntityHandle source_set = data_source.file_set; 3604  EntityHandle covering_set = tdata.remapper->GetMeshSet( Remapper::CoveringMesh ); 3605  EntityHandle target_set = data_target.file_set; // default: row based partition 3606  3607  int src_elem_dof_length = 1, tgt_elem_dof_length = 1; // default=1: FV - element average DoF value 3608  // tags of interest are either GLOBAL_DOFS (SE) or GLOBAL_ID (FV) 3609  Tag gdsTag = nullptr; 3610  if( *srctype == 1 || *tgttype == 1 ) 3611  { 3612  MB_CHK_ERR( context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag ) ); 3613  assert( gdsTag ); 3614  } 3615  3616  // Find the DoF tag length 3617  // if it fails, usually it is 16 3618  // first check if we need to query the source 3619  if( *srctype == 1 ) // spectral element 3620  MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, src_elem_dof_length ) ); 3621  // find the DoF tag length 3622  // next check if we need to query the target 3623  if( *tgttype == 1 ) // spectral element 3624  MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, tgt_elem_dof_length ) ); 3625  3626  Tag gidTag = context.MBI->globalId_tag(); 3627  std::vector< int > srcDofValues, tgtDofValues; 3628  3629  // populate first tuple 3630  // will be filled with 
entities on coupler, from which we will get the DOFs, based on type 3631  Range srcc_ents_of_interest, src_ents_of_interest, tgt_ents_of_interest; 3632  3633  if( *srctype == 1 ) // spectral element 3634  { 3635  MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, src_elem_dof_length ) ); 3636  MB_CHK_ERR( context.MBI->get_entities_by_type( covering_set, MBQUAD, src_ents_of_interest ) ); 3637  srcDofValues.resize( src_ents_of_interest.size() * src_elem_dof_length ); 3638  MB_CHK_ERR( context.MBI->tag_get_data( gdsTag, src_ents_of_interest, &srcDofValues[0] ) ); 3639  } 3640  else if( *srctype == 2 ) 3641  { 3642  // vertex global ids 3643  MB_CHK_ERR( context.MBI->get_entities_by_type( covering_set, MBVERTEX, src_ents_of_interest ) ); 3644  srcDofValues.resize( src_ents_of_interest.size() * src_elem_dof_length ); 3645  MB_CHK_ERR( context.MBI->tag_get_data( gidTag, src_ents_of_interest, &srcDofValues[0] ) ); 3646  } 3647  else if( *srctype == 3 ) // for FV meshes, just get the global id of cell 3648  { 3649  // element global ids 3650  MB_CHK_ERR( context.MBI->get_entities_by_dimension( covering_set, 2, src_ents_of_interest ) ); 3651  srcDofValues.resize( src_ents_of_interest.size() * src_elem_dof_length ); 3652  MB_CHK_ERR( context.MBI->tag_get_data( gidTag, src_ents_of_interest, &srcDofValues[0] ) ); 3653  } 3654  else 3655  { 3656  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3 3657  } 3658  3659  if( *tgttype == 1 ) // spectral element 3660  { 3661  MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, tgt_elem_dof_length ) ); 3662  MB_CHK_ERR( context.MBI->get_entities_by_type( target_set, MBQUAD, tgt_ents_of_interest ) ); 3663  tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length ); 3664  MB_CHK_ERR( context.MBI->tag_get_data( gdsTag, tgt_ents_of_interest, &tgtDofValues[0] ) ); 3665  } 3666  else if( *tgttype == 2 ) 3667  { 3668  // vertex global ids 3669  MB_CHK_ERR( context.MBI->get_entities_by_type( target_set, MBVERTEX, tgt_ents_of_interest ) ); 3670  tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length ); 3671  MB_CHK_ERR( context.MBI->tag_get_data( gidTag, tgt_ents_of_interest, &tgtDofValues[0] ) ); 3672  } 3673  else if( *tgttype == 3 ) // for FV meshes, just get the global id of cell 3674  { 3675  // element global ids 3676  MB_CHK_ERR( context.MBI->get_entities_by_dimension( target_set, 2, tgt_ents_of_interest ) ); 3677  tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length ); 3678  MB_CHK_ERR( context.MBI->tag_get_data( gidTag, tgt_ents_of_interest, &tgtDofValues[0] ) ); 3679  } 3680  else 3681  { 3682  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3 3683  } 3684  3685  // pass tgt ordered dofs, and unique 3686  // we need to read area_b and set aream tag on target cells, too 3687  std::vector< int > sortTgtDofs( tgtDofValues.begin(), tgtDofValues.end() ); 3688  std::sort( sortTgtDofs.begin(), sortTgtDofs.end() ); 3689  sortTgtDofs.erase( std::unique( sortTgtDofs.begin(), sortTgtDofs.end() ), sortTgtDofs.end() ); // remove duplicates 3690  3691  /// the tag should be created already in the e3sm workflow; if not, create it here 3692  Tag areaTag; 3693  ErrorCode rval = 3694  context.MBI->tag_get_handle( "aream", 1, MB_TYPE_DOUBLE, areaTag, MB_TAG_DENSE | MB_TAG_EXCL | MB_TAG_CREAT ); 3695  if( MB_ALREADY_ALLOCATED == rval ) 3696  { 3697 #ifdef MOAB_HAVE_MPI 3698  if( 0 == data_intx.pcomm->rank() ) 3699 #endif 3700  std::cout << " aream tag already defined \n "; 3701  } 3702  3703  std::vector< double > 
trvAreaA, trvAreaB; // passed by reference 3704  int nA, nB; // passed by reference, so returned 3705  MB_CHK_SET_ERR( weightMap->ReadParallelMap( remap_weights_filename, sortTgtDofs, trvAreaA, nA, trvAreaB, nB ), 3706  "reading map from disk failed" ); 3707  // trivially distributed areaAs and areaBs will need to be set on their correct source and target cells, as an aream tag 3708  3709  // if we are on target mesh (row based partition) 3710  tdata.pid_src = pid_source; 3711  tdata.pid_dest = pid_target; 3712  3713  // TODO: now if coverage set exists, let us perform a filter based on 3714  // available source/target nnz data in the map. The idea is to look at the 3715  // source coverage and target meshes and to figure out only relevant elements 3716  // that need to participate in the meshes. 3717  3718  tdata.remapper->SetMeshSet( Remapper::SourceMesh, source_set, &srcc_ents_of_interest ); 3719  // we have read the area A from map file, and we will set it as a aream double tag on the source set, knowing that we 3720  // read it trivially, with a trivial distribution by the global DOFs 3721  // local , private method: 3722  MB_CHK_SET_ERR( set_aream_from_trivial_distribution( pid_source, nA, trvAreaA ), " fail to set aream on source " ); 3723  MB_CHK_SET_ERR( set_aream_from_trivial_distribution( pid_target, nB, trvAreaB ), " fail to set aream on target " ); 3724  3725  tdata.remapper->SetMeshSet( Remapper::CoveringMesh, covering_set, &src_ents_of_interest ); 3726  weightMap->SetSourceNDofsPerElement( src_elem_dof_length ); 3727  weightMap->set_col_dc_dofs( srcDofValues ); // will set col_dtoc_dofmap 3728  3729  tdata.remapper->SetMeshSet( Remapper::TargetMesh, target_set, &tgt_ents_of_interest ); 3730  weightMap->SetDestinationNDofsPerElement( tgt_elem_dof_length ); 3731  weightMap->set_row_dc_dofs( tgtDofValues ); // will set row_dtoc_dofmap 3732  3733  // TODO: ideally, we should get this from the remap_weights_filename and just propagate it 3734  std::string metadataStr = std::string( remap_weights_filename ) + ";FV:1:GLOBAL_ID;FV:1:GLOBAL_ID"; 3735  3736  data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr; 3737  3738  return moab::MB_SUCCESS; 3739 } 3740  3741 ErrCode iMOAB_WriteMappingWeightsToFile( 3742  iMOAB_AppID pid_intersection, 3743  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */ 3744  const iMOAB_String remap_weights_filename ) 3745 { 3746  assert( solution_weights_identifier && strlen( solution_weights_identifier ) ); 3747  assert( remap_weights_filename && strlen( remap_weights_filename ) ); 3748  3749  ErrorCode rval; 3750  3751  // Get the source and target data and pcomm objects 3752  appData& data_intx = context.appDatas[*pid_intersection]; 3753  TempestMapAppData& tdata = data_intx.tempestData; 3754  3755  // Get the handle to the remapper object 3756  assert( tdata.remapper != nullptr ); 3757  3758  // Now get online weights object and ensure it is valid 3759  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )]; 3760  assert( weightMap != nullptr ); 3761  3762  std::string filename = std::string( remap_weights_filename ); 3763  3764  std::string metadataStr = data_intx.metadataMap[std::string( solution_weights_identifier )]; 3765  std::map< std::string, std::string > attrMap; 3766  attrMap["title"] = "MOAB-TempestRemap Online Regridding Weight Generator"; 3767  attrMap["normalization"] = "ovarea"; 3768  attrMap["map_aPb"] = filename; 3769  3770  // const 
std::string delim = ";"; 3771  // size_t pos = 0, index = 0; 3772  // std::vector< std::string > stringAttr( 3 ); 3773  // // use find() function to get the position of the delimiters 3774  // while( ( pos = metadataStr.find( delim ) ) != std::string::npos ) 3775  // { 3776  // std::string token1 = metadataStr.substr( 0, pos ); // store the substring 3777  // if( token1.size() > 0 || index == 0 ) stringAttr[index++] = token1; 3778  // std::cout << "\t Found token: " << token1 << std::endl; 3779  // metadataStr.erase( 3780  // 0, pos + delim.length() ); /* erase() function store the current positon and move to next token. */ 3781  // } 3782  // std::cout << "\t Found token: " << metadataStr << " -- and index = " << index << std::endl; 3783  // stringAttr[index] = metadataStr; // store the last token of the string. 3784  // assert( index == 2 ); 3785  // attrMap["remap_options"] = stringAttr[0]; 3786  // attrMap["methodorder_b"] = stringAttr[1]; 3787  // attrMap["methodorder_a"] = stringAttr[2]; 3788  attrMap["concave_a"] = "false"; // defaults 3789  attrMap["concave_b"] = "false"; // defaults 3790  attrMap["bubble"] = "true"; // defaults 3791  attrMap["MOABversion"] = std::string( MOAB_VERSION ); 3792  3793  // Write the map file to disk in parallel using either HDF5 or SCRIP interface 3794  rval = weightMap->WriteParallelMap( filename, attrMap );MB_CHK_ERR( rval ); 3795  3796  return moab::MB_SUCCESS; 3797 } 3798  3799 #ifdef MOAB_HAVE_MPI 3800 ErrCode iMOAB_MigrateMapMesh( iMOAB_AppID pid1, 3801  iMOAB_AppID pid2, 3802  iMOAB_AppID pid3, 3803  MPI_Comm* jointcomm, 3804  MPI_Group* groupA, 3805  MPI_Group* groupB, 3806  int* type, 3807  int* comp1, 3808  int* comp2, 3809  int* direction ) 3810 { 3811  assert( jointcomm ); 3812  assert( groupA ); 3813  assert( groupB ); 3814  bool is_fortran = false; 3815  if( *pid1 >= 0 ) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran; 3816  if( *pid2 >= 0 ) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran; 3817  if( *pid3 >= 0 ) is_fortran = context.appDatas[*pid3].is_fortran || is_fortran; 3818  3819  MPI_Comm joint_communicator = 3820  ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm ); 3821  MPI_Group group_first = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA ); 3822  MPI_Group group_second = ( is_fortran ? 
MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB ); 3823  3824  ErrorCode rval = MB_SUCCESS; 3825  int localRank = 0, numProcs = 1; 3826  3827  // Get the local rank and number of processes in the joint communicator 3828  MPI_Comm_rank( joint_communicator, &localRank ); 3829  MPI_Comm_size( joint_communicator, &numProcs ); 3830  3831  // Next instantiate the par comm graph 3832  // here we need to look at direction 3833  // this direction is good for atm source -> ocn target coupler example 3834  ParCommGraph* cgraph = nullptr; 3835  if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 ); 3836  3837  ParCommGraph* cgraph_rev = nullptr; 3838  if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 ); 3839  3840  // we should search if we have another pcomm with the same comp ids in the list already 3841  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph 3842  if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp 3843  if( *pid3 >= 0 ) context.appDatas[*pid3].pgraph[*comp1] = cgraph_rev; // from 2 to 1 3844  3845  // each model has a list of global ids that will need to be sent by gs to rendezvous the other 3846  // model on the joint_communicator 3847  TupleList TLcomp1; 3848  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker 3849  TupleList TLcomp2; 3850  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker 3851  3852  // will push_back a new tuple, if needed 3853  TLcomp1.enableWriteAccess(); 3854  3855  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID 3856  Tag gdsTag; 3857  3858  // find the values on first cell 3859  int lenTagType1 = 1; 3860  if( *type == 1 ) 3861  { 3862  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval ); 3863  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16 3864  } 3865  Tag tagType2 = context.MBI->globalId_tag(); 3866  3867  std::vector< int > valuesComp1; 3868  3869  // populate first tuple 3870  Range ents_of_interest; // will be filled with entities on pid1, that need to be distributed, 3871  // rearranged in split_ranges map 3872  if( *pid1 >= 0 ) 3873  { 3874  appData& data1 = context.appDatas[*pid1]; 3875  EntityHandle fset1 = data1.file_set; 3876  3877  if( *type == 1 ) 3878  { 3879  assert( gdsTag ); 3880  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval ); 3881  valuesComp1.resize( ents_of_interest.size() * lenTagType1 ); 3882  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); 3883  } 3884  else if( *type == 2 ) 3885  { 3886  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval ); 3887  valuesComp1.resize( ents_of_interest.size() ); 3888  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids 3889  } 3890  else if( *type == 3 ) // for FV meshes, just get the global id of cell 3891  { 3892  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval ); 3893  valuesComp1.resize( ents_of_interest.size() ); 3894  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids 3895  } 3896  else 3897  { 3898  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3 3899  } 3900  // 
now fill the tuple list with info and markers 3901  // because we will send only the ids, order and compress the list 3902  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() ); 3903  TLcomp1.resize( uniq.size() ); 3904  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ ) 3905  { 3906  // to proc, marker, element local index, index in el 3907  int marker = *sit; 3908  int to_proc = marker % numProcs; 3909  int n = TLcomp1.get_n(); 3910  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor 3911  TLcomp1.vi_wr[2 * n + 1] = marker; 3912  TLcomp1.inc_n(); 3913  } 3914  } 3915  3916  ProcConfig pc( joint_communicator ); // proc config does the crystal router 3917  pc.crystal_router()->gs_transfer( 1, TLcomp1, 3918  0 ); // communication towards joint tasks, with markers 3919  // sort by value (key 1) 3920 #ifdef VERBOSE 3921  std::stringstream ff1; 3922  ff1 << "TLcomp1_" << localRank << ".txt"; 3923  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append! 3924 #endif 3925  moab::TupleList::buffer sort_buffer; 3926  sort_buffer.buffer_init( TLcomp1.get_n() ); 3927  TLcomp1.sort( 1, &sort_buffer ); 3928  sort_buffer.reset(); 3929 #ifdef VERBOSE 3930  // after sorting 3931  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append! 3932 #endif 3933  // do the same, for the other component, number2, with type2 3934  // start copy 3935  TLcomp2.enableWriteAccess(); 3936  3937  moab::TempestOnlineMap* weightMap = nullptr; // declare it outside, but it will make sense only for *pid >= 0 3938  // we know that :) (or *pid2 >= 0, it means we are on the coupler PEs, read map exists, and coupler procs exist) 3939  // populate second tuple with ids from read map: we need row_gdofmap and col_gdofmap 3940  std::vector< int > valuesComp2; 3941  if( *pid2 >= 0 ) // we are now on coupler, map side 3942  { 3943  appData& data2 = context.appDatas[*pid2]; 3944  TempestMapAppData& tdata = data2.tempestData; 3945  // should be only one map, read from file 3946  assert( tdata.weightMaps.size() == 1 ); 3947  // maybe we need to check it is the map we expect 3948  weightMap = tdata.weightMaps.begin()->second; 3949  // std::vector<int> ids_of_interest; 3950  // do a deep copy of the ids of interest: row ids or col ids, target or source direction 3951  if( *direction == 1 ) 3952  { 3953  // we are interested in col ids, source 3954  // new method from moab::TempestOnlineMap 3955  rval = weightMap->fill_col_ids( valuesComp2 );MB_CHK_ERR( rval ); 3956  } 3957  else if( *direction == 2 ) 3958  { 3959  // we are interested in row ids, for target ids 3960  rval = weightMap->fill_row_ids( valuesComp2 );MB_CHK_ERR( rval ); 3961  } 3962  // 3963  3964  // now fill the tuple list with info and markers 3965  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() ); 3966  TLcomp2.resize( uniq.size() ); 3967  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ ) 3968  { 3969  // to proc, marker, element local index, index in el 3970  int marker = *sit; 3971  int to_proc = marker % numProcs; 3972  int n = TLcomp2.get_n(); 3973  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor 3974  TLcomp2.vi_wr[2 * n + 1] = marker; 3975  TLcomp2.inc_n(); 3976  } 3977  } 3978  pc.crystal_router()->gs_transfer( 1, TLcomp2, 3979  0 ); // communication towards joint tasks, with markers 3980  // sort by value (key 1) 3981  // in the rendez-vous approach, markers meet at meet point (rendez-vous) processor marker % nprocs 3982 #ifdef VERBOSE 3983  std::stringstream ff2; 3984  
ff2 << "TLcomp2_" << localRank << ".txt"; 3985  TLcomp2.print_to_file( ff2.str().c_str() ); 3986 #endif 3987  sort_buffer.buffer_reserve( TLcomp2.get_n() ); 3988  TLcomp2.sort( 1, &sort_buffer ); 3989  sort_buffer.reset(); 3990  // end copy 3991 #ifdef VERBOSE 3992  TLcomp2.print_to_file( ff2.str().c_str() ); 3993 #endif 3994  // need to send back the info, from the rendezvous point, for each of the values 3995  /* so go over each value, on local process in joint communicator, 3996  3997  now have to send back the info needed for communication; 3998  loop in in sync over both TLComp1 and TLComp2, in local process; 3999  So, build new tuple lists, to send synchronous communication 4000  populate them at the same time, based on marker, that is indexed 4001  */ 4002  4003  TupleList TLBackToComp1; 4004  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2, 4005  TLBackToComp1.enableWriteAccess(); 4006  4007  TupleList TLBackToComp2; 4008  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc, 4009  TLBackToComp2.enableWriteAccess(); 4010  4011  int n1 = TLcomp1.get_n(); 4012  int n2 = TLcomp2.get_n(); 4013  4014  int indexInTLComp1 = 0; 4015  int indexInTLComp2 = 0; // advance both, according to the marker 4016  if( n1 > 0 && n2 > 0 ) 4017  { 4018  4019  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done 4020  { 4021  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1]; 4022  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1]; 4023  if( currentValue1 < currentValue2 ) 4024  { 4025  // we have a big problem; basically, we are saying that 4026  // dof currentValue is on one model and not on the other 4027  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n"; 4028  indexInTLComp1++; 4029  continue; 4030  } 4031  if( currentValue1 > currentValue2 ) 4032  { 4033  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n"; 4034  indexInTLComp2++; 4035  continue; 4036  } 4037  int size1 = 1; 4038  int size2 = 1; 4039  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] ) 4040  size1++; 4041  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] ) 4042  size2++; 4043  // must be found in both lists, find the start and end indices 4044  for( int i1 = 0; i1 < size1; i1++ ) 4045  { 4046  for( int i2 = 0; i2 < size2; i2++ ) 4047  { 4048  // send the info back to components 4049  int n = TLBackToComp1.get_n(); 4050  TLBackToComp1.reserve(); 4051  TLBackToComp1.vi_wr[3 * n] = 4052  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker 4053  // came from, info from comp2 4054  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?) 4055  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2 4056  n = TLBackToComp2.get_n(); 4057  TLBackToComp2.reserve(); 4058  TLBackToComp2.vi_wr[3 * n] = 4059  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original 4060  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?) 4061  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1 4062  // what if there are repeated markers in TLcomp2? 
increase just index2 4063  } 4064  } 4065  indexInTLComp1 += size1; 4066  indexInTLComp2 += size2; 4067  } 4068  } 4069  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about 4070  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 ); 4071  4072  TupleList TLv; // vertices 4073  TupleList TLc; // cells if needed (not type 2) 4074  4075  if( *pid1 >= 0 ) 4076  { 4077  // we are on original comp 1 tasks 4078  // before ordering 4079  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the 4080  // processors it communicates with 4081 #ifdef VERBOSE 4082  std::stringstream f1; 4083  f1 << "TLBack1_" << localRank << ".txt"; 4084  TLBackToComp1.print_to_file( f1.str().c_str() ); 4085 #endif 4086  // so we are now on pid1, we know now each marker were it has to go 4087  // add a new method to ParCommGraph, to set up the split_ranges and involved_IDs_map 4088  rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval ); 4089  // we can just send vertices and elements, with crystal routers; 4090  // on the receiving end, make sure they are not duplicated, by looking at the global id 4091  // if *type is 1, also send global_dofs tag in the element tuple 4092  rval = cgraph->form_tuples_to_migrate_mesh( context.MBI, TLv, TLc, *type, lenTagType1 );MB_CHK_ERR( rval ); 4093  } 4094  else 4095  { 4096  TLv.initialize( 2, 0, 0, 3, 0 ); // no vertices here, for sure 4097  TLv.enableWriteAccess(); // to be able to receive stuff, even if nothing is here yet, on this task 4098  if( *type != 2 ) // for point cloud, we do not need to initialize TLc (for cells) 4099  { 4100  // we still need to initialize the tuples with the right size, as in form_tuples_to_migrate_mesh 4101  int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 + 4102  10; // 10 is the max number of vertices in cell; kind of arbitrary 4103  TLc.initialize( size_tuple, 0, 0, 0, 0 ); 4104  TLc.enableWriteAccess(); 4105  } 4106  } 4107  pc.crystal_router()->gs_transfer( 1, TLv, 0 ); // communication towards coupler tasks, with mesh vertices 4108  if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 ); // those are cells 4109  4110  if( *pid3 >= 0 ) // will receive the mesh, on coupler pes! 
4111  { 4112  appData& data3 = context.appDatas[*pid3]; 4113  EntityHandle fset3 = data3.file_set; 4114  Range primary_ents3; // vertices for type 2, cells of dim 2 for type 1 or 3 4115  std::vector< int > values_entities; // will be the size of primary_ents3 * lenTagType1 4116  rval = cgraph_rev->form_mesh_from_tuples( context.MBI, TLv, TLc, *type, lenTagType1, fset3, primary_ents3, 4117  values_entities );MB_CHK_ERR( rval ); 4118  iMOAB_UpdateMeshInfo( pid3 ); 4119  int ndofPerEl = 1; 4120  if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) ); 4121  // because we are on the coupler, we know that the read map pid2 exists 4122  assert( *pid2 >= 0 ); 4123  appData& dataIntx = context.appDatas[*pid2]; 4124  TempestMapAppData& tdata = dataIntx.tempestData; 4125  4126  // if we are on source coverage, direction 1, we can set covering mesh, covering cells 4127  if( 1 == *direction ) 4128  { 4129  tdata.pid_src = pid3; 4130  tdata.remapper->SetMeshSet( Remapper::CoveringMesh, fset3, &primary_ents3 ); 4131  weightMap->SetSourceNDofsPerElement( ndofPerEl ); 4132  weightMap->set_col_dc_dofs( values_entities ); // will set col_dtoc_dofmap 4133  } 4134  // if we are on target, we can set the target cells 4135  else 4136  { 4137  tdata.pid_dest = pid3; 4138  tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset3, &primary_ents3 ); 4139  weightMap->SetDestinationNDofsPerElement( ndofPerEl ); 4140  weightMap->set_row_dc_dofs( values_entities ); // will set row_dtoc_dofmap 4141  } 4142  } 4143  4144  return moab::MB_SUCCESS; 4145 } 4146  4147 #endif // #ifdef MOAB_HAVE_MPI 4148  4149 #endif // #ifdef MOAB_HAVE_NETCDF 4150  4151 #define USE_API 4152 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius ) 4153 { 4154  ErrorCode rval; 4155  moab::CartVect pos; 4156  4157  Range& verts = context.appDatas[*pid].all_verts; 4158  *radius = 1.0; 4159  if( verts.empty() ) return moab::MB_SUCCESS; 4160  moab::EntityHandle firstVertex = ( verts[0] ); 4161  4162  // coordinate data 4163  rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval ); 4164  4165  // compute the distance from origin 4166  // TODO: we could do this in a loop to verify if the pid represents a spherical mesh 4167  *radius = pos.length(); 4168  return moab::MB_SUCCESS; 4169 } 4170  4171 ErrCode iMOAB_SetMapGhostLayers( iMOAB_AppID pid, int* n_src_ghost_layers, int* n_tgt_ghost_layers ) 4172 { 4173  appData& data = context.appDatas[*pid]; 4174  4175  // Set the number of ghost layers 4176  data.tempestData.num_src_ghost_layers = *n_src_ghost_layers; // number of ghost layers for source 4177  data.tempestData.num_tgt_ghost_layers = *n_tgt_ghost_layers; // number of ghost layers for source 4178  4179  return moab::MB_SUCCESS; 4180 } 4181  4182 ErrCode iMOAB_ComputeCoverageMesh( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx ) 4183 { 4184  // Default constant parameters 4185  constexpr bool validate = true; 4186  constexpr bool meshCleanup = true; 4187  constexpr bool gnomonic = true; 4188  constexpr double defaultradius = 1.0; 4189  constexpr double boxeps = 1.e-10; 4190  4191  // Other constant parameters 4192  const double epsrel = ReferenceTolerance; // ReferenceTolerance is defined in Defines.h in tempestremap source ; 4193  double radius_source = 1.0; 4194  double radius_target = 1.0; 4195  4196  // Error code definitions 4197  ErrorCode rval; 4198  ErrCode ierr; 4199  4200  // Get the source and target data and pcomm objects 4201  appData& data_src = 
context.appDatas[*pid_src]; 4202  appData& data_tgt = context.appDatas[*pid_tgt]; 4203  appData& data_intx = context.appDatas[*pid_intx]; 4204 #ifdef MOAB_HAVE_MPI 4205  ParallelComm* pco_intx = context.appDatas[*pid_intx].pcomm; 4206 #endif 4207  4208  // Mesh intersection has already been computed; Return early. 4209  TempestMapAppData& tdata = data_intx.tempestData; 4210  if( tdata.remapper != nullptr ) return moab::MB_SUCCESS; // nothing to do 4211  4212  int rank = 0; 4213 #ifdef MOAB_HAVE_MPI 4214  if( pco_intx ) 4215  { 4216  rank = pco_intx->rank(); 4217  rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval ); 4218  } 4219 #endif 4220  4221  moab::DebugOutput outputFormatter( std::cout, rank, 0 ); 4222  outputFormatter.set_prefix( "[iMOAB_ComputeCoverageMesh]: " ); 4223  4224  ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr ); 4225  ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr ); 4226  4227  // Rescale the radius of both to compute the intersection 4228  rval = ComputeSphereRadius( pid_src, &radius_source );MB_CHK_ERR( rval ); 4229  rval = ComputeSphereRadius( pid_tgt, &radius_target );MB_CHK_ERR( rval ); 4230 #ifdef VERBOSE 4231  if( !rank ) 4232  outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source, 4233  radius_target ); 4234 #endif 4235  4236  /* Let make sure that the radius match for source and target meshes. If not, rescale now and 4237  * unscale later. */ 4238  if( fabs( radius_source - radius_target ) > 1e-10 ) 4239  { /* the radii are different */ 4240  rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval ); 4241  rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval ); 4242  } 4243  4244  // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available) 4245 #ifdef MOAB_HAVE_TEMPESTREMAP 4246  IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature ); 4247 #else 4248  IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller ); 4249 #endif 4250  4251  if( meshCleanup ) 4252  { 4253  // Address issues for source mesh first 4254  // fixes to enforce positive orientation of the vertices (outward normal) 4255  MB_CHK_ERR( 4256  areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/ ) ); 4257  4258  // fixes to clean up any degenerate quadrangular elements present in the mesh (RLL specifically?) 4259  MB_CHK_ERR( IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set ) ); 4260  4261  // fixes to enforce convexity in case concave elements are present 4262  MB_CHK_ERR( moab::IntxUtils::enforce_convexity( context.MBI, data_src.file_set, rank ) ); 4263  4264  // Address issues for target mesh first 4265  // fixes to enforce positive orientation of the vertices (outward normal) 4266  MB_CHK_ERR( 4267  areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ ) ); 4268  4269  // fixes to clean up any degenerate quadrangular elements present in the mesh (RLL specifically?) 
4270  MB_CHK_ERR( IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set ) ); 4271  4272  // fixes to enforce convexity in case concave elements are present 4273  MB_CHK_ERR( moab::IntxUtils::enforce_convexity( context.MBI, data_tgt.file_set, rank ) ); 4274  } 4275  4276  // print verbosely about the problem setting 4277 #ifdef VERBOSE 4278  { 4279  moab::Range rintxverts, rintxelems; 4280  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval ); 4281  rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );MB_CHK_ERR( rval ); 4282  4283  moab::Range bintxverts, bintxelems; 4284  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval ); 4285  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );MB_CHK_ERR( rval ); 4286  4287  if( !rank ) 4288  { 4289  outputFormatter.printf( 0, "The source set contains %zu vertices and %zu elements\n", rintxverts.size(), 4290  rintxelems.size() ); 4291  outputFormatter.printf( 0, "The target set contains %zu vertices and %zu elements\n", bintxverts.size(), 4292  bintxelems.size() ); 4293  } 4294  } 4295 #endif 4296  // use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] ); 4297  // advancing front method will fail unless source mesh has no holes (area = 4 * pi) on unit sphere 4298  // use_kdtree_search = true; 4299  4300  data_intx.dimension = data_tgt.dimension; 4301  // set the context for the source and destination applications 4302  tdata.pid_src = pid_src; 4303  tdata.pid_dest = pid_tgt; 4304  4305  // Now allocate and initialize the remapper object 4306 #ifdef MOAB_HAVE_MPI 4307  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx ); 4308 #else 4309  tdata.remapper = new moab::TempestRemapper( context.MBI ); 4310 #endif 4311  tdata.remapper->meshValidate = validate; 4312  tdata.remapper->constructEdgeMap = true; 4313  4314  // Do not create new filesets; Use the sets from our respective applications 4315  tdata.remapper->initialize( false ); 4316  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set; 4317  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set; 4318  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set; 4319  4320  // First, compute the covering source set. 
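    // The covering set gathers, on each intersection task, the source cells that may overlap its
    // local target cells, honoring the number of source ghost layers requested through
    // iMOAB_SetMapGhostLayers. A minimal diagnostic in the style of the VERBOSE blocks used in
    // this file (illustrative only):
#ifdef VERBOSE
    if( !rank )
        outputFormatter.printf( 0, "Constructing covering set with %d source ghost layer(s)\n",
                                tdata.num_src_ghost_layers );
#endif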
4321  rval = 4322  tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false, gnomonic, tdata.num_src_ghost_layers );MB_CHK_ERR( rval ); 4323  4324 #ifdef MOAB_HAVE_TEMPESTREMAP 4325  // set the reference to the covering set in the source PID 4326  data_intx.secondary_file_set = tdata.remapper->GetMeshSet( moab::Remapper::CoveringMesh ); 4327 #endif 4328  4329  return moab::MB_SUCCESS; 4330 } 4331  4332 ErrCode iMOAB_WriteCoverageMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options ) 4333 { 4334  return internal_WriteMesh( pid, filename, write_options, false ); 4335 } 4336  4337 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx ) 4338 { 4339  // Default constant parameters 4340  constexpr bool validate = true; 4341  constexpr bool use_kdtree_search = true; 4342  constexpr double defaultradius = 1.0; 4343  4344  // Get the source and target data and pcomm objects 4345  appData& data_src = context.appDatas[*pid_src]; 4346  appData& data_tgt = context.appDatas[*pid_tgt]; 4347  appData& data_intx = context.appDatas[*pid_intx]; 4348 #ifdef MOAB_HAVE_MPI 4349  ParallelComm* pco_intx = context.appDatas[*pid_intx].pcomm; 4350 #endif 4351  4352  // Mesh intersection has already been computed; Return early. 4353  TempestMapAppData& tdata = data_intx.tempestData; 4354  4355  if( tdata.remapper == nullptr ) 4356  { 4357  // user has not called the coverage mesh computation routine -- so explicitly call it now 4358  // this check supports the traditional workflow of directly computing mesh intersection 4359  // and letting this routine compute coverage mesh as needed 4360  MB_CHK_ERR( iMOAB_ComputeCoverageMesh( pid_src, pid_tgt, pid_intx ) ); 4361  } 4362  4363  int rank = 0; 4364 #ifdef MOAB_HAVE_MPI 4365  rank = pco_intx->rank(); 4366 #endif 4367  moab::DebugOutput outputFormatter( std::cout, rank, 0 ); 4368  outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " ); 4369  4370  // Next, compute intersections with MOAB. 
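    // use_kdtree_search is fixed to true above; the advancing-front alternative is only safe when
    // the source mesh covers the sphere without holes (see the related note in
    // iMOAB_ComputeCoverageMesh), so the kd-tree search is the robust default here.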
4371  // for bilinear, this is an overkill 4372  MB_CHK_ERR( tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false ) ); 4373  4374  // Mapping computation done 4375  if( validate ) 4376  { 4377  // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available) 4378  IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller ); 4379  double local_areas[3] = { 0.0, 0.0, 0.0 }, global_areas[3] = { 0.0, 0.0, 0.0 }; 4380  local_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ ); 4381  local_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ ); 4382  local_areas[2] = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, defaultradius ); 4383 #ifdef MOAB_HAVE_MPI 4384  global_areas[0] = global_areas[1] = global_areas[2] = 0.0; 4385  MPI_Reduce( &local_areas[0], &global_areas[0], 3, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() ); 4386 #else 4387  global_areas[0] = local_areas[0]; 4388  global_areas[1] = local_areas[1]; 4389  global_areas[2] = local_areas[2]; 4390 #endif 4391  4392  if( rank == 0 ) 4393  { 4394  outputFormatter.printf( 0, 4395  "initial area: source mesh = %12.14f, target mesh = %12.14f, " 4396  "overlap mesh = %12.14f\n", 4397  global_areas[0], global_areas[1], global_areas[2] ); 4398  outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n", 4399  fabs( global_areas[0] - global_areas[2] ) / global_areas[0], 4400  fabs( global_areas[1] - global_areas[2] ) / global_areas[1] ); 4401  } 4402  } 4403  4404  return moab::MB_SUCCESS; 4405 } 4406  4407 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx ) 4408 { 4409  ErrorCode rval; 4410  ErrCode ierr; 4411  4412  double radius_source = 1.0; 4413  double radius_target = 1.0; 4414  const double epsrel = ReferenceTolerance; // ReferenceTolerance is defined in Defines.h in tempestremap source ; 4415  const double boxeps = 1.e-8; 4416  4417  // Get the source and target data and pcomm objects 4418  appData& data_src = context.appDatas[*pid_src]; 4419  appData& data_tgt = context.appDatas[*pid_tgt]; 4420  appData& data_intx = context.appDatas[*pid_intx]; 4421 #ifdef MOAB_HAVE_MPI 4422  ParallelComm* pco_intx = context.appDatas[*pid_intx].pcomm; 4423 #endif 4424  4425  // Mesh intersection has already been computed; Return early. 
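    // A non-null remapper means this intersection context has already been initialized by an
    // earlier call, so the entire setup below is skipped.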
4426  TempestMapAppData& tdata = data_intx.tempestData; 4427  if( tdata.remapper != nullptr ) return moab::MB_SUCCESS; // nothing to do 4428  4429 #ifdef MOAB_HAVE_MPI 4430  if( pco_intx ) 4431  { 4432  rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval ); 4433  } 4434 #endif 4435  4436  ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr ); 4437  ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr ); 4438  4439  // Rescale the radius of both to compute the intersection 4440  ComputeSphereRadius( pid_src, &radius_source ); 4441  ComputeSphereRadius( pid_tgt, &radius_target ); 4442  4443  IntxAreaUtils areaAdaptor; 4444  // print verbosely about the problem setting 4445  { 4446  moab::Range rintxverts, rintxelems; 4447  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval ); 4448  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );MB_CHK_ERR( rval ); 4449  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval ); 4450  rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );MB_CHK_ERR( rval ); 4451 #ifdef VERBOSE 4452  std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size() 4453  << " elements \n"; 4454 #endif 4455  4456  moab::Range bintxverts, bintxelems; 4457  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval ); 4458  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );MB_CHK_ERR( rval ); 4459  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval ); 4460  rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );MB_CHK_ERR( rval ); 4461 #ifdef VERBOSE 4462  std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size() 4463  << " elements \n"; 4464 #endif 4465  } 4466  4467  data_intx.dimension = data_tgt.dimension; 4468  // set the context for the source and destination applications 4469  // set the context for the source and destination applications 4470  tdata.pid_src = pid_src; 4471  tdata.pid_dest = pid_tgt; 4472  data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud ); 4473  assert( data_intx.point_cloud == true ); 4474  4475  // Now allocate and initialize the remapper object 4476 #ifdef MOAB_HAVE_MPI 4477  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx ); 4478 #else 4479  tdata.remapper = new moab::TempestRemapper( context.MBI ); 4480 #endif 4481  tdata.remapper->meshValidate = true; 4482  tdata.remapper->constructEdgeMap = true; 4483  4484  // Do not create new filesets; Use the sets from our respective applications 4485  tdata.remapper->initialize( false ); 4486  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set; 4487  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set; 4488  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set; 4489  4490  /* Let make sure that the radius match for source and target meshes. If not, rescale now and 4491  * unscale later. 
4492  if( fabs( radius_source - radius_target ) > 1e-10 )
4493  { /* the radii are different */
4494  rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );MB_CHK_ERR( rval );
4495  rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );MB_CHK_ERR( rval );
4496  }
4497 
4498  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
4499  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
4500 
4501  // First, compute the covering source set.
4502  rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
4503 
4504 #ifdef MOAB_HAVE_MPI
4505  /* VSM: This context should be set on data_src, but that would overwrite the source
4506  covering set context in case it is coupled to another APP as well.
4507  This needs a redesign. */
4508  // data_intx.covering_set = tdata.remapper->GetCoveringSet();
4509  // data_src.covering_set = tdata.remapper->GetCoveringSet();
4510 #endif
4511 
4512  // Now let us re-convert the MOAB mesh back to the Tempest representation
4513  // rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
4514 
4515  return moab::MB_SUCCESS;
4516 }
4517 
4518 ErrCode iMOAB_ComputeScalarProjectionWeights(
4519  iMOAB_AppID pid_intx,
4520  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
4521  const iMOAB_String disc_method_source,
4522  int* disc_order_source,
4523  const iMOAB_String disc_method_target,
4524  int* disc_order_target,
4525  const iMOAB_String fv_method,
4526  int* fNoBubble,
4527  int* fMonotoneTypeID,
4528  int* fVolumetric,
4529  int* fInverseDistanceMap,
4530  int* fNoConservation,
4531  int* fValidate,
4532  const iMOAB_String source_solution_tag_dof_name,
4533  const iMOAB_String target_solution_tag_dof_name )
4534 {
4535  assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
4536  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4537  assert( disc_method_source && strlen( disc_method_source ) );
4538  assert( disc_method_target && strlen( disc_method_target ) );
4539  assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
4540  assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
4541 
4542  // Get the source and target data and pcomm objects
4543  appData& data_intx = context.appDatas[*pid_intx];
4544  TempestMapAppData& tdata = data_intx.tempestData;
4545 
4546  // Set up the computation of weights
4547  // Set the context for the remapping weights computation
4548  tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
4549 
4550  // Call to generate the remap weights with the tempest meshes
4551  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4552  assert( weightMap != nullptr );
4553 
4554  GenerateOfflineMapAlgorithmOptions mapOptions;
4555  mapOptions.nPin = *disc_order_source;
4556  mapOptions.nPout = *disc_order_target;
4557  mapOptions.fSourceConcave = false;
4558  mapOptions.fTargetConcave = false;
4559 
4560  mapOptions.strMethod = "";
4561  if( fv_method ) mapOptions.strMethod += std::string( fv_method ) + ";";
4562  if( fMonotoneTypeID )
4563  {
4564  switch( *fMonotoneTypeID )
4565  {
4566  case 0:
4567  mapOptions.fMonotone = false;
4568  break;
4569  case 3:
4570  mapOptions.strMethod += "mono3;";
4571  break;
4572  case 2:
4573  mapOptions.strMethod += "mono2;";
4574  break;
4575  default:
4576  mapOptions.fMonotone = true;
4577  }
4578  }
4579  else
4580  mapOptions.fMonotone = false;
4581 
4582  mapOptions.fNoBubble = ( fNoBubble ? *fNoBubble : false );
4583  mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
4584  mapOptions.fNoCorrectAreas = false;
4585  // mapOptions.fNoCheck = !( fValidate ? *fValidate : true );
4586  mapOptions.fNoCheck = true;
4587  if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
4588  if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
4589 
4590  std::string metadataStr = mapOptions.strMethod + ";" + std::string( disc_method_source ) + ":" +
4591  std::to_string( *disc_order_source ) + ":" + std::string( source_solution_tag_dof_name ) +
4592  ";" + std::string( disc_method_target ) + ":" + std::to_string( *disc_order_target ) +
4593  ":" + std::string( target_solution_tag_dof_name );
4594 
4595  data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
4596 
4597  // Now let us compute the local-global mapping and store it in the context
4598  // We need this mapping when computing matvec products and to do reductions in parallel
4599  // Additionally, the call below will also compute weights with TempestRemap
4600  MB_CHK_ERR( weightMap->GenerateRemappingWeights(
4601  std::string( disc_method_source ), // const std::string& strInputType
4602  std::string( disc_method_target ), // const std::string& strOutputType
4603  mapOptions, // GenerateOfflineMapAlgorithmOptions& mapOptions
4604  std::string( source_solution_tag_dof_name ), // const std::string& srcDofTagName = "GLOBAL_ID"
4605  std::string( target_solution_tag_dof_name ) // const std::string& tgtDofTagName = "GLOBAL_ID"
4606  ) );
4607 
4608  // print some map statistics
4609  weightMap->PrintMapStatistics();
4610 
4611  if( fValidate && *fValidate )
4612  {
4613  const double dNormalTolerance = 1.0E-8;
4614  const double dStrictTolerance = 1.0E-12;
4615  weightMap->CheckMap( true, true, ( fMonotoneTypeID && *fMonotoneTypeID ), dNormalTolerance, dStrictTolerance );
4616  }
4617 
4618  return moab::MB_SUCCESS;
4619 }
4620 
4621 ErrCode iMOAB_ApplyScalarProjectionWeights(
4622  iMOAB_AppID pid_intersection,
4623  int* filter_type,
4624  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
4625  const iMOAB_String source_solution_tag_name,
4626  const iMOAB_String target_solution_tag_name )
4627 {
4628  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4629  assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
4630  assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
4631 
4632  moab::ErrorCode rval;
4633 
4634  // Get the source and target data and pcomm objects
4635  appData& data_intx = context.appDatas[*pid_intersection];
4636  TempestMapAppData& tdata = data_intx.tempestData;
4637 
4638  if( !tdata.weightMaps.count( std::string( solution_weights_identifier ) ) ) // key does not exist
4639  return moab::MB_INDEX_OUT_OF_RANGE;
4640  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4641 
4642  // we assume that there are ":" separators between the tag names
4643  std::vector< std::string > srcNames;
4644  std::vector< std::string > tgtNames;
4645  std::vector< Tag > srcTagHandles;
4646  std::vector< Tag > tgtTagHandles;
4647  std::string separator( ":" );
4648  std::string src_name( source_solution_tag_name );
4649  std::string tgt_name( target_solution_tag_name );
4650  split_tag_names( src_name, separator, srcNames );
4651  split_tag_names( tgt_name, separator, tgtNames );
4652  if( srcNames.size() != tgtNames.size() )
4653  {
4654  std::cout << " error in parsing source and target tag names. \n";
4655  return moab::MB_FAILURE;
4656  }
4657 
4658  // get both source and target tag handles
4659  for( size_t i = 0; i < srcNames.size(); i++ )
4660  {
4661  Tag tagHandle;
4662  // get source tag handles and arrange them in order
4663  rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
4664  if( MB_SUCCESS != rval || nullptr == tagHandle )
4665  {
4666  return moab::MB_TAG_NOT_FOUND;
4667  }
4668  srcTagHandles.push_back( tagHandle );
4669 
4670  // get the corresponding target handles and arrange them in the same order
4671  rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
4672  if( MB_SUCCESS != rval || nullptr == tagHandle )
4673  {
4674  return moab::MB_TAG_NOT_FOUND;
4675  }
4676  tgtTagHandles.push_back( tagHandle );
4677  }
4678 
4679 #ifdef VERBOSE
4680  // Now get a reference to the remapper object
4681  moab::TempestRemapper* remapper = tdata.remapper;
4682  std::vector< double > solSTagVals;
4683  std::vector< double > solTTagVals;
4684 
4685  moab::Range sents, tents;
4686  if( data_intx.point_cloud )
4687  {
4688  appData& data_src = context.appDatas[*tdata.pid_src];
4689  appData& data_tgt = context.appDatas[*tdata.pid_dest];
4690  if( data_src.point_cloud )
4691  {
4692  moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
4693  solSTagVals.resize( covSrcEnts.size(), 0. );
4694  sents = covSrcEnts;
4695  }
4696  else
4697  {
4698  moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4699  solSTagVals.resize(
4700  covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), 0. );
4701  sents = covSrcEnts;
4702  }
4703  if( data_tgt.point_cloud )
4704  {
4705  moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
4706  solTTagVals.resize( tgtEnts.size(), 0. );
4707  tents = tgtEnts;
4708  }
4709  else
4710  {
4711  moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4712  solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4713  weightMap->GetDestinationNDofsPerElement(),
4714  0. );
4715  tents = tgtEnts;
4716  }
4717  }
4718  else
4719  {
4720  moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4721  moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4722  solSTagVals.resize(
4723  covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
4724  solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4725  weightMap->GetDestinationNDofsPerElement(),
4726  0. );
4727 
4728  sents = covSrcEnts;
4729  tents = tgtEnts;
4730  }
4731 #endif
4732 
4733  moab::TempestOnlineMap::CAASType caasType = moab::TempestOnlineMap::CAAS_NONE;
4734  if( filter_type )
4735  {
4736  switch( *filter_type )
4737  {
4738  case 1:
4739  caasType = moab::TempestOnlineMap::CAAS_GLOBAL;
4740  break;
4741  case 2:
4742  caasType = moab::TempestOnlineMap::CAAS_LOCAL;
4743  break;
4744  case 3:
4745  caasType = moab::TempestOnlineMap::CAAS_LOCAL_ADJACENT;
4746  break;
4747  default:
4748  caasType = moab::TempestOnlineMap::CAAS_NONE;
4749  }
4750  }
4751 
4752  for( size_t i = 0; i < srcTagHandles.size(); i++ )
4753  {
4754  // Compute the application of weights on the source solution data and store it in the
4755  // destination solution vector data. Optionally, we can also perform the transpose application
4756  // of the weight matrix. Set the 3rd argument to true if this is needed.
4757  rval = weightMap->ApplyWeights( srcTagHandles[i], tgtTagHandles[i], false, caasType );MB_CHK_ERR( rval );
4758  }
4759 
4760 // #define VERBOSE
4761 #ifdef VERBOSE
4762  ParallelComm* pco_intx = context.appDatas[*pid_intersection].pcomm;
4763 
4764  int ivar = 0;
4765  {
4766  Tag ssolnTag = srcTagHandles[ivar];
4767  std::stringstream sstr;
4768  sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4769  std::ofstream output_file( sstr.str().c_str() );
4770  for( unsigned i = 0; i < sents.size(); ++i )
4771  {
4772  EntityHandle elem = sents[i];
4773  std::vector< double > locsolSTagVals( 16 );
4774  rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );MB_CHK_ERR( rval );
4775  output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
4776  for( unsigned j = 0; j < 16; ++j )
4777  output_file << locsolSTagVals[j] << " ";
4778  }
4779  output_file.flush(); // required here
4780  output_file.close();
4781  }
4782  {
4783  std::stringstream sstr;
4784  sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4785  EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
4786  context.appDatas[*tdata.pid_dest].file_set };
4787  rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", sets, 2 );MB_CHK_ERR( rval );
4788  }
4789  {
4790  std::stringstream sstr;
4791  sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4792  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4793  EntityHandle covering_set = remapper->GetCoveringSet();
4794  EntityHandle sets[2] = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
4795  rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", sets, 2 );MB_CHK_ERR( rval );
4796  }
4797  {
4798  std::stringstream sstr;
4799  sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
4800  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4801  EntityHandle covering_set = remapper->GetCoveringSet();
4802  rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", &covering_set, 1 );MB_CHK_ERR( rval );
4803  }
4804  {
4805  std::stringstream sstr;
4806  sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
4807  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4808  EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
4809  rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", &target_set, 1 );MB_CHK_ERR( rval );
4810  }
4811  {
4812  std::stringstream sstr;
4813  sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4814  std::ofstream output_file( sstr.str().c_str() );
4815  for( unsigned i = 0; i < solSTagVals.size(); ++i )
4816  output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
4817  << solSTagVals[i] << "\n";
4818  output_file.flush(); // required here
4819  output_file.close();
4820  }
4821 #endif
4822  // #undef VERBOSE
4823 
4824  return moab::MB_SUCCESS;
4825 }
4826 
4827 #endif // MOAB_HAVE_TEMPESTREMAP
4828 
4829 #ifdef __cplusplus
4830 }
4831 #endif
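The three functions above are typically called in sequence from a coupler driver: build the intersection/point-DoF context, generate the projection weights once, then apply them every time fields need to be transferred. The sketch below is a minimal, hypothetical driver (it is not part of iMOAB.cpp): it assumes the three applications were already registered and their meshes loaded elsewhere, so that pid_src, pid_tgt and pid_intx are valid application IDs, that at least one of the coupled applications is a point cloud (as the assert in iMOAB_ComputePointDoFIntersection requires), and that a "Temperature" tag exists on both meshes. The discretization string "fv", the weight identifier "scalar", and the tag names are illustrative only.

// Hypothetical usage sketch, not part of the iMOAB library sources.
#include "moab/iMOAB.h"

void example_point_dof_remap( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
{
    // 1. Set up the point-DoF intersection context between the source and target applications.
    ErrCode ierr = iMOAB_ComputePointDoFIntersection( pid_src, pid_tgt, pid_intx );
    if( ierr ) return; // handle the error as appropriate in a real driver

    // Mutable buffers, since iMOAB_String is a pointer-to-char typedef.
    char weights_id[] = "scalar";       // key under which the weight map is stored
    char disc_fv[]    = "fv";           // assumed discretization method string
    char dof_tag[]    = "GLOBAL_ID";    // DoF tag used on both meshes
    char src_tag[]    = "Temperature";  // assumed source field tag
    char tgt_tag[]    = "Temperature";  // assumed target field tag

    // 2. Generate first-order projection weights, stored under the key "scalar".
    int order_src = 1, order_tgt = 1;
    int noBubble = 1, monotoneType = 0, volumetric = 0, inverseDistance = 0, noConservation = 0, validate = 1;
    ierr = iMOAB_ComputeScalarProjectionWeights( pid_intx, weights_id, disc_fv, &order_src, disc_fv, &order_tgt,
                                                 nullptr /* fv_method */, &noBubble, &monotoneType, &volumetric,
                                                 &inverseDistance, &noConservation, &validate, dof_tag, dof_tag );
    if( ierr ) return;

    // 3. Apply the stored weights to project "Temperature" from source to target;
    //    filter_type = 0 selects CAAS_NONE in the switch shown above.
    int filter_type = 0;
    ierr = iMOAB_ApplyScalarProjectionWeights( pid_intx, &filter_type, weights_id, src_tag, tgt_tag );
}

Note that step 2 is done once per weight identifier, while step 3 can be repeated for every coupling interval; multiple colon-separated tag names may be passed in one call, since iMOAB_ApplyScalarProjectionWeights splits the source and target tag lists on ":".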