Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
iMOAB.cpp File Reference
#include "moab/MOABConfig.h"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "moab/ParCommGraph.hpp"
#include "moab/ParallelMergeMesh.hpp"
#include "moab/IntxMesh/IntxUtils.hpp"
#include "DebugOutput.hpp"
#include "moab/iMOAB.h"
#include "mhdf.h"
#include <H5Tpublic.h>
#include "moab/CartVect.hpp"
#include "MBTagConventions.hpp"
#include "moab/MeshTopoUtil.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/MergeMesh.hpp"
#include <cassert>
#include <sstream>
#include <iostream>
+ Include dependency graph for iMOAB.cpp:

Go to the source code of this file.

Classes

struct  appData
 
struct  GlobalContext
 

Functions

ErrCode iMOAB_Initialize (int argc, iMOAB_String *argv)
 Initialize the iMOAB interface implementation. More...
 
ErrCode iMOAB_InitializeFortran ()
 Initialize the iMOAB interface implementation from Fortran driver. More...
 
ErrCode iMOAB_Finalize ()
 Finalize the iMOAB interface implementation. More...
 
static int apphash (const iMOAB_String str, int identifier=0)
 
ErrCode iMOAB_RegisterApplication (const iMOAB_String app_name, MPI_Comm *comm, int *compid, iMOAB_AppID pid)
 
ErrCode iMOAB_RegisterApplicationFortran (const iMOAB_String app_name, int *comm, int *compid, iMOAB_AppID pid)
 
ErrCode iMOAB_DeregisterApplication (iMOAB_AppID pid)
 De-Register application: delete mesh (set) associated with the application ID. More...
 
ErrCode iMOAB_DeregisterApplicationFortran (iMOAB_AppID pid)
 De-Register the Fortran based application: delete mesh (set) associated with the application ID. More...
 
ErrCode iMOAB_ReadHeaderInfo (const iMOAB_String filename, int *num_global_vertices, int *num_global_elements, int *num_dimension, int *num_parts)
 It should be called on master task only, and information obtained could be broadcasted by the user. It is a fast lookup in the header of the file. More...
 
ErrCode iMOAB_LoadMesh (iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String read_options, int *num_ghost_layers)
 Load a MOAB mesh file in parallel and exchange ghost layers as requested. More...
 
static ErrCode internal_WriteMesh (iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options, bool primary_set=true)
 
ErrCode iMOAB_WriteMesh (iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options)
 Write a MOAB mesh along with the solution tags to a file. More...
 
ErrCode iMOAB_WriteLocalMesh (iMOAB_AppID pid, iMOAB_String prefix)
 Write a local MOAB mesh copy. More...
 
ErrCode iMOAB_UpdateMeshInfo (iMOAB_AppID pid)
 Update local mesh data structure, from file information. More...
 
ErrCode iMOAB_GetMeshInfo (iMOAB_AppID pid, int *num_visible_vertices, int *num_visible_elements, int *num_visible_blocks, int *num_visible_surfaceBC, int *num_visible_vertexBC)
 Retrieve all the important mesh information related to vertices, elements, vertex and surface boundary conditions. More...
 
ErrCode iMOAB_GetVertexID (iMOAB_AppID pid, int *vertices_length, iMOAB_GlobalID *global_vertex_ID)
 Get the global vertex IDs for all locally visible (owned and shared/ghosted) vertices. More...
 
ErrCode iMOAB_GetVertexOwnership (iMOAB_AppID pid, int *vertices_length, int *visible_global_rank_ID)
 Get vertex ownership information. More...
 
ErrCode iMOAB_GetVisibleVerticesCoordinates (iMOAB_AppID pid, int *coords_length, double *coordinates)
 Get vertex coordinates for all local (owned and ghosted) vertices. More...
 
ErrCode iMOAB_GetBlockID (iMOAB_AppID pid, int *block_length, iMOAB_GlobalID *global_block_IDs)
 Get the global block IDs for all locally visible (owned and shared/ghosted) blocks. More...
 
ErrCode iMOAB_GetBlockInfo (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *vertices_per_element, int *num_elements_in_block)
 Get the global block information and number of visible elements belonging to a block (MATERIAL SET). More...
 
ErrCode iMOAB_GetVisibleElementsInfo (iMOAB_AppID pid, int *num_visible_elements, iMOAB_GlobalID *element_global_IDs, int *ranks, iMOAB_GlobalID *block_IDs)
 Get the visible elements information. More...
 
ErrCode iMOAB_GetBlockElementConnectivities (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *connectivity_length, int *element_connectivity)
 Get the connectivities for elements contained within a certain block. More...
 
ErrCode iMOAB_GetElementConnectivity (iMOAB_AppID pid, iMOAB_LocalID *elem_index, int *connectivity_length, int *element_connectivity)
 Get the connectivity for one element only. More...
 
ErrCode iMOAB_GetElementOwnership (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *num_elements_in_block, int *element_ownership)
 Get the element ownership within a certain block i.e., processor ID of the element owner. More...
 
ErrCode iMOAB_GetElementID (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *num_elements_in_block, iMOAB_GlobalID *global_element_ID, iMOAB_LocalID *local_element_ID)
 Get the global IDs for all locally visible elements belonging to a particular block. More...
 
ErrCode iMOAB_GetPointerToSurfaceBC (iMOAB_AppID pid, int *surface_BC_length, iMOAB_LocalID *local_element_ID, int *reference_surface_ID, int *boundary_condition_value)
 Get the surface boundary condition information. More...
 
ErrCode iMOAB_GetPointerToVertexBC (iMOAB_AppID pid, int *vertex_BC_length, iMOAB_LocalID *local_vertex_ID, int *boundary_condition_value)
 Get the vertex boundary condition information. More...
 
static void split_tag_names (std::string input_names, std::string &separator, std::vector< std::string > &list_tag_names)
 
ErrCode iMOAB_DefineTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_name, int *tag_type, int *components_per_entity, int *tag_index)
 Define a MOAB Tag corresponding to the application depending on requested types. More...
 
ErrCode iMOAB_SetIntTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_name, int *num_tag_storage_length, int *ent_type, int *tag_storage_data)
 Store the specified values in a MOAB integer Tag. More...
 
ErrCode iMOAB_GetIntTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_name, int *num_tag_storage_length, int *ent_type, int *tag_storage_data)
 Get the specified values in a MOAB integer Tag. More...
 
ErrCode iMOAB_SetDoubleTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_names, int *num_tag_storage_length, int *ent_type, double *tag_storage_data)
 Store the specified values in a MOAB double Tag. More...
 
ErrCode iMOAB_SetDoubleTagStorageWithGid (iMOAB_AppID pid, const iMOAB_String tag_storage_names, int *num_tag_storage_length, int *ent_type, double *tag_storage_data, int *globalIds)
 Store the specified values in a MOAB double Tag, for given ids. More...
 
ErrCode iMOAB_GetDoubleTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_names, int *num_tag_storage_length, int *ent_type, double *tag_storage_data)
 Retrieve the specified values in a MOAB double Tag. More...
 
ErrCode iMOAB_SynchronizeTags (iMOAB_AppID pid, int *num_tag, int *tag_indices, int *ent_type)
 Exchange tag values for the given tags across process boundaries. More...
 
ErrCode iMOAB_ReduceTagsMax (iMOAB_AppID pid, int *tag_index, int *ent_type)
 Perform global reductions with the processes in the current applications communicator. Specifically this routine performs reductions on the maximum value of the tag indicated by tag_index. More...
 
ErrCode iMOAB_GetNeighborElements (iMOAB_AppID pid, iMOAB_LocalID *local_index, int *num_adjacent_elements, iMOAB_LocalID *adjacent_element_IDs)
 Retrieve the adjacencies for the element entities. More...
 
ErrCode iMOAB_CreateVertices (iMOAB_AppID pid, int *coords_len, int *dim, double *coordinates)
 Create vertices for an app; it assumes no other vertices. More...
 
ErrCode iMOAB_CreateElements (iMOAB_AppID pid, int *num_elem, int *type, int *num_nodes_per_element, int *connectivity, int *block_ID)
 Create elements for an app; it assumes connectivity from local vertices, in order. More...
 
ErrCode iMOAB_SetGlobalInfo (iMOAB_AppID pid, int *num_global_verts, int *num_global_elems)
 Set global information for number of vertices and number of elements; It is usually available from the h5m file, or it can be computed with an MPI_Reduce. More...
 
ErrCode iMOAB_GetGlobalInfo (iMOAB_AppID pid, int *num_global_verts, int *num_global_elems)
 Get global information about number of vertices and number of elements. More...
 
ErrCode iMOAB_ResolveSharedEntities (iMOAB_AppID pid, int *num_verts, int *marker)
 Resolve shared entities using global markers on shared vertices. More...
 
ErrCode iMOAB_DetermineGhostEntities (iMOAB_AppID pid, int *ghost_dim, int *num_ghost_layers, int *bridge_dim)
 Create the requested number of ghost layers for the parallel mesh. More...
 
ErrCode iMOAB_SendMesh (iMOAB_AppID pid, MPI_Comm *joint_communicator, MPI_Group *receivingGroup, int *rcompid, int *method)
 
ErrCode iMOAB_ReceiveMesh (iMOAB_AppID pid, MPI_Comm *joint_communicator, MPI_Group *sendingGroup, int *scompid)
 
ErrCode iMOAB_SendElementTag (iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm *joint_communicator, int *context_id)
 
ErrCode iMOAB_ReceiveElementTag (iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm *joint_communicator, int *context_id)
 
ErrCode iMOAB_FreeSenderBuffers (iMOAB_AppID pid, int *context_id)
 
ErrCode iMOAB_ComputeCommGraph (iMOAB_AppID pid1, iMOAB_AppID pid2, MPI_Comm *joint_communicator, MPI_Group *group1, MPI_Group *group2, int *type1, int *type2, int *comp1, int *comp2)
 
ErrCode iMOAB_MergeVertices (iMOAB_AppID pid)
 

Variables

static struct GlobalContext context
 

Function Documentation

◆ apphash()

static int apphash ( const iMOAB_String  str,
int  identifier = 0 
)
static

Definition at line 196 of file iMOAB.cpp.

197 {
198  // A minimal perfect hash function (MPHF)
199  std::string appstr( str );
200  // Fowler–Noll–Vo (or FNV) is a non-cryptographic hash function created
201  // by Glenn Fowler, Landon Curt Noll, and Kiem-Phong Vo.
202  // Ref: https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
203  unsigned int h = 2166136261u; // FNV offset basis
204  for( char c : appstr )
205  {
206  h ^= static_cast< unsigned char >( c );
207  h *= 16777619u; // FNV prime
208  }
209  // Incorporate the integer into the hash
210  if( identifier )
211  {
212  h ^= static_cast< unsigned int >( identifier & 0xFFFFFFFF );
213  h *= 16777619u; // FNV prime again
214  }
215  return static_cast< int >( h & 0x7FFFFFFF ); // Mask to ensure positive value
216 }

Referenced by iMOAB_RegisterApplication().

◆ iMOAB_ComputeCommGraph()

ErrCode iMOAB_ComputeCommGraph ( iMOAB_AppID  pid1,
iMOAB_AppID  pid2,
MPI_Comm *  joint_communicator,
MPI_Group *  group1,
MPI_Group *  group2,
int *  type1,
int *  type2,
int *  comp1,
int *  comp2 
)

Definition at line 2821 of file iMOAB.cpp.

2830 {
2831  assert( joint_communicator );
2832  assert( group1 );
2833  assert( group2 );
2834  ErrorCode rval = MB_SUCCESS;
2835  int localRank = 0, numProcs = 1;
2836 
2837  bool isFortran = false;
2838  if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
2839  if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
2840 
2841  MPI_Comm global =
2842  ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator );
2843  MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
2844  MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
2845 
2846  MPI_Comm_rank( global, &localRank );
2847  MPI_Comm_size( global, &numProcs );
2848  // instantiate the par comm graph
2849 
2850  // we should search if we have another pcomm with the same comp ids in the list already
2851  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2852  ParCommGraph* cgraph = nullptr;
2853  ParCommGraph* cgraph_rev = nullptr;
2854  if( *pid1 >= 0 )
2855  {
2856  appData& data = context.appDatas[*pid1];
2857  auto mt = data.pgraph.find( *comp2 );
2858  if( mt != data.pgraph.end() ) data.pgraph.erase( mt );
2859  // now let us compute the new communication graph
2860  cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
2861  context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
2862  }
2863  if( *pid2 >= 0 )
2864  {
2865  appData& data = context.appDatas[*pid2];
2866  auto mt = data.pgraph.find( *comp1 );
2867  if( mt != data.pgraph.end() ) data.pgraph.erase( mt );
2868  // now let us compute the new communication graph
2869  cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
2870  context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev; // from 2 to 1
2871  }
2872 
2873  // each model has a list of global ids that will need to be sent by gs to rendezvous the other
2874  // model on the joint comm
2875  TupleList TLcomp1;
2876  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2877  TupleList TLcomp2;
2878  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2879  // will push_back a new tuple, if needed
2880 
2881  TLcomp1.enableWriteAccess();
2882 
2883  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
2884  Tag gdsTag;
2885  // find the values on first cell
2886  int lenTagType1 = 1;
2887  if( 1 == *type1 || 1 == *type2 )
2888  {
2889  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
2890  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
2891  }
2892  Tag gidTag = context.MBI->globalId_tag();
2893 
2894  std::vector< int > valuesComp1;
2895  // populate first tuple
2896  if( *pid1 >= 0 )
2897  {
2898  appData& data1 = context.appDatas[*pid1];
2899  EntityHandle fset1 = data1.file_set;
2900  // in case of tempest remap, get the coverage set
2901 #ifdef MOAB_HAVE_TEMPESTREMAP
2902  if( data1.tempestData.remapper != nullptr ) // this is the case this is part of intx;
2903  fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2904 #endif
2905  Range ents_of_interest;
2906  if( *type1 == 1 )
2907  {
2908  assert( gdsTag );
2909  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2910  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
2911  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
2912  }
2913  else if( *type1 == 2 )
2914  {
2915  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2916  valuesComp1.resize( ents_of_interest.size() );
2917  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2918  }
2919  else if( *type1 == 3 ) // for FV meshes, just get the global id of cell
2920  {
2921  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
2922  valuesComp1.resize( ents_of_interest.size() );
2923  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2924  }
2925  else
2926  {
2927  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
2928  }
2929  // now fill the tuple list with info and markers
2930  // because we will send only the ids, order and compress the list
2931  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
2932  TLcomp1.resize( uniq.size() );
2933  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
2934  {
2935  // to proc, marker, element local index, index in el
2936  int marker = *sit;
2937  int to_proc = marker % numProcs;
2938  int n = TLcomp1.get_n();
2939  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
2940  TLcomp1.vi_wr[2 * n + 1] = marker;
2941  TLcomp1.inc_n();
2942  }
2943  }
2944 
2945  ProcConfig pc( global ); // proc config does the crystal router
2946  pc.crystal_router()->gs_transfer( 1, TLcomp1,
2947  0 ); // communication towards joint tasks, with markers
2948  // sort by value (key 1)
2949 #ifdef VERBOSE
2950  std::stringstream ff1;
2951  ff1 << "TLcomp1_" << localRank << ".txt";
2952  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2953 #endif
2954  moab::TupleList::buffer sort_buffer;
2955  sort_buffer.buffer_init( TLcomp1.get_n() );
2956  TLcomp1.sort( 1, &sort_buffer );
2957  sort_buffer.reset();
2958 #ifdef VERBOSE
2959  // after sorting
2960  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2961 #endif
2962  // do the same, for the other component, number2, with type2
2963  // start copy
2964  TLcomp2.enableWriteAccess();
2965  // populate second tuple
2966  std::vector< int > valuesComp2;
2967  if( *pid2 >= 0 )
2968  {
2969  appData& data2 = context.appDatas[*pid2];
2970  EntityHandle fset2 = data2.file_set;
2971  // in case of tempest remap, get the coverage set
2972 #ifdef MOAB_HAVE_TEMPESTREMAP
2973  if( data2.tempestData.remapper != nullptr ) // this is the case this is part of intx;
2974  fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2975 #endif
2976 
2977  Range ents_of_interest;
2978  if( *type2 == 1 )
2979  {
2980  assert( gdsTag );
2981  rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2982  valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
2983  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
2984  }
2985  else if( *type2 == 2 )
2986  {
2987  rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2988  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
2989  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
2990  }
2991  else if( *type2 == 3 )
2992  {
2993  rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
2994  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
2995  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
2996  }
2997  else
2998  {
2999  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2
3000  }
3001  // now fill the tuple list with info and markers
3002  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3003  TLcomp2.resize( uniq.size() );
3004  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3005  {
3006  // to proc, marker, element local index, index in el
3007  int marker = *sit;
3008  int to_proc = marker % numProcs;
3009  int n = TLcomp2.get_n();
3010  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
3011  TLcomp2.vi_wr[2 * n + 1] = marker;
3012  TLcomp2.inc_n();
3013  }
3014  }
3015  pc.crystal_router()->gs_transfer( 1, TLcomp2,
3016  0 ); // communication towards joint tasks, with markers
3017  // sort by value (key 1)
3018 #ifdef VERBOSE
3019  std::stringstream ff2;
3020  ff2 << "TLcomp2_" << localRank << ".txt";
3021  TLcomp2.print_to_file( ff2.str().c_str() );
3022 #endif
3023  sort_buffer.buffer_reserve( TLcomp2.get_n() );
3024  TLcomp2.sort( 1, &sort_buffer );
3025  sort_buffer.reset();
3026  // end copy
3027 #ifdef VERBOSE
3028  TLcomp2.print_to_file( ff2.str().c_str() );
3029 #endif
3030  // need to send back the info, from the rendezvous point, for each of the values
3031  /* so go over each value, on local process in joint communicator
3032 
3033  now have to send back the info needed for communication;
3034  loop in in sync over both TLComp1 and TLComp2, in local process;
3035  So, build new tuple lists, to send synchronous communication
3036  populate them at the same time, based on marker, that is indexed
3037  */
3038 
3039  TupleList TLBackToComp1;
3040  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
3041  TLBackToComp1.enableWriteAccess();
3042 
3043  TupleList TLBackToComp2;
3044  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
3045  TLBackToComp2.enableWriteAccess();
3046 
3047  int n1 = TLcomp1.get_n();
3048  int n2 = TLcomp2.get_n();
3049 
3050  int indexInTLComp1 = 0;
3051  int indexInTLComp2 = 0; // advance both, according to the marker
3052  if( n1 > 0 && n2 > 0 )
3053  {
3054  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
3055  {
3056  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3057  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3058  if( currentValue1 < currentValue2 )
3059  {
3060  // we have a big problem; basically, we are saying that
3061  // dof currentValue is on one model and not on the other
3062  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
3063  indexInTLComp1++;
3064  continue;
3065  }
3066  if( currentValue1 > currentValue2 )
3067  {
3068  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
3069  indexInTLComp2++;
3070  continue;
3071  }
3072  int size1 = 1;
3073  int size2 = 1;
3074  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3075  size1++;
3076  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3077  size2++;
3078  // must be found in both lists, find the start and end indices
3079  for( int i1 = 0; i1 < size1; i1++ )
3080  {
3081  for( int i2 = 0; i2 < size2; i2++ )
3082  {
3083  // send the info back to components
3084  int n = TLBackToComp1.get_n();
3085  TLBackToComp1.reserve();
3086  TLBackToComp1.vi_wr[3 * n] =
3087  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
3088  // came from, info from comp2
3089  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3090  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
3091  n = TLBackToComp2.get_n();
3092  TLBackToComp2.reserve();
3093  TLBackToComp2.vi_wr[3 * n] =
3094  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
3095  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3096  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
3097  // what if there are repeated markers in TLcomp2? increase just index2
3098  }
3099  }
3100  indexInTLComp1 += size1;
3101  indexInTLComp2 += size2;
3102  }
3103  }
3104  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
3105  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3106 
3107  if( *pid1 >= 0 )
3108  {
3109  // we are on original comp 1 tasks
3110  // before ordering
3111  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3112  // processors it communicates with
3113 #ifdef VERBOSE
3114  std::stringstream f1;
3115  f1 << "TLBack1_" << localRank << ".txt";
3116  TLBackToComp1.print_to_file( f1.str().c_str() );
3117 #endif
3118  sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
3119  TLBackToComp1.sort( 1, &sort_buffer );
3120  sort_buffer.reset();
3121 #ifdef VERBOSE
3122  TLBackToComp1.print_to_file( f1.str().c_str() );
3123 #endif
3124  // so we are now on pid1, we know now each marker were it has to go
3125  // add a new method to ParCommGraph, to set up the involved_IDs_map
3126  cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
3127  }
3128  if( *pid2 >= 0 )
3129  {
3130  // we are on original comp 1 tasks
3131  // before ordering
3132  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3133  // processors it communicates with
3134 #ifdef VERBOSE
3135  std::stringstream f2;
3136  f2 << "TLBack2_" << localRank << ".txt";
3137  TLBackToComp2.print_to_file( f2.str().c_str() );
3138 #endif
3139  sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
3140  TLBackToComp2.sort( 2, &sort_buffer );
3141  sort_buffer.reset();
3142 #ifdef VERBOSE
3143  TLBackToComp2.print_to_file( f2.str().c_str() );
3144 #endif
3145  cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
3146  }
3147  return moab::MB_SUCCESS;
3148 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, moab::ProcConfig::crystal_router(), moab::TupleList::enableWriteAccess(), ErrorCode, appData::file_set, moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type(), moab::TupleList::get_n(), moab::Interface::globalId_tag(), moab::TupleList::inc_n(), moab::TupleList::initialize(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, MBQUAD, MBVERTEX, appData::pgraph, moab::TupleList::print_to_file(), moab::TupleList::reserve(), moab::TupleList::buffer::reset(), moab::TupleList::resize(), moab::ParCommGraph::settle_comm_by_ids(), moab::Range::size(), moab::TupleList::sort(), moab::Interface::tag_get_data(), moab::Interface::tag_get_handle(), moab::Interface::tag_get_length(), moab::TupleList::vi_rd, and moab::TupleList::vi_wr.

◆ iMOAB_CreateElements()

ErrCode iMOAB_CreateElements ( iMOAB_AppID  pid,
int *  num_elem,
int *  type,
int *  num_nodes_per_element,
int *  connectivity,
int *  block_ID 
)

Create elements for an app; it assumes connectivity from local vertices, in order.

Note
Operations: Not collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_elem(int*) Number of elements.
[in]type(int*) Type of element (moab type).
[in]num_nodes_per_element(int*) Number of nodes per element.
[in]connectivity(int *) Connectivity array, with respect to vertices (assumed contiguous).
[in]block_ID(int *) The local block identifier, which will now contain the elements.
Returns
ErrCode The error code indicating success or failure.

add the new ents to the clock set

Definition at line 2233 of file iMOAB.cpp.

2239 {
2240  // Create elements
2241  appData& data = context.appDatas[*pid];
2242 
2243  ReadUtilIface* read_iface;
2244  ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
2245 
2246  EntityType mbtype = (EntityType)( *type );
2247  EntityHandle actual_start_handle;
2248  EntityHandle* array = nullptr;
2249  rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
2250 
2251  // fill up with actual connectivity from input; assume the vertices are in order, and start
2252  // vertex is the first in the current data vertex range
2253  EntityHandle firstVertex = data.local_verts[0];
2254 
2255  for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
2256  {
2257  array[j] = connectivity[j] + firstVertex - 1;
2258  } // assumes connectivity uses 1 based array (from fortran, mostly)
2259 
2260  Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
2261 
2262  rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
2263 
2264  data.primary_elems.merge( new_elems );
2265 
2266  // add to adjacency
2267  rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
2268  // organize all new elements in block, with the given block ID; if the block set is not
2269  // existing, create a new mesh set;
2270  Range sets;
2271  int set_no = *block_ID;
2272  const void* setno_ptr = &set_no;
2274  sets );
2275  EntityHandle block_set;
2276 
2277  if( MB_FAILURE == rval || sets.empty() )
2278  {
2279  // create a new set, with this block ID
2280  rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
2281 
2282  rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
2283 
2284  // add the material set to file set
2285  rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
2286  }
2287  else
2288  {
2289  block_set = sets[0];
2290  } // first set is the one we want
2291 
2292  /// add the new ents to the clock set
2293  rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
2294 
2295  return moab::MB_SUCCESS;
2296 }

References moab::Interface::add_entities(), GlobalContext::appDatas, context, moab::Interface::create_meshset(), moab::Range::empty(), ErrorCode, appData::file_set, moab::ReadUtilIface::get_element_connect(), moab::Interface::get_entities_by_type_and_tag(), appData::local_verts, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, moab::Range::merge(), MESHSET_SET, appData::primary_elems, moab::Interface::query_interface(), moab::Interface::tag_set_data(), and moab::ReadUtilIface::update_adjacencies().

◆ iMOAB_CreateVertices()

/**
 * @brief Create vertices for an app; it assumes no other vertices.
 *
 * @note Operations: Not collective
 * @param pid         (iMOAB_AppID) The unique pointer to the application ID.
 * @param coords_len  (int*) Size of the coordinates array (nverts * dim).
 * @param dim         (int*) Dimension of the vertex coordinates (usually 3).
 * @param coordinates (double*) Coordinates of all vertices, interleaved.
 * @return ErrCode The error code indicating success or failure.
 */
ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
{
    appData& appdata = context.appDatas[*pid];

    // refuse to create vertices if the application already has some
    if( !appdata.local_verts.empty() ) return moab::MB_FAILURE;

    // number of vertices is the coordinate count divided by the spatial dimension
    const int num_vertices = *coords_len / *dim;

    ErrorCode rval = context.MBI->create_vertices( coordinates, num_vertices, appdata.local_verts );MB_CHK_ERR( rval );

    // register the new vertices with the application's file set
    rval = context.MBI->add_entities( appdata.file_set, appdata.local_verts );MB_CHK_ERR( rval );

    // also add the vertices to the all_verts range
    appdata.all_verts.merge( appdata.local_verts );

    return moab::MB_SUCCESS;
}

References moab::Interface::add_entities(), appData::all_verts, GlobalContext::appDatas, context, moab::Interface::create_vertices(), dim, moab::Range::empty(), ErrorCode, appData::file_set, appData::local_verts, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, and moab::Range::merge().

◆ iMOAB_DefineTagStorage()

ErrCode iMOAB_DefineTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  tag_type,
int *  components_per_entity,
int *  tag_index 
)

Define a MOAB Tag corresponding to the application depending on requested types.

Note
In MOAB, for most solution vectors, we only need to create a "Dense", "Double" Tag. A sparse tag can be created too. If the tag is already existing in the file, it will not be created. If it is a new tag, memory will be allocated when setting the values. Default values are 0 for integer tags, -1e+10 for double tags, 0 for entity handle tags.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag name to store/retrieve the data in MOAB.
[in]tag_type(int*) The type of MOAB tag (Dense/Sparse, Double/Int/EntityHandle), enum MOAB_TAG_TYPE.
[in]components_per_entity(int*) The total size of vector dimension per entity for the tag (e.g., number of doubles per entity).
[out]tag_index(int*) The tag index which can be used as identifier in synchronize methods.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1496 of file iMOAB.cpp.

1501 {
1502  // we have 6 types of tags supported so far
1503  // check if tag type is valid
1504  if( *tag_type < 0 || *tag_type > 5 ) return moab::MB_FAILURE;
1505 
1506  DataType tagDataType;
1507  TagType tagType;
1508  void* defaultVal = nullptr;
1509  int* defInt = new int[*components_per_entity];
1510  double* defDouble = new double[*components_per_entity];
1511  EntityHandle* defHandle = new EntityHandle[*components_per_entity];
1512 
1513  for( int i = 0; i < *components_per_entity; i++ )
1514  {
1515  defInt[i] = 0;
1516  defDouble[i] = -1e+10;
1517  defHandle[i] = static_cast< EntityHandle >( 0 );
1518  }
1519 
1520  switch( *tag_type )
1521  {
1522  case 0:
1523  tagDataType = MB_TYPE_INTEGER;
1524  tagType = MB_TAG_DENSE;
1525  defaultVal = defInt;
1526  break;
1527 
1528  case 1:
1529  tagDataType = MB_TYPE_DOUBLE;
1530  tagType = MB_TAG_DENSE;
1531  defaultVal = defDouble;
1532  break;
1533 
1534  case 2:
1535  tagDataType = MB_TYPE_HANDLE;
1536  tagType = MB_TAG_DENSE;
1537  defaultVal = defHandle;
1538  break;
1539 
1540  case 3:
1541  tagDataType = MB_TYPE_INTEGER;
1542  tagType = MB_TAG_SPARSE;
1543  defaultVal = defInt;
1544  break;
1545 
1546  case 4:
1547  tagDataType = MB_TYPE_DOUBLE;
1548  tagType = MB_TAG_SPARSE;
1549  defaultVal = defDouble;
1550  break;
1551 
1552  case 5:
1553  tagDataType = MB_TYPE_HANDLE;
1554  tagType = MB_TAG_SPARSE;
1555  defaultVal = defHandle;
1556  break;
1557 
1558  default: {
1559  delete[] defInt;
1560  delete[] defDouble;
1561  delete[] defHandle;
1562  return moab::MB_FAILURE;
1563  } // error
1564  }
1565 
1566  // split storage names if separated list
1567  std::string tag_name( tag_storage_name );
1568  // first separate the names of the tags
1569  // we assume that there are separators ":" between the tag names
1570  std::vector< std::string > tagNames;
1571  std::string separator( ":" );
1572  split_tag_names( tag_name, separator, tagNames );
1573 
1574  ErrorCode rval = moab::MB_SUCCESS; // assume success already :)
1575  appData& data = context.appDatas[*pid];
1576  int already_defined_tags = 0;
1577 
1578  Tag tagHandle;
1579  for( size_t i = 0; i < tagNames.size(); i++ )
1580  {
1581  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
1582  tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal );
1583 
1584  if( MB_ALREADY_ALLOCATED == rval )
1585  {
1586  std::map< std::string, Tag >& mTags = data.tagMap;
1587  std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
1588 
1589  if( mit == mTags.end() )
1590  {
1591  // add it to the map
1592  mTags[tagNames[i]] = tagHandle;
1593  // push it to the list of tags, too
1594  *tag_index = (int)data.tagList.size();
1595  data.tagList.push_back( tagHandle );
1596  }
1597  rval = MB_SUCCESS;
1598  already_defined_tags++;
1599  }
1600  else if( MB_SUCCESS == rval )
1601  {
1602  data.tagMap[tagNames[i]] = tagHandle;
1603  *tag_index = (int)data.tagList.size();
1604  data.tagList.push_back( tagHandle );
1605  }
1606  else
1607  {
1608  rval = moab::MB_FAILURE; // some tags were not created
1609  }
1610  }
1611  // we don't need default values anymore, avoid leaks
1612  int rankHere = 0;
1613 #ifdef MOAB_HAVE_MPI
1614  ParallelComm* pco = context.appDatas[*pid].pcomm;
1615  rankHere = pco->rank();
1616 #endif
1617  if( !rankHere && already_defined_tags )
1618  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
1619  << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
1620  << " tags \n";
1621  delete[] defInt;
1622  delete[] defDouble;
1623  delete[] defHandle;
1624  return rval;
1625 }

References GlobalContext::appDatas, context, ErrorCode, appData::global_id, MB_ALREADY_ALLOCATED, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_DENSE, MB_TAG_EXCL, MB_TAG_SPARSE, MB_TYPE_DOUBLE, MB_TYPE_HANDLE, MB_TYPE_INTEGER, GlobalContext::MBI, appData::name, moab::ParallelComm::rank(), split_tag_names(), moab::Interface::tag_get_handle(), appData::tagList, appData::tagMap, and TagType.

Referenced by iMOAB_ReceiveMesh().

◆ iMOAB_DeregisterApplication()

ErrCode iMOAB_DeregisterApplication ( iMOAB_AppID  pid)

De-Register application: delete mesh (set) associated with the application ID.

Note
The associated communicator will be released, and all associated mesh entities and sets will be deleted from the mesh data structure. Associated tag storage data will be freed too.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID
Returns
ErrCode The error code indicating success or failure.

Definition at line 336 of file iMOAB.cpp.

337 {
338  // the file set , parallel comm are all in vectors indexed by *pid
339  // assume we did not delete anything yet
340  // *pid will not be reused if we register another application
341  auto appIterator = context.appDatas.find( *pid );
342  // ensure that the application exists before we attempt to delete it
343  if( appIterator == context.appDatas.end() ) return MB_FAILURE;
344 
345  // we found the application
346  appData& data = appIterator->second;
347  int rankHere = 0;
348 #ifdef MOAB_HAVE_MPI
349  rankHere = data.pcomm->rank();
350 #endif
351  if( !rankHere )
352  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
353  << " is de-registered now \n";
354 
355  EntityHandle fileSet = data.file_set;
356  // get all entities part of the file set
357  Range fileents;
358  MB_CHK_ERR( context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true ) );
359  fileents.insert( fileSet );
360  MB_CHK_ERR( context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents ) ); // append all mesh sets
361 
362 #ifdef MOAB_HAVE_TEMPESTREMAP
363  if( data.tempestData.remapper ) delete data.tempestData.remapper;
364  if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
365 #endif
366 
367 #ifdef MOAB_HAVE_MPI
368  // NOTE: we could get the pco also with the following workflow.
369  // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
370 
371  auto& pargs = data.pgraph;
372  // free the parallel comm graphs associated with this app
373  for( auto mt = pargs.begin(); mt != pargs.end(); ++mt )
374  {
375  ParCommGraph* pgr = mt->second;
376  if( pgr != nullptr )
377  {
378  delete pgr;
379  pgr = nullptr;
380  }
381  }
382  // now free the ParallelComm resources
383  if( data.pcomm )
384  {
385  delete data.pcomm;
386  data.pcomm = nullptr;
387  }
388 #endif
389 
390  // delete first all except vertices
391  Range vertices = fileents.subset_by_type( MBVERTEX );
392  Range noverts = subtract( fileents, vertices );
393 
394  MB_CHK_ERR( context.MBI->delete_entities( noverts ) );
395  // now retrieve connected elements that still exist (maybe in other sets, pids?)
396  Range adj_ents_left;
397  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION ) );
398  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION ) );
399  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION ) );
400 
401  if( !adj_ents_left.empty() )
402  {
403  Range conn_verts;
404  MB_CHK_ERR( context.MBI->get_connectivity( adj_ents_left, conn_verts ) );
405  vertices = subtract( vertices, conn_verts );
406  }
407 
408  MB_CHK_ERR( context.MBI->delete_entities( vertices ) );
409 
410  for( auto mit = context.appIdMap.begin(); mit != context.appIdMap.end(); ++mit )
411  {
412  if( *pid == mit->second )
413  {
414 #ifdef MOAB_HAVE_MPI
415  if( data.pcomm )
416  {
417  delete data.pcomm;
418  data.pcomm = nullptr;
419  }
420 #endif
421  context.appIdMap.erase( mit );
422  break;
423  }
424  }
425 
426  // now we can finally delete the application data itself
427  context.appDatas.erase( appIterator );
428 
429  return moab::MB_SUCCESS;
430 }

References GlobalContext::appDatas, GlobalContext::appIdMap, context, moab::Interface::delete_entities(), moab::Range::empty(), appData::file_set, moab::Interface::get_adjacencies(), moab::Interface::get_connectivity(), moab::Interface::get_entities_by_handle(), moab::Interface::get_entities_by_type(), appData::global_id, moab::Range::insert(), MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, MBVERTEX, appData::name, appData::pcomm, appData::pgraph, moab::ParallelComm::rank(), moab::Range::subset_by_type(), moab::subtract(), and moab::Interface::UNION.

Referenced by iMOAB_DeregisterApplicationFortran().

◆ iMOAB_DeregisterApplicationFortran()

ErrCode iMOAB_DeregisterApplicationFortran ( iMOAB_AppID  pid)

De-Register the Fortran based application: delete mesh (set) associated with the application ID.

Note
The associated communicator will be released, and all associated mesh entities and sets will be deleted from the mesh data structure. Associated tag storage data will be freed too.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID
Returns
ErrCode The error code indicating success or failure.

Definition at line 432 of file iMOAB.cpp.

433 {
434  // release any Fortran specific allocations here before we pass it on
435  context.appDatas[*pid].is_fortran = false;
436 
437  // release all datastructure allocations
438  return iMOAB_DeregisterApplication( pid );
439 }

References GlobalContext::appDatas, context, and iMOAB_DeregisterApplication().

◆ iMOAB_DetermineGhostEntities()

ErrCode iMOAB_DetermineGhostEntities ( iMOAB_AppID  pid,
int *  ghost_dim,
int *  num_ghost_layers,
int *  bridge_dim 
)

Create the requested number of ghost layers for the parallel mesh.

Note
The routine assumes that the shared entities were resolved successfully, and that the mesh is properly distributed on separate tasks.

Operations: Collective.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]ghost_dim(int*) Desired ghost dimension (2 or 3, most of the time).
[in]num_ghost_layers(int*) Number of ghost layers requested.
[in]bridge_dim(int*) Bridge dimension (0 for vertices, 1 for edges, 2 for faces).
Returns
ErrCode The error code indicating success or failure.

Definition at line 2375 of file iMOAB.cpp.

2376 {
2377  ErrorCode rval;
2378 
2379  // verify we have valid ghost layers input specified. If invalid, exit quick.
2380  if( num_ghost_layers && *num_ghost_layers <= 0 )
2381  {
2382  return moab::MB_SUCCESS;
2383  } // nothing to do
2384 
2385  appData& data = context.appDatas[*pid];
2386  ParallelComm* pco = context.appDatas[*pid].pcomm;
2387 
2388  // maybe we should be passing this too; most of the time we do not need additional ents collective call
2389  constexpr int addl_ents = 0;
2390  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, 1, // get only one layer of ghost entities
2391  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2392  for( int i = 2; i <= *num_ghost_layers; i++ )
2393  {
2394  rval = pco->correct_thin_ghost_layers();MB_CHK_ERR( rval ); // correct for thin layers
2395  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, i, // iteratively get one extra layer
2396  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2397  }
2398 
2399  // Update ghost layer information
2400  data.num_ghost_layers = *num_ghost_layers;
2401 
2402  // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
2403  return iMOAB_UpdateMeshInfo( pid );
2404 }

References GlobalContext::appDatas, context, moab::ParallelComm::correct_thin_ghost_layers(), ErrorCode, moab::ParallelComm::exchange_ghost_cells(), appData::file_set, iMOAB_UpdateMeshInfo(), MB_CHK_ERR, MB_SUCCESS, and appData::num_ghost_layers.

◆ iMOAB_Finalize()

ErrCode iMOAB_Finalize ( void  )

Finalize the iMOAB interface implementation.

It will delete the internally reference counted MOAB instance, if the reference count reaches 0.

Operations: Collective

Returns
ErrCode The error code indicating success or failure.

Definition at line 182 of file iMOAB.cpp.

183 {
185 
186  if( 0 == context.refCountMB )
187  {
188  delete context.MBI;
189  }
190 
191  return MB_SUCCESS;
192 }

References context, MB_SUCCESS, GlobalContext::MBI, and GlobalContext::refCountMB.

◆ iMOAB_FreeSenderBuffers()

ErrCode iMOAB_FreeSenderBuffers ( iMOAB_AppID  pid,
int *  context_id 
)

Definition at line 2808 of file iMOAB.cpp.

2809 {
2810  // need first to find the pgraph that holds the information we need
2811  // this will be called on sender side only
2812  appData& data = context.appDatas[*pid];
2813  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2814  if( mt == data.pgraph.end() ) return moab::MB_FAILURE; // error
2815 
2816  mt->second->release_send_buffers();
2817  return moab::MB_SUCCESS;
2818 }

References GlobalContext::appDatas, context, MB_SUCCESS, and appData::pgraph.

◆ iMOAB_GetBlockElementConnectivities()

ErrCode iMOAB_GetBlockElementConnectivities ( iMOAB_AppID  pid,
iMOAB_GlobalID global_block_ID,
int *  connectivity_length,
int *  element_connectivity 
)

Get the connectivities for elements contained within a certain block.

Note
input is the block ID. Should we change to visible block local index?

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID*) The global block ID of the set being queried.
[in]connectivity_length(int*) The allocated size of array. (typical size := vertices_per_element*num_elements_in_block)
[out]element_connectivity(int*) The connectivity array to store element ordering in MOAB canonical numbering scheme (array allocated by user); the array contains vertex indices in the local numbering order for vertices. Elements are in the same order as provided by GetElementOwnership and GetElementID.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1166 of file iMOAB.cpp.

1170 {
1171  assert( global_block_ID ); // ensure global block ID argument is not null
1172  assert( connectivity_length ); // ensure connectivity length argument is not null
1173 
1174  appData& data = context.appDatas[*pid];
1175  std::map< int, int >& matMap = data.matIndex;
1176  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1177 
1178  if( it == matMap.end() )
1179  {
1180  return moab::MB_FAILURE;
1181  } // error in finding block with id
1182 
1183  int blockIndex = matMap[*global_block_ID];
1184  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1185  std::vector< EntityHandle > elems;
1186 
1187  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1188 
1189  if( elems.empty() )
1190  {
1191  return moab::MB_FAILURE;
1192  }
1193 
1194  std::vector< EntityHandle > vconnect;
1195  rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
1196 
1197  if( *connectivity_length != (int)vconnect.size() )
1198  {
1199  return moab::MB_FAILURE;
1200  } // mismatched sizes
1201 
1202  for( int i = 0; i < *connectivity_length; i++ )
1203  {
1204  int inx = data.all_verts.index( vconnect[i] );
1205 
1206  if( -1 == inx )
1207  {
1208  return moab::MB_FAILURE;
1209  } // error, vertex not in local range
1210 
1211  element_connectivity[i] = inx;
1212  }
1213 
1214  return moab::MB_SUCCESS;
1215 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, moab::Interface::get_connectivity(), moab::Interface::get_entities_by_handle(), moab::Range::index(), appData::mat_sets, appData::matIndex, MB_CHK_ERR, MB_SUCCESS, and GlobalContext::MBI.

◆ iMOAB_GetBlockID()

ErrCode iMOAB_GetBlockID ( iMOAB_AppID  pid,
int *  block_length,
iMOAB_GlobalID global_block_IDs 
)

Get the global block IDs for all locally visible (owned and shared/ghosted) blocks.

Note
Block IDs are corresponding to MATERIAL_SET tags for material sets. Usually the block ID is exported from Cubit as a unique integer value. First blocks are local, and next blocks are fully ghosted. First blocks have at least one owned cell/element, ghost blocks have only ghost cells. Internally, a block corresponds to a mesh set with a MATERIAL_SET tag value equal to the block ID.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]block_length(int*) The allocated size of array (typical size := num_visible_blocks).
[out]global_block_IDs(iMOAB_GlobalID*) The global IDs for all locally visible blocks (array allocated by user).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1043 of file iMOAB.cpp.

1044 {
1045  // local id blocks? they are counted from 0 to number of visible blocks ...
1046  // will actually return material set tag value for global
1047  Range& matSets = context.appDatas[*pid].mat_sets;
1048 
1049  if( *block_length != (int)matSets.size() )
1050  {
1051  return moab::MB_FAILURE;
1052  }
1053 
1054  // return the material set tag value; gtags[0] is the material set tag
1055  ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
1056 
1057  // populate map with index
1058  std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
1059  for( unsigned i = 0; i < matSets.size(); i++ )
1060  {
1061  matIdx[global_block_IDs[i]] = i;
1062  }
1063 
1064  return moab::MB_SUCCESS;
1065 }

References GlobalContext::appDatas, context, ErrorCode, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::Range::size(), and moab::Interface::tag_get_data().

◆ iMOAB_GetBlockInfo()

ErrCode iMOAB_GetBlockInfo ( iMOAB_AppID  pid,
iMOAB_GlobalID global_block_ID,
int *  vertices_per_element,
int *  num_elements_in_block 
)

Get the global block information and number of visible elements of belonging to a block (MATERIAL SET).

Note
A block has to be homogeneous, it can contain elements of a single type.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID) The global block ID of the set to be queried.
[out]vertices_per_element(int*) The number of vertices per element.
[out]num_elements_in_block(int*) The number of elements in block.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1067 of file iMOAB.cpp.

1071 {
1072  assert( global_block_ID );
1073 
1074  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1075  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1076 
1077  if( it == matMap.end() )
1078  {
1079  return moab::MB_FAILURE;
1080  } // error in finding block with id
1081 
1082  int blockIndex = matMap[*global_block_ID];
1083  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1084  Range blo_elems;
1085  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
1086 
1087  if( MB_SUCCESS != rval || blo_elems.empty() )
1088  {
1089  return moab::MB_FAILURE;
1090  }
1091 
1092  EntityType type = context.MBI->type_from_handle( blo_elems[0] );
1093 
1094  if( !blo_elems.all_of_type( type ) )
1095  {
1096  return moab::MB_FAILURE;
1097  } // not all of same type
1098 
1099  const EntityHandle* conn = nullptr;
1100  int num_verts = 0;
1101  rval = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
1102 
1103  *vertices_per_element = num_verts;
1104  *num_elements_in_block = (int)blo_elems.size();
1105 
1106  return moab::MB_SUCCESS;
1107 }

References moab::Range::all_of_type(), GlobalContext::appDatas, context, moab::Range::empty(), ErrorCode, moab::Interface::get_connectivity(), moab::Interface::get_entities_by_handle(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::Range::size(), and moab::Interface::type_from_handle().

◆ iMOAB_GetDoubleTagStorage()

ErrCode iMOAB_GetDoubleTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  num_tag_storage_length,
int *  entity_type,
double *  tag_storage_data 
)

Retrieve the specified values in a MOAB double Tag.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag names to retrieve the data in MOAB.
[in]num_tag_storage_length(int*) The size of total tag storage data (e.g., num_visible_vertices*components_per_entity*num_tags or num_visible_elements*components_per_entity*num_tags)
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[out]tag_storage_data(double*) The array data of type double to replace the internal tag memory; The data is assumed to be contiguous over the local set of visible entities (either vertices or elements). unrolled by tags
Returns
ErrCode The error code indicating success or failure.

Definition at line 2029 of file iMOAB.cpp.

2034 {
2035  ErrorCode rval;
2036  // exactly the same code, except tag type check
2037  std::string tag_names( tag_storage_names );
2038  // exactly the same code as for int tag :) maybe should check the type of tag too
2039  std::vector< std::string > tagNames;
2040  std::vector< Tag > tagHandles;
2041  std::string separator( ":" );
2042  split_tag_names( tag_names, separator, tagNames );
2043 
2044  appData& data = context.appDatas[*pid];
2045 
2046  // set it on a subset of entities, based on type and length
2047  Range* ents_to_get = nullptr;
2048 
2049  if( *ent_type == 0 ) // vertices
2050  {
2051  ents_to_get = &data.all_verts;
2052  }
2053  else if( *ent_type == 1 )
2054  {
2055  ents_to_get = &data.primary_elems;
2056  }
2057  int nents_to_get = (int)ents_to_get->size();
2058  int position = 0;
2059  for( size_t i = 0; i < tagNames.size(); i++ )
2060  {
2061  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
2062  {
2063  return moab::MB_FAILURE;
2064  } // tag not defined
2065 
2066  Tag tag = data.tagMap[tagNames[i]];
2067 
2068  int tagLength = 0;
2069  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
2070 
2071  DataType dtype;
2072  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
2073 
2074  if( dtype != MB_TYPE_DOUBLE )
2075  {
2076  return moab::MB_FAILURE;
2077  }
2078 
2079  if( position + nents_to_get * tagLength > *num_tag_storage_length )
2080  return moab::MB_FAILURE; // too many entity values to get
2081 
2082  rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
2083  position = position + nents_to_get * tagLength;
2084  }
2085 
2086  return moab::MB_SUCCESS; // no error
2087 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, MB_TYPE_DOUBLE, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), split_tag_names(), moab::Interface::tag_get_data(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), and appData::tagMap.

◆ iMOAB_GetElementConnectivity()

ErrCode iMOAB_GetElementConnectivity ( iMOAB_AppID  pid,
iMOAB_LocalID elem_index,
int *  connectivity_length,
int *  element_connectivity 
)

Get the connectivity for one element only.

Note
This is a convenience method and in general should not be used unless necessary. You should use coalesced calls for multiple elements for efficiency.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]elem_index(iMOAB_LocalID *) Local element index.
[in,out]connectivity_length(int *) On input, maximum length of connectivity. On output, actual length.
[out]element_connectivity(int*) The connectivity array to store connectivity in MOAB canonical numbering scheme. Array contains vertex indices in the local numbering order for vertices.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1217 of file iMOAB.cpp.

1221 {
1222  assert( elem_index ); // ensure element index argument is not null
1223  assert( connectivity_length ); // ensure connectivity length argument is not null
1224 
1225  appData& data = context.appDatas[*pid];
1226  assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
1227 
1228  int num_nodes;
1229  const EntityHandle* conn;
1230 
1231  EntityHandle eh = data.primary_elems[*elem_index];
1232 
1233  ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
1234 
1235  if( *connectivity_length < num_nodes )
1236  {
1237  return moab::MB_FAILURE;
1238  } // wrong number of vertices
1239 
1240  for( int i = 0; i < num_nodes; i++ )
1241  {
1242  int index = data.all_verts.index( conn[i] );
1243 
1244  if( -1 == index )
1245  {
1246  return moab::MB_FAILURE;
1247  }
1248 
1249  element_connectivity[i] = index;
1250  }
1251 
1252  *connectivity_length = num_nodes;
1253 
1254  return moab::MB_SUCCESS;
1255 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, moab::Interface::get_connectivity(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, and moab::Range::size().

◆ iMOAB_GetElementID()

ErrCode iMOAB_GetElementID ( iMOAB_AppID  pid,
iMOAB_GlobalID global_block_ID,
int *  num_elements_in_block,
iMOAB_GlobalID global_element_ID,
iMOAB_LocalID local_element_ID 
)

Get the global IDs for all locally visible elements belonging to a particular block.

Note
The method will return also the local index of each element, in the local range that contains all visible elements.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID*) The global block ID of the set being queried.
[in]num_elements_in_block(int*) The allocated size of global element ID array, same as num_elements_in_block returned from GetBlockInfo().
[out]global_element_ID(iMOAB_GlobalID*) The global IDs for all locally visible elements (array allocated by user).
[out]local_element_ID(iMOAB_LocalID*) (Optional) The local IDs for all locally visible elements (index in the range of all primary elements in the rank)
Returns
ErrCode The error code indicating success or failure.

Definition at line 1307 of file iMOAB.cpp.

1312 {
1313  assert( global_block_ID ); // ensure global block ID argument is not null
1314  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1315 
1316  appData& data = context.appDatas[*pid];
1317  std::map< int, int >& matMap = data.matIndex;
1318 
1319  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1320 
1321  if( it == matMap.end() )
1322  {
1323  return moab::MB_FAILURE;
1324  } // error in finding block with id
1325 
1326  int blockIndex = matMap[*global_block_ID];
1327  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1328  Range elems;
1329  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1330 
1331  if( elems.empty() )
1332  {
1333  return moab::MB_FAILURE;
1334  }
1335 
1336  if( *num_elements_in_block != (int)elems.size() )
1337  {
1338  return moab::MB_FAILURE;
1339  } // bad memory allocation
1340 
1341  rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
1342 
1343  // check that elems are among primary_elems in data
1344  for( int i = 0; i < *num_elements_in_block; i++ )
1345  {
1346  local_element_ID[i] = data.primary_elems.index( elems[i] );
1347 
1348  if( -1 == local_element_ID[i] )
1349  {
1350  return moab::MB_FAILURE;
1351  } // error, not in local primary elements
1352  }
1353 
1354  return moab::MB_SUCCESS;
1355 }

References GlobalContext::appDatas, context, moab::Range::empty(), ErrorCode, moab::Interface::get_entities_by_handle(), GlobalContext::globalID_tag, moab::Range::index(), appData::mat_sets, appData::matIndex, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), and moab::Interface::tag_get_data().

◆ iMOAB_GetElementOwnership()

ErrCode iMOAB_GetElementOwnership ( iMOAB_AppID  pid,
iMOAB_GlobalID global_block_ID,
int *  num_elements_in_block,
int *  element_ownership 
)

Get the element ownership within a certain block i.e., processor ID of the element owner.

Note
Should we use local block index for input, instead of the global block ID?

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID) The global block ID of the set being queried.
[in]num_elements_in_block(int*) The allocated size of ownership array, same as num_elements_in_block returned from GetBlockInfo().
[out]element_ownership(int*) The ownership array to store processor ID for all elements (array allocated by user).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1257 of file iMOAB.cpp.

1261 {
1262  assert( global_block_ID ); // ensure global block ID argument is not null
1263  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1264 
1265  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1266 
1267  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1268 
1269  if( it == matMap.end() )
1270  {
1271  return moab::MB_FAILURE;
1272  } // error in finding block with id
1273 
1274  int blockIndex = matMap[*global_block_ID];
1275  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1276  Range elems;
1277 
1278  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1279 
1280  if( elems.empty() )
1281  {
1282  return moab::MB_FAILURE;
1283  }
1284 
1285  if( *num_elements_in_block != (int)elems.size() )
1286  {
1287  return moab::MB_FAILURE;
1288  } // bad memory allocation
1289 
1290  int i = 0;
1291 #ifdef MOAB_HAVE_MPI
1292  ParallelComm* pco = context.appDatas[*pid].pcomm;
1293 #endif
1294 
1295  for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
1296  {
1297 #ifdef MOAB_HAVE_MPI
1298  rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
1299 #else
1300  element_ownership[i] = 0; /* owned by 0 */
1301 #endif
1302  }
1303 
1304  return moab::MB_SUCCESS;
1305 }

References GlobalContext::appDatas, moab::Range::begin(), context, moab::Range::empty(), moab::Range::end(), ErrorCode, moab::Interface::get_entities_by_handle(), moab::ParallelComm::get_owner(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, and moab::Range::size().

◆ iMOAB_GetGlobalInfo()

ErrCode iMOAB_GetGlobalInfo ( iMOAB_AppID  pid,
int *  num_global_verts,
int *  num_global_elems 
)

Get global information about number of vertices and number of elements.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[out]num_global_verts(int*) The number of total vertices in the mesh.
[out]num_global_elems(int*) The number of total elements in the mesh.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2306 of file iMOAB.cpp.

2307 {
2308  appData& data = context.appDatas[*pid];
2309  if( nullptr != num_global_verts )
2310  {
2311  *num_global_verts = data.num_global_vertices;
2312  }
2313  if( nullptr != num_global_elems )
2314  {
2315  *num_global_elems = data.num_global_elements;
2316  }
2317 
2318  return moab::MB_SUCCESS;
2319 }

References GlobalContext::appDatas, context, MB_SUCCESS, appData::num_global_elements, and appData::num_global_vertices.

◆ iMOAB_GetIntTagStorage()

ErrCode iMOAB_GetIntTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  num_tag_storage_length,
int *  entity_type,
int *  tag_storage_data 
)

Get the specified values in a MOAB integer Tag.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag name to store/retrieve the data in MOAB.
[in]num_tag_storage_length(int*) The size of tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity).
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[out]tag_storage_data(int*) The array data of type int to be copied from the internal tag memory; The data is assumed to be contiguous over the local set of visible entities (either vertices or elements).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1669 of file iMOAB.cpp.

1674 {
1675  ErrorCode rval;
1676  std::string tag_name( tag_storage_name );
1677 
1678  appData& data = context.appDatas[*pid];
1679 
1680  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1681  {
1682  return moab::MB_FAILURE;
1683  } // tag not defined
1684 
1685  Tag tag = data.tagMap[tag_name];
1686 
1687  int tagLength = 0;
1688  MB_CHK_ERR( context.MBI->tag_get_length( tag, tagLength ) );
1689 
1690  DataType dtype;
1691  MB_CHK_ERR( context.MBI->tag_get_data_type( tag, dtype ) );
1692 
1693  if( dtype != MB_TYPE_INTEGER )
1694  {
1695  MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." );
1696  }
1697 
1698  // set it on a subset of entities, based on type and length
1699  // if *entity_type = 0, then use vertices; else elements
1700  Range* ents_to_get = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1701  int nents_to_get = *num_tag_storage_length / tagLength;
1702 
1703  if( nents_to_get > (int)ents_to_get->size() )
1704  {
1705  return moab::MB_FAILURE;
1706  } // to many entities to get, or too little
1707 
1708  // now set the tag data
1709  rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
1710 
1711  return moab::MB_SUCCESS; // no error
1712 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, MB_TYPE_INTEGER, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), moab::Interface::tag_get_data(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), and appData::tagMap.

◆ iMOAB_GetMeshInfo()

ErrCode iMOAB_GetMeshInfo ( iMOAB_AppID  pid,
int *  num_visible_vertices,
int *  num_visible_elements,
int *  num_visible_blocks,
int *  num_visible_surfaceBC,
int *  num_visible_vertexBC 
)

Retrieve all the important mesh information related to vertices, elements, vertex and surface boundary conditions.

Note
Number of visible vertices and cells include ghost entities. All arrays returned have size 3. Local entities are first, then ghost entities are next. Shared vertices can be owned in MOAB sense by different tasks. Ghost vertices and cells are always owned by other tasks.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[out]num_visible_vertices(int*) The number of vertices in the current partition/process arranged. as: owned/shared only, ghosted, total_visible (array allocated by user, size := 3).
[out]num_visible_elements(int*) The number of elements in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
[out]num_visible_blocks(int*) The number of material sets in local mesh in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
[out]num_visible_surfaceBC(int*) The number of mesh surfaces that have a NEUMANN_SET BC defined in local mesh in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
[out]num_visible_vertexBC(int*) The number of vertices that have a DIRICHLET_SET BC defined in local mesh in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
Returns
ErrCode The error code indicating success or failure.

Definition at line 885 of file iMOAB.cpp.

891 {
892  ErrorCode rval;
893  appData& data = context.appDatas[*pid];
894  EntityHandle fileSet = data.file_set;
895 
896  // this will include ghost elements
897  // first clear all data ranges; this can be called after ghosting
898  if( num_visible_elements )
899  {
900  num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
901  // separate ghost and local/owned primary elements
902  num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
903  num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
904  }
905  if( num_visible_vertices )
906  {
907  num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
908  num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
909  // local are those that are not ghosts; they may include shared, not owned vertices
910  num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
911  }
912 
913  if( num_visible_blocks )
914  {
916  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
917 
918  num_visible_blocks[2] = data.mat_sets.size();
919  num_visible_blocks[0] = num_visible_blocks[2];
920  num_visible_blocks[1] = 0;
921  }
922 
923  if( num_visible_surfaceBC )
924  {
926  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
927 
928  num_visible_surfaceBC[2] = 0;
929  // count how many faces are in each neu set, and how many regions are
930  // adjacent to them;
931  int numNeuSets = (int)data.neu_sets.size();
932 
933  for( int i = 0; i < numNeuSets; i++ )
934  {
935  Range subents;
936  EntityHandle nset = data.neu_sets[i];
937  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
938 
939  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
940  {
941  EntityHandle subent = *it;
942  Range adjPrimaryEnts;
943  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
944 
945  num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
946  }
947  }
948 
949  num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
950  num_visible_surfaceBC[1] = 0;
951  }
952 
953  if( num_visible_vertexBC )
954  {
956  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
957 
958  num_visible_vertexBC[2] = 0;
959  int numDiriSets = (int)data.diri_sets.size();
960 
961  for( int i = 0; i < numDiriSets; i++ )
962  {
963  Range verts;
964  EntityHandle diset = data.diri_sets[i];
965  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
966 
967  num_visible_vertexBC[2] += (int)verts.size();
968  }
969 
970  num_visible_vertexBC[0] = num_visible_vertexBC[2];
971  num_visible_vertexBC[1] = 0;
972  }
973 
974  return moab::MB_SUCCESS;
975 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::begin(), context, appData::dimension, appData::diri_sets, GlobalContext::dirichlet_tag, moab::Range::end(), ErrorCode, appData::file_set, moab::Interface::get_adjacencies(), moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type_and_tag(), appData::ghost_elems, appData::ghost_vertices, appData::mat_sets, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, appData::neu_sets, GlobalContext::neumann_tag, appData::owned_elems, appData::primary_elems, moab::Range::size(), and moab::Interface::UNION.

◆ iMOAB_GetNeighborElements()

ErrCode iMOAB_GetNeighborElements ( iMOAB_AppID  pid,
iMOAB_LocalID local_index,
int *  num_adjacent_elements,
iMOAB_LocalID adjacent_element_IDs 
)

Retrieve the adjacencies for the element entities.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]local_index(iMOAB_LocalID*) The local element ID for which adjacency information is needed.
[out]num_adjacent_elements(int*) The total number of adjacent elements.
[out]adjacent_element_IDs(iMOAB_LocalID*) The local element IDs of all adjacent elements to the current one (typically, num_total_sides for internal elements or num_total_sides-num_sides_on_boundary for boundary elements).
Returns
ErrCode The error code indicating success or failure.

Definition at line 2174 of file iMOAB.cpp.

2178 {
2179  ErrorCode rval;
2180 
2181  // one neighbor for each subentity of dimension-1
2182  MeshTopoUtil mtu( context.MBI );
2183  appData& data = context.appDatas[*pid];
2184  EntityHandle eh = data.primary_elems[*local_index];
2185  Range adjs;
2186  rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
2187 
2188  if( *num_adjacent_elements < (int)adjs.size() )
2189  {
2190  return moab::MB_FAILURE;
2191  } // not dimensioned correctly
2192 
2193  *num_adjacent_elements = (int)adjs.size();
2194 
2195  for( int i = 0; i < *num_adjacent_elements; i++ )
2196  {
2197  adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
2198  }
2199 
2200  return moab::MB_SUCCESS;
2201 }

References GlobalContext::appDatas, context, appData::dimension, ErrorCode, moab::MeshTopoUtil::get_bridge_adjacencies(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, and moab::Range::size().

◆ iMOAB_GetPointerToSurfaceBC()

ErrCode iMOAB_GetPointerToSurfaceBC ( iMOAB_AppID  pid,
int *  surface_BC_length,
iMOAB_LocalID local_element_ID,
int *  reference_surface_ID,
int *  boundary_condition_value 
)

Get the surface boundary condition information.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]surface_BC_length(int*) The allocated size of surface boundary condition array, same as num_visible_surfaceBC returned by GetMeshInfo().
[out]local_element_ID(iMOAB_LocalID*) The local element IDs that contains the side with the surface BC.
[out]reference_surface_ID(int*) The surface number with the BC in the canonical reference element (e.g., 1 to 6 for HEX, 1-4 for TET).
[out]boundary_condition_value(int*) The boundary condition type as obtained from the mesh description (value of the NeumannSet defined on the element).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1357 of file iMOAB.cpp.

1362 {
1363  // we have to fill bc data for neumann sets;/
1364  ErrorCode rval;
1365 
1366  // it was counted above, in GetMeshInfo
1367  appData& data = context.appDatas[*pid];
1368  int numNeuSets = (int)data.neu_sets.size();
1369 
1370  int index = 0; // index [0, surface_BC_length) for the arrays returned
1371 
1372  for( int i = 0; i < numNeuSets; i++ )
1373  {
1374  Range subents;
1375  EntityHandle nset = data.neu_sets[i];
1376  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
1377 
1378  int neuVal;
1379  rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
1380 
1381  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
1382  {
1383  EntityHandle subent = *it;
1384  Range adjPrimaryEnts;
1385  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
1386 
1387  // get global id of the primary ents, and side number of the quad/subentity
1388  // this is moab ordering
1389  for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
1390  {
1391  EntityHandle primaryEnt = *pit;
1392  // get global id
1393  /*int globalID;
1394  rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
1395  if (MB_SUCCESS!=rval)
1396  return moab::MB_FAILURE;
1397  global_element_ID[index] = globalID;*/
1398 
1399  // get local element id
1400  local_element_ID[index] = data.primary_elems.index( primaryEnt );
1401 
1402  if( -1 == local_element_ID[index] )
1403  {
1404  return moab::MB_FAILURE;
1405  } // did not find the element locally
1406 
1407  int side_number, sense, offset;
1408  rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
1409 
1410  reference_surface_ID[index] = side_number + 1; // moab is from 0 to 5, it needs 1 to 6
1411  boundary_condition_value[index] = neuVal;
1412  index++;
1413  }
1414  }
1415  }
1416 
1417  if( index != *surface_BC_length )
1418  {
1419  return moab::MB_FAILURE;
1420  } // error in array allocations
1421 
1422  return moab::MB_SUCCESS;
1423 }

References GlobalContext::appDatas, moab::Range::begin(), context, appData::dimension, moab::Range::end(), ErrorCode, moab::Interface::get_adjacencies(), moab::Interface::get_entities_by_dimension(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::neu_sets, GlobalContext::neumann_tag, appData::primary_elems, moab::Interface::side_number(), moab::side_number(), moab::Range::size(), and moab::Interface::tag_get_data().

◆ iMOAB_GetPointerToVertexBC()

ErrCode iMOAB_GetPointerToVertexBC ( iMOAB_AppID  pid,
int *  vertex_BC_length,
iMOAB_LocalID local_vertex_ID,
int *  boundary_condition_value 
)

Get the vertex boundary condition information.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]vertex_BC_length(int*) The allocated size of vertex boundary condition array, same as num_visible_vertexBC returned by GetMeshInfo().
[out]local_vertex_ID(iMOAB_LocalID*) The local vertex ID that has Dirichlet BC defined.
[out]boundary_condition_value(int*) The boundary condition type as obtained from the mesh description (value of the Dirichlet_Set tag defined on the vertex).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1425 of file iMOAB.cpp.

1429 {
1430  ErrorCode rval;
1431 
1432  // it was counted above, in GetMeshInfo
1433  appData& data = context.appDatas[*pid];
1434  int numDiriSets = (int)data.diri_sets.size();
1435  int index = 0; // index [0, *vertex_BC_length) for the arrays returned
1436 
1437  for( int i = 0; i < numDiriSets; i++ )
1438  {
1439  Range verts;
1440  EntityHandle diset = data.diri_sets[i];
1441  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
1442 
1443  int diriVal;
1444  rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
1445 
1446  for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
1447  {
1448  EntityHandle vt = *vit;
1449  /*int vgid;
1450  rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
1451  if (MB_SUCCESS!=rval)
1452  return moab::MB_FAILURE;
1453  global_vertext_ID[index] = vgid;*/
1454  local_vertex_ID[index] = data.all_verts.index( vt );
1455 
1456  if( -1 == local_vertex_ID[index] )
1457  {
1458  return moab::MB_FAILURE;
1459  } // vertex was not found
1460 
1461  boundary_condition_value[index] = diriVal;
1462  index++;
1463  }
1464  }
1465 
1466  if( *vertex_BC_length != index )
1467  {
1468  return moab::MB_FAILURE;
1469  } // array allocation issue
1470 
1471  return moab::MB_SUCCESS;
1472 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::begin(), context, appData::diri_sets, GlobalContext::dirichlet_tag, moab::Range::end(), ErrorCode, moab::Interface::get_entities_by_dimension(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::Range::size(), and moab::Interface::tag_get_data().

◆ iMOAB_GetVertexID()

ErrCode iMOAB_GetVertexID ( iMOAB_AppID  pid,
int *  vertices_length,
iMOAB_GlobalID global_vertex_ID 
)

Get the global vertex IDs for all locally visible (owned and shared/ghosted) vertices.

Note
The array should be allocated by the user, sized with the total number of visible vertices from iMOAB_GetMeshInfo method.

Operations: Not collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]vertices_length(int*) The allocated size of array (typical size := num_visible_vertices).
[out]global_vertex_ID(iMOAB_GlobalID*) The global IDs for all locally visible vertices (array allocated by user).
Returns
ErrCode The error code indicating success or failure.

Definition at line 977 of file iMOAB.cpp.

978 {
979  IMOAB_CHECKPOINTER( vertices_length, 2 );
980  IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
981 
982  const Range& verts = context.appDatas[*pid].all_verts;
983  // check for problems with array length
984  IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
985 
986  // global id tag is context.globalID_tag
987  return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
988 }

References GlobalContext::appDatas, context, GlobalContext::globalID_tag, IMOAB_ASSERT, IMOAB_CHECKPOINTER, GlobalContext::MBI, moab::Range::size(), and moab::Interface::tag_get_data().

◆ iMOAB_GetVertexOwnership()

ErrCode iMOAB_GetVertexOwnership ( iMOAB_AppID  pid,
int *  vertices_length,
int *  visible_global_rank_ID 
)

Get vertex ownership information.

For each vertex based on the local ID, return the process that owns the vertex (local, shared or ghost)

Note
Shared vertices could be owned by different tasks. Local and shared vertices are first, ghost vertices are next in the array. Ghost vertices are always owned by a different process ID. Array allocated by the user with total size of visible vertices.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]vertices_length(int*) The allocated size of array (typically size := num_visible_vertices).
[out]visible_global_rank_ID(int*) The processor rank owning each of the local vertices.
Returns
ErrCode The error code indicating success or failure.

Definition at line 990 of file iMOAB.cpp.

991 {
992  assert( vertices_length && *vertices_length );
993  assert( visible_global_rank_ID );
994 
995  Range& verts = context.appDatas[*pid].all_verts;
996  int i = 0;
997 #ifdef MOAB_HAVE_MPI
998  ParallelComm* pco = context.appDatas[*pid].pcomm;
999 
1000  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
1001  {
1002  ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
1003  }
1004 
1005  if( i != *vertices_length )
1006  {
1007  return moab::MB_FAILURE;
1008  } // warning array allocation problem
1009 
1010 #else
1011 
1012  /* everything owned by proc 0 */
1013  if( (int)verts.size() != *vertices_length )
1014  {
1015  return moab::MB_FAILURE;
1016  } // warning array allocation problem
1017 
1018  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
1019  {
1020  visible_global_rank_ID[i] = 0;
1021  } // all vertices are owned by processor 0, as this is serial run
1022 
1023 #endif
1024 
1025  return moab::MB_SUCCESS;
1026 }

References GlobalContext::appDatas, moab::Range::begin(), context, moab::Range::end(), ErrorCode, moab::ParallelComm::get_owner(), MB_CHK_ERR, MB_SUCCESS, and moab::Range::size().

◆ iMOAB_GetVisibleElementsInfo()

ErrCode iMOAB_GetVisibleElementsInfo ( iMOAB_AppID  pid,
int *  num_visible_elements,
iMOAB_GlobalID element_global_IDs,
int *  ranks,
iMOAB_GlobalID block_IDs 
)

Get the visible elements information.

Return for all visible elements the global IDs, ranks they belong to, block ids they belong to.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_visible_elements(int*) The number of visible elements (returned by GetMeshInfo).
[out]element_global_IDs(iMOAB_GlobalID*) Element global ids to be added to the block.
[out]ranks(int*) The owning ranks of elements.
[out]block_IDs(iMOAB_GlobalID*) The block ids containing the elements.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1109 of file iMOAB.cpp.

1114 {
1115  appData& data = context.appDatas[*pid];
1116 #ifdef MOAB_HAVE_MPI
1117  ParallelComm* pco = context.appDatas[*pid].pcomm;
1118 #endif
1119 
1120  ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
1121 
1122  int i = 0;
1123 
1124  for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
1125  {
1126 #ifdef MOAB_HAVE_MPI
1127  rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
1128 
1129 #else
1130  /* everything owned by task 0 */
1131  ranks[i] = 0;
1132 #endif
1133  }
1134 
1135  for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
1136  {
1137  EntityHandle matMeshSet = *mit;
1138  Range elems;
1139  rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1140 
1141  int valMatTag;
1142  rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
1143 
1144  for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
1145  {
1146  EntityHandle eh = *eit;
1147  int index = data.primary_elems.index( eh );
1148 
1149  if( -1 == index )
1150  {
1151  return moab::MB_FAILURE;
1152  }
1153 
1154  if( -1 >= *num_visible_elements )
1155  {
1156  return moab::MB_FAILURE;
1157  }
1158 
1159  block_IDs[index] = valMatTag;
1160  }
1161  }
1162 
1163  return moab::MB_SUCCESS;
1164 }

References GlobalContext::appDatas, moab::Range::begin(), context, moab::Range::end(), ErrorCode, moab::Interface::get_entities_by_handle(), moab::ParallelComm::get_owner(), GlobalContext::globalID_tag, moab::Range::index(), appData::mat_sets, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, and moab::Interface::tag_get_data().

◆ iMOAB_GetVisibleVerticesCoordinates()

ErrCode iMOAB_GetVisibleVerticesCoordinates ( iMOAB_AppID  pid,
int *  coords_length,
double *  coordinates 
)

Get vertex coordinates for all local (owned and ghosted) vertices.

Note
coordinates are returned in an array allocated by user, interleaved. (do we need an option for blocked coordinates?) The size of the array is dimension times the number of visible vertices. The local ordering is implicit: owned/shared vertices are first, then ghosts.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]coords_length(int*) The size of the allocated coordinate array (array allocated by user, size := 3*num_visible_vertices).
[out]coordinates(double*) The pointer to user allocated memory that will be filled with interleaved coordinates.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1028 of file iMOAB.cpp.

1029 {
1030  Range& verts = context.appDatas[*pid].all_verts;
1031 
1032  // interleaved coordinates, so that means deep copy anyway
1033  if( *coords_length != 3 * (int)verts.size() )
1034  {
1035  return moab::MB_FAILURE;
1036  }
1037 
1038  ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
1039 
1040  return moab::MB_SUCCESS;
1041 }

References GlobalContext::appDatas, context, ErrorCode, moab::Interface::get_coords(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, and moab::Range::size().

◆ iMOAB_Initialize()

ErrCode iMOAB_Initialize ( int  argc,
iMOAB_String argv 
)

Initialize the iMOAB interface implementation.

Note
Will create the MOAB instance, if not created already (reference counted).

Operations: Collective

Parameters
[in]argc(int) Number of command line arguments
[in]argv(iMOAB_String*) Command line arguments
Returns
ErrCode

Definition at line 133 of file iMOAB.cpp.

134 {
135  if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
136 
137  context.iArgc = argc;
138  context.iArgv = argv; // shallow copy
139 
140  if( 0 == context.refCountMB )
141  {
142  context.MBI = new( std::nothrow ) moab::Core;
143  // retrieve the default tags
144  const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
146  // materials (blocks), surface-BC (neumann), vertex-BC (Dirichlet), global-id
147  Tag gtags[4];
148  for( int i = 0; i < 4; i++ )
149  {
150  MB_CHK_ERR(
151  context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY ) );
152  }
153  // now set the relevant (standard) tags for future use
154  context.material_tag = gtags[0];
155  context.neumann_tag = gtags[1];
156  context.dirichlet_tag = gtags[2];
157  context.globalID_tag = gtags[3];
158 
159  context.MPI_initialized = false;
160 #ifdef MOAB_HAVE_MPI
161  int flagInit;
162  MPI_Initialized( &flagInit );
163 
164  if( flagInit && !context.MPI_initialized )
165  {
166  MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
167  MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
168  context.MPI_initialized = true;
169  }
170 #endif
171  }
172 
174  return moab::MB_SUCCESS;
175 }

References context, DIRICHLET_SET_TAG_NAME, GlobalContext::dirichlet_tag, GLOBAL_ID_TAG_NAME, GlobalContext::globalID_tag, GlobalContext::globalrank, GlobalContext::iArgc, GlobalContext::iArgv, IMOAB_CHECKPOINTER, MATERIAL_SET_TAG_NAME, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MB_TAG_ANY, MB_TYPE_INTEGER, GlobalContext::MBI, GlobalContext::MPI_initialized, NEUMANN_SET_TAG_NAME, GlobalContext::neumann_tag, GlobalContext::refCountMB, moab::Interface::tag_get_handle(), and GlobalContext::worldprocs.

Referenced by iMOAB_InitializeFortran().

◆ iMOAB_InitializeFortran()

ErrCode iMOAB_InitializeFortran ( void  )

Initialize the iMOAB interface implementation from Fortran driver.

It will create the MOAB instance, if not created already (reference counted).

Operations: Collective

Returns
ErrCode The error code indicating success or failure.

Definition at line 177 of file iMOAB.cpp.

178 {
179  return iMOAB_Initialize( 0, 0 );
180 }

References iMOAB_Initialize().

◆ iMOAB_LoadMesh()

ErrCode iMOAB_LoadMesh ( iMOAB_AppID  pid,
const iMOAB_String  filename,
const iMOAB_String  read_options,
int *  num_ghost_layers 
)

Load a MOAB mesh file in parallel and exchange ghost layers as requested.

Note
All communication is MPI-based, and read options include parallel loading information, resolving shared entities. Local MOAB instance is populated with mesh cells and vertices in the corresponding local partitions.

This will also exchange ghost cells and vertices, as requested. The default bridge dimension is 0 (vertices), and all additional lower dimensional sub-entities are exchanged (mesh edges and faces). The tags in the file are not exchanged by default. Default tag information for GLOBAL_ID, MATERIAL_SET, NEUMANN_SET and DIRICHLET_SET is exchanged. Global ID tag is exchanged for all cells and vertices. Material sets, Neumann sets and Dirichlet sets are all augmented with the ghost entities.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]filename(iMOAB_String) The MOAB mesh file (H5M) to load onto the internal application mesh set.
[in]read_options(iMOAB_String) Additional options for reading the MOAB mesh file in parallel.
[in]num_ghost_layers(int*) The total number of ghost layers to exchange during mesh loading.
Returns
ErrCode The error code indicating success or failure.

Definition at line 585 of file iMOAB.cpp.

589 {
590  IMOAB_CHECKPOINTER( filename, 2 );
591  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
592  IMOAB_CHECKPOINTER( num_ghost_layers, 4 );
593 
594  // make sure we use the file set and pcomm associated with the *pid
595  std::ostringstream newopts;
596  if( read_options ) newopts << read_options;
597 
598 #ifdef MOAB_HAVE_MPI
599 
601  {
602  if( context.worldprocs > 1 )
603  {
604  std::string opts( ( read_options ? read_options : "" ) );
605  std::string pcid( "PARALLEL_COMM=" );
606  std::size_t found = opts.find( pcid );
607 
608  if( found != std::string::npos )
609  {
610  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
611  return moab::MB_FAILURE;
612  }
613 
614  // in serial, apply PARALLEL_COMM option only for h5m files; it does not work for .g
615  // files (used in test_remapping)
616  std::string filen( filename );
617  std::string::size_type idx = filen.rfind( '.' );
618 
619  if( idx != std::string::npos )
620  {
621  ParallelComm* pco = context.appDatas[*pid].pcomm;
622  std::string extension = filen.substr( idx + 1 );
623  if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
624  newopts << ";;PARALLEL_COMM=" << pco->get_id();
625  }
626 
627  if( *num_ghost_layers >= 1 )
628  {
629  // if we want ghosts, we will want additional entities, the last .1
630  // because the addl ents can be edges, faces that are part of the neumann sets
631  std::string pcid2( "PARALLEL_GHOSTS=" );
632  std::size_t found2 = opts.find( pcid2 );
633 
634  if( found2 != std::string::npos )
635  {
636  std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
637  "number of layers \n";
638  }
639  else
640  {
641  // dimension of primary entities is 3 here, but it could be 2 for climate
642  // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
643  // example: ";PARALLEL_GHOSTS=2.0.1"
644  newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
645  }
646  }
647  }
648  }
649 #else
650  IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
651 #endif
652 
653  // Now let us actually load the MOAB file with the appropriate read options
654  ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
655 
656 #ifdef VERBOSE
657  // some debugging stuff
658  std::ostringstream outfile;
659 #ifdef MOAB_HAVE_MPI
660  ParallelComm* pco = context.appDatas[*pid].pcomm;
661  int rank = pco->rank();
662  int nprocs = pco->size();
663  outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
664 #else
665  outfile << "TaskMesh_n1.0.h5m";
666 #endif
667  // the mesh contains ghosts too, but they are not part of mat/neumann set
668  // write in serial the file, to see what tags are missing
669  rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval ); // everything on current task, written in serial
670 #endif
671 
672  // Update ghost layer information
673  context.appDatas[*pid].num_ghost_layers = *num_ghost_layers;
674 
675  // Update mesh information
676  return iMOAB_UpdateMeshInfo( pid );
677 }

References GlobalContext::appDatas, context, ErrorCode, moab::ParallelComm::get_id(), IMOAB_ASSERT, IMOAB_CHECKPOINTER, iMOAB_UpdateMeshInfo(), moab::Interface::load_file(), MB_CHK_ERR, GlobalContext::MBI, GlobalContext::MPI_initialized, moab::ParallelComm::rank(), moab::ParallelComm::size(), GlobalContext::worldprocs, and moab::Interface::write_file().

◆ iMOAB_MergeVertices()

ErrCode iMOAB_MergeVertices ( iMOAB_AppID  pid)

Definition at line 3150 of file iMOAB.cpp.

3151 {
3152  appData& data = context.appDatas[*pid];
3153  ParallelComm* pco = context.appDatas[*pid].pcomm;
3154  // collapse vertices and transform cells into triangles/quads /polys
3155  // tags we care about: area, frac, global id
3156  std::vector< Tag > tagsList;
3157  Tag tag;
3158  ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
3159  if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE; // fatal error, abort
3160  tagsList.push_back( tag );
3161  rval = context.MBI->tag_get_handle( "area", tag );
3162  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3163  rval = context.MBI->tag_get_handle( "frac", tag );
3164  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3165  double tol = 1.0e-9;
3166  rval = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
3167 
3168  // clean material sets of cells that were deleted
3170  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
3171 
3172  if( !data.mat_sets.empty() )
3173  {
3174  EntityHandle matSet = data.mat_sets[0];
3175  Range elems;
3176  rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
3177  rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
3178  // put back only cells from data.file_set
3179  elems.clear();
3180  rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
3181  rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
3182  }
3183  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
3184 
3185  ParallelMergeMesh pmm( pco, tol );
3186  rval = pmm.merge( data.file_set,
3187  /* do not do local merge*/ false,
3188  /* 2d cells*/ 2 );MB_CHK_ERR( rval );
3189 
3190  // assign global ids only for vertices, cells have them fine
3191  rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
3192 
3193  // set the partition tag on the file set
3194  Tag part_tag;
3195  int dum_id = -1;
3196  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
3197  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
3198 
3199  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
3200  {
3201  std::cout << " can't get par part tag.\n";
3202  return moab::MB_FAILURE;
3203  }
3204 
3205  int rank = pco->rank();
3206  rval = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
3207 
3208  return moab::MB_SUCCESS;
3209 }

References moab::Interface::add_entities(), GlobalContext::appDatas, moab::ParallelComm::assign_global_ids(), moab::Range::clear(), context, moab::Range::empty(), ErrorCode, appData::file_set, moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type_and_tag(), iMOAB_UpdateMeshInfo(), appData::mat_sets, GlobalContext::material_tag, MB_ALREADY_ALLOCATED, MB_CHK_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_SPARSE, MB_TYPE_INTEGER, MBENTITYSET, GlobalContext::MBI, moab::ParallelMergeMesh::merge(), moab::ParallelComm::rank(), moab::IntxUtils::remove_duplicate_vertices(), moab::Interface::remove_entities(), moab::Interface::tag_get_handle(), moab::Interface::tag_set_data(), and moab::Interface::UNION.

◆ iMOAB_ReadHeaderInfo()

ErrCode iMOAB_ReadHeaderInfo ( const iMOAB_String  filename,
int *  num_global_vertices,
int *  num_global_elements,
int *  num_dimension,
int *  num_parts 
)

It should be called on the master task only, and the information obtained can be broadcast to other tasks by the user. It is a fast lookup in the header of the file.

Note
Operations: Not collective
Parameters
[in]filename(iMOAB_String) The MOAB mesh file (H5M) to probe for header information.
[out]num_global_vertices(int*) The total number of vertices in the mesh file.
[out]num_global_elements(int*) The total number of elements (of highest dimension only).
[out]num_dimension(int*) The highest dimension of elements in the mesh (Edge=1, Tri/Quad=2, Tet/Hex/Prism/Pyramid=3).
[out]num_parts(int*) The total number of partitions available in the mesh file, typically partitioned with mbpart during pre-processing.
Returns
ErrCode The error code indicating success or failure.

Definition at line 441 of file iMOAB.cpp.

446 {
447  IMOAB_CHECKPOINTER( filename, 1 );
448  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
449 
450 #ifdef MOAB_HAVE_HDF5
451  std::string filen( filename );
452 
453  int edges = 0;
454  int faces = 0;
455  int regions = 0;
456  if( num_global_vertices ) *num_global_vertices = 0;
457  if( num_global_elements ) *num_global_elements = 0;
458  if( num_dimension ) *num_dimension = 0;
459  if( num_parts ) *num_parts = 0;
460 
461  mhdf_FileHandle file;
462  mhdf_Status status;
463  unsigned long max_id;
464  struct mhdf_FileDesc* data;
465 
466  file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
467 
468  if( mhdf_isError( &status ) )
469  {
470  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
471  return moab::MB_FAILURE;
472  }
473 
474  data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
475  1 ); // will use extra set info; will get parallel partition tag info too!
476 
477  if( mhdf_isError( &status ) )
478  {
479  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
480  return moab::MB_FAILURE;
481  }
482 
483  if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
484  if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
485 
486  for( int i = 0; i < data->num_elem_desc; i++ )
487  {
488  struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
489  struct mhdf_EntDesc* ent_d = &( el_desc->desc );
490 
491  if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
492  {
493  edges += ent_d->count;
494  }
495 
496  if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
497  {
498  faces += ent_d->count;
499  }
500 
501  if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
502  {
503  faces += ent_d->count;
504  }
505 
506  if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
507  {
508  faces += ent_d->count;
509  }
510 
511  if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
512  {
513  regions += ent_d->count;
514  }
515 
516  if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
517  {
518  regions += ent_d->count;
519  }
520 
521  if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
522  {
523  regions += ent_d->count;
524  }
525 
526  if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
527  {
528  regions += ent_d->count;
529  }
530 
531  if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
532  {
533  regions += ent_d->count;
534  }
535 
536  if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
537  {
538  regions += ent_d->count;
539  }
540 
541  if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
542  {
543  regions += ent_d->count;
544  }
545  }
546 
547  if( num_parts ) *num_parts = data->numEntSets[0];
548 
549  // is this required?
550  if( edges > 0 )
551  {
552  if( num_dimension ) *num_dimension = 1; // I don't think it will ever return 1
553  if( num_global_elements ) *num_global_elements = edges;
554  }
555 
556  if( faces > 0 )
557  {
558  if( num_dimension ) *num_dimension = 2;
559  if( num_global_elements ) *num_global_elements = faces;
560  }
561 
562  if( regions > 0 )
563  {
564  if( num_dimension ) *num_dimension = 3;
565  if( num_global_elements ) *num_global_elements = regions;
566  }
567 
568  mhdf_closeFile( file, &status );
569 
570  free( data );
571 
572 #else
573  std::cout << filename
574  << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
575  "formats other than a h5m file.\n";
576  if( num_global_vertices ) *num_global_vertices = 0;
577  if( num_global_elements ) *num_global_elements = 0;
578  if( num_dimension ) *num_dimension = 0;
579  if( num_parts ) *num_parts = 0;
580 #endif
581 
582  return moab::MB_SUCCESS;
583 }

References mhdf_EntDesc::count, mhdf_ElemDesc::desc, mhdf_FileDesc::elems, IMOAB_ASSERT, IMOAB_CHECKPOINTER, MB_SUCCESS, mdhf_HEX_TYPE_NAME, mdhf_KNIFE_TYPE_NAME, mhdf_closeFile(), mhdf_EDGE_TYPE_NAME, mhdf_getFileSummary(), mhdf_isError(), mhdf_message(), mhdf_openFile(), mhdf_POLYGON_TYPE_NAME, mhdf_POLYHEDRON_TYPE_NAME, mhdf_PRISM_TYPE_NAME, mhdf_PYRAMID_TYPE_NAME, mhdf_QUAD_TYPE_NAME, mhdf_SEPTAHEDRON_TYPE_NAME, mhdf_TET_TYPE_NAME, mhdf_TRI_TYPE_NAME, mhdf_FileDesc::nodes, mhdf_FileDesc::num_elem_desc, mhdf_FileDesc::numEntSets, mhdf_ElemDesc::type, and mhdf_EntDesc::vals_per_ent.

◆ iMOAB_ReceiveElementTag()

ErrCode iMOAB_ReceiveElementTag ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
MPI_Comm *  joint_communicator,
int *  context_id 
)

Definition at line 2735 of file iMOAB.cpp.

2739 {
2740  appData& data = context.appDatas[*pid];
2741  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2742  if( mt == data.pgraph.end() )
2743  {
2744  std::cout << " no par com graph for context_id:" << *context_id << " available contexts:";
2745  for( auto mit = data.pgraph.begin(); mit != data.pgraph.end(); mit++ )
2746  std::cout << " " << mit->first;
2747  std::cout << "\n";
2748  return moab::MB_FAILURE;
2749  }
2750 
2751  ParCommGraph* cgraph = mt->second;
2752  ParallelComm* pco = context.appDatas[*pid].pcomm;
2753  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2754  : *joint_communicator );
2755  Range owned = ( data.point_cloud ? data.local_verts : data.owned_elems );
2756 
2757  // how do I know if this receiver already participated in an intersection driven by coupler?
2758  // also, what if this was the "source" mesh in intx?
2759  // in that case, the elements might have been instantiated in the coverage set locally, the
2760  // "owned" range can be different the elements are now in tempestRemap coverage_set
2761  EntityHandle cover_set = cgraph->get_cover_set();
2762  if( 0 != cover_set ) MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2763 
2764  // another possibility is for par comm graph to be computed from iMOAB_ComputeCommGraph, for
2765  // after atm ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp) get then the cover set
2766  // from ints remapper
2767 #ifdef MOAB_HAVE_TEMPESTREMAP
2768  if( data.tempestData.remapper != nullptr ) // this is the case this is part of intx;;
2769  {
2770  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2771  MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2772  }
2773 #endif
2774 
2775  std::string tag_name( tag_storage_name );
2776  // we assume that there are separators ";" between the tag names
2777  std::vector< std::string > tagNames;
2778  std::vector< Tag > tagHandles;
2779  std::string separator( ":" );
2780  split_tag_names( tag_name, separator, tagNames );
2781  for( size_t i = 0; i < tagNames.size(); i++ )
2782  {
2783  Tag tagHandle;
2784  ErrorCode rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2785  if( MB_SUCCESS != rval || nullptr == tagHandle )
2786  {
2787  MB_CHK_SET_ERR( moab::MB_FAILURE,
2788  " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i );
2789  }
2790  tagHandles.push_back( tagHandle );
2791  }
2792 
2793 #ifdef VERBOSE
2794  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
2795  << " and file set = " << ( data.file_set ) << "\n";
2796 #endif
2797  // pco is needed to pack, and for moab instance, not for communication!
2798  // still use nonblocking communication
2799  MB_CHK_SET_ERR( cgraph->receive_tag_values( global, pco, owned, tagHandles ), "failed to receive tag values" );
2800 
2801 #ifdef VERBOSE
2802  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
2803 #endif
2804 
2805  return moab::MB_SUCCESS;
2806 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, ErrorCode, appData::file_set, moab::ParCommGraph::get_cover_set(), moab::Interface::get_entities_by_dimension(), appData::is_fortran, appData::local_verts, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, GlobalContext::MBI, appData::owned_elems, appData::pgraph, appData::point_cloud, moab::ParallelComm::rank(), moab::ParCommGraph::receive_tag_values(), split_tag_names(), and moab::Interface::tag_get_handle().

◆ iMOAB_ReceiveMesh()

ErrCode iMOAB_ReceiveMesh ( iMOAB_AppID  pid,
MPI_Comm *  joint_communicator,
MPI_Group *  sendingGroup,
int *  scompid 
)

Definition at line 2502 of file iMOAB.cpp.

2503 {
2504  assert( joint_communicator != nullptr );
2505  assert( sendingGroup != nullptr );
2506  assert( scompid != nullptr );
2507 
2508  ErrorCode rval;
2509  appData& data = context.appDatas[*pid];
2510  ParallelComm* pco = context.appDatas[*pid].pcomm;
2511  MPI_Comm receive = pco->comm();
2512  EntityHandle local_set = data.file_set;
2513 
2514  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2515  : *joint_communicator );
2516  MPI_Group sendGroup =
2517  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
2518 
2519  // first see what are the processors in each group; get the sender group too, from the sender
2520  // communicator
2521  MPI_Group receiverGroup;
2522  int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
2523 
2524  // instantiate the par comm graph
2525  ParCommGraph* cgraph =
2526  new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
2527  // TODO we should search if we have another pcomm with the same comp ids in the list already
2528  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2529  context.appDatas[*pid].pgraph[*scompid] = cgraph;
2530 
2531  int receiver_rank = -1;
2532  MPI_Comm_rank( receive, &receiver_rank );
2533 
2534  // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
2535  // knows what data to expect
2536  std::vector< int > pack_array;
2537  rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
2538 
2539  // senders across for the current receiver
2540  int current_receiver = cgraph->receiver( receiver_rank );
2541 
2542  std::vector< int > senders_local;
2543  size_t n = 0;
2544 
2545  while( n < pack_array.size() )
2546  {
2547  if( current_receiver == pack_array[n] )
2548  {
2549  for( int j = 0; j < pack_array[n + 1]; j++ )
2550  {
2551  senders_local.push_back( pack_array[n + 2 + j] );
2552  }
2553 
2554  break;
2555  }
2556 
2557  n = n + 2 + pack_array[n + 1];
2558  }
2559 
2560 #ifdef VERBOSE
2561  std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
2562  << senders_local.size() << " tasks: ";
2563 
2564  for( int k = 0; k < (int)senders_local.size(); k++ )
2565  {
2566  std::cout << " " << senders_local[k];
2567  }
2568 
2569  std::cout << "\n";
2570 #endif
2571 
2572  if( senders_local.empty() )
2573  {
2574  std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
2575  }
2576  rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
2577 
2578  // after we are done, we could merge vertices that come from different senders, but
2579  // have the same global id
2580  Tag idtag;
2581  rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
2582 
2583  // data.point_cloud = false;
2584  Range local_ents;
2585  rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
2586 
2587  // do not do merge if point cloud
2588  if( !local_ents.all_of_type( MBVERTEX ) )
2589  {
2590  if( (int)senders_local.size() >= 2 ) // need to remove duplicate vertices
2591  // that might come from different senders
2592  {
2593 
2594  Range local_verts = local_ents.subset_by_type( MBVERTEX );
2595  Range local_elems = subtract( local_ents, local_verts );
2596 
2597  // remove from local set the vertices
2598  rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
2599 
2600 #ifdef VERBOSE
2601  std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
2602 #endif
2603  MergeMesh mm( context.MBI );
2604 
2605  rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
2606 
2607  Range new_verts; // local elems are local entities without vertices
2608  rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
2609 
2610 #ifdef VERBOSE
2611  std::cout << "after merging: new verts: " << new_verts.size() << "\n";
2612 #endif
2613  rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
2614  }
2615  }
2616  else
2617  data.point_cloud = true;
2618 
2619  if( !data.point_cloud )
2620  {
2621  // still need to resolve shared entities (in this case, vertices )
2622  rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
2623  }
2624  else
2625  {
2626  // if partition tag exists, set it to current rank; just to make it visible in VisIt
2627  Tag densePartTag;
2628  rval = context.MBI->tag_get_handle( "partition", densePartTag );
2629  if( nullptr != densePartTag && MB_SUCCESS == rval )
2630  {
2631  Range local_verts;
2632  rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
2633  std::vector< int > vals;
2634  int rank = pco->rank();
2635  vals.resize( local_verts.size(), rank );
2636  rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
2637  }
2638  }
2639  // set the parallel partition tag
2640  Tag part_tag;
2641  int dum_id = -1;
2642  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2643  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2644 
2645  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2646  {
2647  std::cout << " can't get par part tag.\n";
2648  return moab::MB_FAILURE;
2649  }
2650 
2651  int rank = pco->rank();
2652  rval = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
2653 
2654  // make sure that the GLOBAL_ID is defined as a tag
2655  int tagtype = 0; // dense, integer
2656  int numco = 1; // size
2657  int tagindex; // not used
2658  rval = iMOAB_DefineTagStorage( pid, "GLOBAL_ID", &tagtype, &numco, &tagindex );MB_CHK_ERR( rval );
2659  // populate the mesh with current data info
2660  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
2661 
2662  // mark for deletion
2663  MPI_Group_free( &receiverGroup );
2664 
2665  return moab::MB_SUCCESS;
2666 }

References moab::Interface::add_entities(), moab::Range::all_of_type(), GlobalContext::appDatas, CHK_MPI_ERR, moab::ParallelComm::comm(), context, ErrorCode, appData::file_set, moab::Interface::get_connectivity(), moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_handle(), iMOAB_DefineTagStorage(), iMOAB_UpdateMeshInfo(), appData::is_fortran, MB_ALREADY_ALLOCATED, MB_CHK_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_SPARSE, MB_TYPE_INTEGER, GlobalContext::MBI, MBVERTEX, moab::MergeMesh::merge_using_integer_tag(), appData::point_cloud, moab::ParallelComm::rank(), moab::ParCommGraph::receive_comm_graph(), moab::ParCommGraph::receive_mesh(), moab::ParCommGraph::receiver(), moab::Interface::remove_entities(), moab::ParallelComm::resolve_shared_ents(), moab::Range::size(), moab::Range::subset_by_type(), moab::subtract(), moab::Interface::tag_get_handle(), and moab::Interface::tag_set_data().

◆ iMOAB_ReduceTagsMax()

ErrCode iMOAB_ReduceTagsMax ( iMOAB_AppID  pid,
int *  tag_index,
int *  ent_type 
)

Perform global reductions with the processes in the current application's communicator. Specifically, this routine performs reductions on the maximum value of the tag indicated by tag_index.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_index(int*) The tag index of interest.
[in]ent_type(int*) The type of entity for tag reduction operation (vertices = 0, elements = 1)
Returns
ErrCode The error code indicating success or failure.

Definition at line 2134 of file iMOAB.cpp.

2135 {
2136 #ifdef MOAB_HAVE_MPI
2137  appData& data = context.appDatas[*pid];
2138  Range ent_exchange;
2139 
2140  if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
2141  {
2142  return moab::MB_FAILURE;
2143  } // error in tag index
2144 
2145  Tag tagh = data.tagList[*tag_index];
2146 
2147  if( *ent_type == 0 )
2148  {
2149  ent_exchange = data.all_verts;
2150  }
2151  else if( *ent_type == 1 )
2152  {
2153  ent_exchange = data.primary_elems;
2154  }
2155  else
2156  {
2157  return moab::MB_FAILURE;
2158  } // unexpected type
2159 
2160  ParallelComm* pco = context.appDatas[*pid].pcomm;
2161  // we could do different MPI_Op; do not bother now, we will call from fortran
2162  ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
2163 
2164 #else
2165  /* do nothing if serial */
2166  // just silence the warning
2167  // do not call sync tags in serial!
2168  int k = *pid + *tag_index + *ent_type;
2169  k++; // just do junk, to avoid complaints
2170 #endif
2171  return moab::MB_SUCCESS;
2172 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, appData::primary_elems, moab::ParallelComm::reduce_tags(), and appData::tagList.

◆ iMOAB_RegisterApplication()

ErrCode iMOAB_RegisterApplication ( const iMOAB_String  app_name,
MPI_Comm *  comm,
int *  compid,
iMOAB_AppID  pid 
)

Definition at line 218 of file iMOAB.cpp.

224 {
225  IMOAB_CHECKPOINTER( app_name, 1 );
226 #ifdef MOAB_HAVE_MPI
227  IMOAB_CHECKPOINTER( comm, 2 );
228  IMOAB_CHECKPOINTER( compid, 3 );
229 #else
230  IMOAB_CHECKPOINTER( compid, 2 );
231 #endif
232 
233  // will create a parallel comm for this application too, so there will be a
234  // mapping from *pid to file set and to parallel comm instances
235  std::string name( app_name );
236 
237  if( context.appIdMap.find( name ) != context.appIdMap.end() )
238  {
239  std::cout << " application " << name << " already registered \n";
240  return moab::MB_FAILURE;
241  }
242 
243  // trivial hash: just use the id that the user provided and assume it is unique
244  // *pid = *compid;
245  // hash: compute a unique hash as a combination of the appname and the component id
246  *pid = apphash( app_name, *compid );
247  // store map of application name and the ID we just generated
248  context.appIdMap[name] = *pid;
249  int rankHere = 0;
250 #ifdef MOAB_HAVE_MPI
251  MPI_Comm_rank( *comm, &rankHere );
252 #endif
253  if( !rankHere )
254  std::cout << " application " << name << " with ID = " << *pid << " and external id: " << *compid
255  << " is registered now \n";
256  if( *compid <= 0 )
257  {
258  std::cout << " convention for external application is to have its id positive \n";
259  return moab::MB_FAILURE;
260  }
261 
262  // create now the file set that will be used for loading the model in
263  EntityHandle file_set;
264  ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
265 
266  appData app_data;
267  app_data.file_set = file_set;
268  app_data.global_id = *compid; // will be used mostly for par comm graph
269  app_data.name = name; // save the name of application
270 
271 #ifdef MOAB_HAVE_TEMPESTREMAP
272  app_data.tempestData.remapper = nullptr; // Only allocate as needed
273  app_data.tempestData.num_src_ghost_layers = 0;
274  app_data.tempestData.num_tgt_ghost_layers = 0;
275 #endif
276 
277  // set some default values
278  app_data.num_ghost_layers = 0;
279  app_data.point_cloud = false;
280  app_data.is_fortran = false;
281 #ifdef MOAB_HAVE_TEMPESTREMAP
282  app_data.secondary_file_set = app_data.file_set;
283 #endif
284 
285 #ifdef MOAB_HAVE_MPI
286  if( *comm ) app_data.pcomm = new ParallelComm( context.MBI, *comm );
287 #endif
288  context.appDatas[*pid] = app_data; // it will correspond to app_FileSets[*pid] will be the file set of interest
289  return moab::MB_SUCCESS;
290 }

References GlobalContext::appDatas, apphash(), GlobalContext::appIdMap, context, moab::Interface::create_meshset(), ErrorCode, appData::file_set, appData::global_id, IMOAB_CHECKPOINTER, appData::is_fortran, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, MESHSET_SET, appData::name, appData::num_ghost_layers, appData::pcomm, and appData::point_cloud.

Referenced by iMOAB_RegisterApplicationFortran().

◆ iMOAB_RegisterApplicationFortran()

ErrCode iMOAB_RegisterApplicationFortran ( const iMOAB_String  app_name,
int *  comm,
int *  compid,
iMOAB_AppID  pid 
)

Definition at line 292 of file iMOAB.cpp.

298 {
299  IMOAB_CHECKPOINTER( app_name, 1 );
300 #ifdef MOAB_HAVE_MPI
301  IMOAB_CHECKPOINTER( comm, 2 );
302  IMOAB_CHECKPOINTER( compid, 3 );
303 #else
304  IMOAB_CHECKPOINTER( compid, 2 );
305 #endif
306 
307  ErrCode err;
308  assert( app_name != nullptr );
309  std::string name( app_name );
310 
311 #ifdef MOAB_HAVE_MPI
312  MPI_Comm ccomm;
313  if( comm )
314  {
315  // Convert from Fortran communicator to a C communicator
316  // NOTE: see transfer of handles at
317  // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
318  ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
319  }
320 #endif
321 
322  // now call C style registration function:
323  err = iMOAB_RegisterApplication( app_name,
324 #ifdef MOAB_HAVE_MPI
325  &ccomm,
326 #endif
327  compid, pid );
328 
329  // Now that we have created the application context, store that
330  // the application being registered is from a Fortran context
331  context.appDatas[*pid].is_fortran = true;
332 
333  return err;
334 }

References GlobalContext::appDatas, context, ErrCode, IMOAB_CHECKPOINTER, iMOAB_RegisterApplication(), and MOAB_HAVE_MPI.

◆ iMOAB_ResolveSharedEntities()

ErrCode iMOAB_ResolveSharedEntities ( iMOAB_AppID  pid,
int *  num_verts,
int *  marker 
)

Resolve shared entities using global markers on shared vertices.

Note
A global marker can be, for example, a global node id or a global DoF number (as for HOMME)

Operations: Collective .

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_verts(int*) Number of vertices.
[in]marker(int*) Resolving marker (global id marker).
Returns
ErrCode The error code indicating success or failure.

Definition at line 2324 of file iMOAB.cpp.

2325 {
2326  appData& data = context.appDatas[*pid];
2327  ParallelComm* pco = context.appDatas[*pid].pcomm;
2328  EntityHandle cset = data.file_set;
2329  int dum_id = 0;
2330  ErrorCode rval;
2331  if( data.primary_elems.empty() )
2332  {
2333  // skip actual resolve, assume vertices are distributed already ,
2334  // no need to share them
2335  }
2336  else
2337  {
2338  // create an integer tag for resolving ; maybe it can be a long tag in the future
2339  // (more than 2 B vertices;)
2340 
2341  Tag stag;
2342  rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
2343  &dum_id );MB_CHK_ERR( rval );
2344 
2345  if( *num_verts > (int)data.local_verts.size() )
2346  {
2347  return moab::MB_FAILURE;
2348  } // we are not setting the size
2349 
2350  rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval ); // assumes integer tag
2351 
2352  rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
2353 
2354  rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
2355  }
2356  // provide partition tag equal to rank
2357  Tag part_tag;
2358  dum_id = -1;
2359  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2360  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2361 
2362  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2363  {
2364  std::cout << " can't get par part tag.\n";
2365  return moab::MB_FAILURE;
2366  }
2367 
2368  int rank = pco->rank();
2369  rval = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
2370 
2371  return moab::MB_SUCCESS;
2372 }

References GlobalContext::appDatas, context, moab::Range::empty(), ErrorCode, appData::file_set, appData::local_verts, MB_ALREADY_ALLOCATED, MB_CHK_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_DENSE, MB_TAG_SPARSE, MB_TYPE_INTEGER, GlobalContext::MBI, appData::primary_elems, moab::ParallelComm::rank(), moab::ParallelComm::resolve_shared_ents(), moab::Range::size(), moab::Interface::tag_delete(), moab::Interface::tag_get_handle(), and moab::Interface::tag_set_data().

◆ iMOAB_SendElementTag()

ErrCode iMOAB_SendElementTag ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
MPI_Comm *  joint_communicator,
int *  context_id 
)

Definition at line 2668 of file iMOAB.cpp.

2672 {
2673  appData& data = context.appDatas[*pid];
2674  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2675  if( mt == data.pgraph.end() )
2676  {
2677  std::cout << " no par com graph for context_id:" << *context_id << " available contexts:";
2678  for( auto mit = data.pgraph.begin(); mit != data.pgraph.end(); mit++ )
2679  std::cout << " " << mit->first;
2680  std::cout << "\n";
2681  return moab::MB_FAILURE;
2682  }
2683 
2684  ParCommGraph* cgraph = mt->second;
2685  ParallelComm* pco = context.appDatas[*pid].pcomm;
2686  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2687  : *joint_communicator );
2688  Range owned = ( data.point_cloud ? data.local_verts : data.owned_elems );
2689 
2690 #ifdef MOAB_HAVE_TEMPESTREMAP
2691  if( data.tempestData.remapper != nullptr ) // this is the case this is part of intx;;
2692  {
2693  EntityHandle cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2694  MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2695  }
2696 #else
2697  // now, in case we are sending from intx between ocn and atm, we involve coverage set
2698  // how do I know if this receiver already participated in an intersection driven by coupler?
2699  // also, what if this was the "source" mesh in intx?
2700  // in that case, the elements might have been instantiated in the coverage set locally, the
2701  // "owned" range can be different the elements are now in tempestRemap coverage_set
2702  EntityHandle cover_set = cgraph->get_cover_set(); // this will be non null only for intx app ?
2703  if( 0 != cover_set ) MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2704 #endif
2705 
2706  std::string tag_name( tag_storage_name );
2707 
2708  // basically, we assume everything is defined already on the tag,
2709  // and we can get the tags just by its name
2710  // we assume that there are separators ":" between the tag names
2711  std::vector< std::string > tagNames;
2712  std::vector< Tag > tagHandles;
2713  std::string separator( ":" );
2714  split_tag_names( tag_name, separator, tagNames );
2715  for( size_t i = 0; i < tagNames.size(); i++ )
2716  {
2717  Tag tagHandle;
2718  ErrorCode rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2719  if( MB_SUCCESS != rval || nullptr == tagHandle )
2720  {
2721  MB_CHK_SET_ERR( moab::MB_FAILURE,
2722  "can't get tag handle with name: " << tagNames[i].c_str() << " at index " << i );
2723  }
2724  tagHandles.push_back( tagHandle );
2725  }
2726 
2727  // pco is needed to pack, and for moab instance, not for communication!
2728  // still use nonblocking communication, over the joint comm
2729  MB_CHK_ERR( cgraph->send_tag_values( global, pco, owned, tagHandles ) );
2730  // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
2731 
2732  return moab::MB_SUCCESS;
2733 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, ErrorCode, moab::ParCommGraph::get_cover_set(), moab::Interface::get_entities_by_dimension(), appData::is_fortran, appData::local_verts, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, GlobalContext::MBI, appData::owned_elems, appData::pgraph, appData::point_cloud, moab::ParCommGraph::send_tag_values(), split_tag_names(), and moab::Interface::tag_get_handle().

◆ iMOAB_SendMesh()

ErrCode iMOAB_SendMesh ( iMOAB_AppID  pid,
MPI_Comm *  joint_communicator,
MPI_Group *  receivingGroup,
int *  rcompid,
int *  method 
)

Definition at line 2406 of file iMOAB.cpp.

2411 {
2412  assert( joint_communicator != nullptr );
2413  assert( receivingGroup != nullptr );
2414  assert( rcompid != nullptr );
2415 
2416  ErrorCode rval;
2417  int ierr;
2418  appData& data = context.appDatas[*pid];
2419  ParallelComm* pco = context.appDatas[*pid].pcomm;
2420 
2421  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2422  : *joint_communicator );
2423  MPI_Group recvGroup =
2424  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
2425  MPI_Comm sender = pco->comm(); // the sender comm is obtained from parallel comm in moab; no need to pass it along
2426  // first see what are the processors in each group; get the sender group too, from the sender communicator
2427  MPI_Group senderGroup;
2428  ierr = MPI_Comm_group( sender, &senderGroup );
2429  if( ierr != 0 ) return moab::MB_FAILURE;
2430 
2431  // instantiate the par comm graph
2432  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2433  // int coid2)
2434  ParCommGraph* cgraph =
2435  new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
2436  // we should search if we have another pcomm with the same comp ids in the list already
2437  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2438  context.appDatas[*pid].pgraph[*rcompid] = cgraph;
2439 
2440  int sender_rank = -1;
2441  MPI_Comm_rank( sender, &sender_rank );
2442 
2443  // decide how to distribute elements to each processor
2444  // now, get the entities on local processor, and pack them into a buffer for various processors
2445  // we will do trivial partition: first get the total number of elements from "sender"
2446  std::vector< int > number_elems_per_part;
2447  // how to distribute local elements to receiving tasks?
2448  // trivial partition: compute first the total number of elements need to be sent
2449  Range owned = context.appDatas[*pid].owned_elems;
2450  if( owned.size() == 0 )
2451  {
2452  // must be vertices that we want to send then
2453  owned = context.appDatas[*pid].local_verts;
2454  // we should have some vertices here
2455  }
2456 
2457  if( *method == 0 ) // trivial partitioning, old method
2458  {
2459  int local_owned_elem = (int)owned.size();
2460  int size = pco->size();
2461  int rank = pco->rank();
2462  number_elems_per_part.resize( size ); //
2463  number_elems_per_part[rank] = local_owned_elem;
2464 #if( MPI_VERSION >= 2 )
2465  // Use "in place" option
2466  ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
2467 #else
2468  {
2469  std::vector< int > all_tmp( size );
2470  ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
2471  number_elems_per_part = all_tmp;
2472  }
2473 #endif
2474 
2475  if( ierr != 0 )
2476  {
2477  return moab::MB_FAILURE;
2478  }
2479 
2480  // every sender computes the trivial partition, it is cheap, and we need to send it anyway
2481  // to each sender
2482  rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
2483 
2484  rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
2485  }
2486  else // *method != 0, so it is either graph or geometric, parallel
2487  {
2488  // owned are the primary elements on this app
2489  rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
2490 
2491  // basically, send the graph to the receiver side, with unblocking send
2492  rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
2493  }
2494  // pco is needed to pack, not for communication
2495  rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
2496 
2497  // mark for deletion
2498  MPI_Group_free( &senderGroup );
2499  return moab::MB_SUCCESS;
2500 }

References GlobalContext::appDatas, moab::ParallelComm::comm(), moab::ParCommGraph::compute_partition(), moab::ParCommGraph::compute_trivial_partition(), context, ErrorCode, appData::is_fortran, MB_CHK_ERR, MB_SUCCESS, moab::ParallelComm::rank(), moab::ParCommGraph::send_graph(), moab::ParCommGraph::send_graph_partition(), moab::ParCommGraph::send_mesh_parts(), size, moab::Range::size(), and moab::ParallelComm::size().

◆ iMOAB_SetDoubleTagStorage()

ErrCode iMOAB_SetDoubleTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  num_tag_storage_length,
int *  entity_type,
double *  tag_storage_data 
)

Store the specified values in a MOAB double Tag.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag names, separated by ':', identifying the tags in which to store the data.
[in]num_tag_storage_length(int*) The size of total tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity*num_tags).
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[in]tag_storage_data(double*) The array data of type double used to replace the internal tag memory; the data is assumed to be contiguous over the local set of visible entities (either vertices or elements), unrolled by tags.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1714 of file iMOAB.cpp.

1719 {
1720  ErrorCode rval;
1721  std::string tag_names( tag_storage_names );
1722  // exactly the same code as for int tag :) maybe should check the type of tag too
1723  std::vector< std::string > tagNames;
1724  std::vector< Tag > tagHandles;
1725  std::string separator( ":" );
1726  split_tag_names( tag_names, separator, tagNames );
1727 
1728  appData& data = context.appDatas[*pid];
1729  // set it on a subset of entities, based on type and length
1730  // if *entity_type = 0, then use vertices; else elements
1731  Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1732 
1733  int nents_to_be_set = (int)( *ents_to_set ).size();
1734  int position = 0;
1735  for( size_t i = 0; i < tagNames.size(); i++ )
1736  {
1737  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1738  {
1739  return moab::MB_FAILURE;
1740  } // some tag not defined yet in the app
1741 
1742  Tag tag = data.tagMap[tagNames[i]];
1743 
1744  int tagLength = 0;
1745  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1746 
1747  DataType dtype;
1748  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1749 
1750  if( dtype != MB_TYPE_DOUBLE )
1751  {
1752  return moab::MB_FAILURE;
1753  }
1754 
1755  // set it on the subset of entities, based on type and length
1756  if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
1757  return moab::MB_FAILURE; // too many entity values to be set
1758 
1759  rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
1760  // increment position to next tag
1761  position = position + tagLength * nents_to_be_set;
1762  }
1763  return moab::MB_SUCCESS; // no error
1764 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, MB_TYPE_DOUBLE, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), split_tag_names(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), moab::Interface::tag_set_data(), and appData::tagMap.

◆ iMOAB_SetDoubleTagStorageWithGid()

ErrCode iMOAB_SetDoubleTagStorageWithGid ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_names,
int *  num_tag_storage_length,
int *  entity_type,
double *  tag_storage_data,
int *  globalIds 
)

Store the specified values in a MOAB double Tag, for given ids.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_names(iMOAB_String) The tag names, separated by ':', identifying the tags in which to store the data.
[in]num_tag_storage_length(int*) The size of total tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity*num_tags).
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[in]tag_storage_data(double*) The array data of type double used to replace the internal tag memory; the data is assumed to be permuted over the local set of visible entities (either vertices or elements), unrolled by tags. In parallel, the entries may arrive in a different order, or correspond to entities owned by other tasks.
[in]globalIds(int*) The global ids of the entities whose tag values are to be set.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1766 of file iMOAB.cpp.

1772 {
1773  ErrorCode rval;
1774  std::string tag_names( tag_storage_names );
1775  // exactly the same code as for int tag :) maybe should check the type of tag too
1776  std::vector< std::string > tagNames;
1777  std::vector< Tag > tagHandles;
1778  std::string separator( ":" );
1779  split_tag_names( tag_names, separator, tagNames );
1780 
1781  appData& data = context.appDatas[*pid];
1782  // set it on a subset of entities, based on type and length
1783  // if *entity_type = 0, then use vertices; else elements
1784  Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1785  int nents_to_be_set = (int)( *ents_to_set ).size();
1786 
1787  Tag gidTag = context.MBI->globalId_tag();
1788  std::vector< int > gids;
1789  gids.resize( nents_to_be_set );
1790  rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval );
1791 
1792  // so we will need to set the tags according to the global id passed;
1793  // so the order in tag_storage_data is the same as the order in globalIds, but the order
1794  // in local range is gids
1795  std::map< int, EntityHandle > eh_by_gid;
1796  int i = 0;
1797  for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i )
1798  {
1799  eh_by_gid[gids[i]] = *it;
1800  }
1801  // TODO: allow for tags of different length
1802  int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() ); // assumes all tags have the same length?
1803  // check global ids to have different values
1804  std::set< int > globalIdsSet;
1805  for( int j = 0; j < nbLocalVals; j++ )
1806  globalIdsSet.insert( globalIds[j] );
1807  if( globalIdsSet.size() < nbLocalVals )
1808  {
1809  std::cout << "iMOAB_SetDoubleTagStorageWithGid: for pid:" << *pid << " tags[0]:" << tagNames[0]
1810  << " global ids passed are not unique, major error\n";
1811  std::cout << " nbLocalVals:" << nbLocalVals << " globalIdsSet.size():" << globalIdsSet.size()
1812  << " first global id:" << globalIds[0] << "\n";
1813  return moab::MB_FAILURE;
1814  }
1815 
1816  std::vector< int > tagLengths( tagNames.size() );
1817  std::vector< Tag > tagList;
1818  size_t total_tag_len = 0;
1819  for( size_t i = 0; i < tagNames.size(); i++ )
1820  {
1821  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1822  {
1823  MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
1824  } // some tag not defined yet in the app
1825 
1826  Tag tag = data.tagMap[tagNames[i]];
1827  tagList.push_back( tag );
1828 
1829  int tagLength = 0;
1830  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1831 
1832  total_tag_len += tagLength;
1833  tagLengths[i] = tagLength;
1834  DataType dtype;
1835  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1836 
1837  if( dtype != MB_TYPE_DOUBLE )
1838  {
1839  MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
1840  }
1841  }
1842  bool serial = true;
1843 #ifdef MOAB_HAVE_MPI
1844  ParallelComm* pco = context.appDatas[*pid].pcomm;
1845  unsigned num_procs = pco->size();
1846  if( num_procs > 1 ) serial = false;
1847 #endif
1848 
1849  if( serial )
1850  {
1851  // we do not assume anymore that the number of entities has to match
1852  // we will set only what matches, and skip entities that do not have corresponding global ids
1853  //assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 );
1854  // tags are unrolled, we loop over global ids first, then careful about tags
1855  for( int i = 0; i < nents_to_be_set; i++ )
1856  {
1857  int gid = globalIds[i];
1858  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
1859  if( mapIt == eh_by_gid.end() ) continue;
1860  EntityHandle eh = mapIt->second;
1861  // now loop over tags
1862  int indexInTagValues = 0; //
1863  for( size_t j = 0; j < tagList.size(); j++ )
1864  {
1865  indexInTagValues += i * tagLengths[j];
1866  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval );
1867  // advance the pointer/index
1868  indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j]; // at the end of tag data
1869  }
1870  }
1871  }
1872 #ifdef MOAB_HAVE_MPI
1873  else // it can be not serial only if pco->size() > 1, parallel
1874  {
1875  // in this case, we have to use 2 crystal routers, to send data to the processor that needs it
1876  // we will create first a tuple to rendevous points, then from there send to the processor that requested it
1877  // it is a 2-hop global gather scatter
1878  // we do not expect the sizes to match
1879  //assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 );
1880  TupleList TLsend;
1881  TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals ); // to proc, marker(gid), total_tag_len doubles
1882  TLsend.enableWriteAccess();
1883  // the processor id that processes global_id is global_id / num_ents_per_proc
1884 
1885  int indexInRealLocal = 0;
1886  for( int i = 0; i < nbLocalVals; i++ )
1887  {
1888  // to proc, marker, element local index, index in el
1889  int marker = globalIds[i];
1890  int to_proc = marker % num_procs;
1891  int n = TLsend.get_n();
1892  TLsend.vi_wr[2 * n] = to_proc; // send to processor
1893  TLsend.vi_wr[2 * n + 1] = marker;
1894  int indexInTagValues = 0;
1895  // tag data collect by number of tags
1896  for( size_t j = 0; j < tagList.size(); j++ )
1897  {
1898  indexInTagValues += i * tagLengths[j];
1899  for( int k = 0; k < tagLengths[j]; k++ )
1900  {
1901  TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
1902  }
1903  indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
1904  }
1905  TLsend.inc_n();
1906  }
1907 
1908  //assert( nbLocalVals * total_tag_len - indexInRealLocal == 0 );
1909  // send now requests, basically inform the rendez-vous point who needs a particular global id
1910  // send the data to the other processors:
1911  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 );
1912  TupleList TLreq;
1913  TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
1914  TLreq.enableWriteAccess();
1915  for( int i = 0; i < nents_to_be_set; i++ )
1916  {
1917  // to proc, marker
1918  int marker = gids[i];
1919  int to_proc = marker % num_procs;
1920  int n = TLreq.get_n();
1921  TLreq.vi_wr[2 * n] = to_proc; // send to processor
1922  TLreq.vi_wr[2 * n + 1] = marker;
1923  // tag data collect by number of tags
1924  TLreq.inc_n();
1925  }
1926  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
1927 
1928  // we know now that process TLreq.vi_wr[2 * n] needs tags for gid TLreq.vi_wr[2 * n + 1]
1929  // we should first order by global id, and then build the new TL with send to proc, global id and
1930  // tags for it
1931  // sort by global ids the tuple lists
1932  moab::TupleList::buffer sort_buffer;
1933  sort_buffer.buffer_init( TLreq.get_n() );
1934  TLreq.sort( 1, &sort_buffer );
1935  sort_buffer.reset();
1936  sort_buffer.buffer_init( TLsend.get_n() );
1937  TLsend.sort( 1, &sort_buffer );
1938  sort_buffer.reset();
1939  // now send the tag values to the proc that requested it
1940  // in theory, for a full partition, TLreq and TLsend should have the same size, and
1941  // each dof should have exactly one target proc. Is that true or not in general ?
1942  // how do we plan to use this? Is it better to store the comm graph for future
1943 
1944  // start copy from comm graph settle
1945  TupleList TLBack;
1946  TLBack.initialize( 3, 0, 0, total_tag_len, 0 ); // to proc, marker, tag from proc , tag values
1947  TLBack.enableWriteAccess();
1948 
1949  int n1 = TLreq.get_n();
1950  int n2 = TLsend.get_n();
1951 
1952  int indexInTLreq = 0;
1953  int indexInTLsend = 0; // advance both, according to the marker
1954  if( n1 > 0 && n2 > 0 )
1955  {
1956 
1957  while( indexInTLreq < n1 && indexInTLsend < n2 ) // if any is over, we are done
1958  {
1959  int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
1960  int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
1961  if( currentValue1 < currentValue2 )
1962  {
1963  // we have a big problem; basically, we are saying that
1964  // dof currentValue is on one model and not on the other
1965  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
1966  indexInTLreq++;
1967  continue;
1968  }
1969  if( currentValue1 > currentValue2 )
1970  {
1971  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
1972  indexInTLsend++;
1973  continue;
1974  }
1975  int size1 = 1;
1976  int size2 = 1;
1977  while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
1978  size1++;
1979  while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
1980  size2++;
1981  // must be found in both lists, find the start and end indices
1982  for( int i1 = 0; i1 < size1; i1++ )
1983  {
1984  for( int i2 = 0; i2 < size2; i2++ )
1985  {
1986  // send the info back to components
1987  int n = TLBack.get_n();
1988  TLBack.reserve();
1989  TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )]; // send back to the proc marker
1990  // came from, info from comp2
1991  TLBack.vi_wr[3 * n + 1] = currentValue1; // initial value (resend, just for verif ?)
1992  TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )]; // from proc on comp2
1993  // also fill tag values
1994  for( size_t k = 0; k < total_tag_len; k++ )
1995  {
1996  TLBack.vr_rd[total_tag_len * n + k] =
1997  TLsend.vr_rd[total_tag_len * indexInTLsend + k]; // deep copy of tag values
1998  }
1999  }
2000  }
2001  indexInTLreq += size1;
2002  indexInTLsend += size2;
2003  }
2004  }
2005  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
2006  // end copy from comm graph
2007  // after we are done sending, we need to set those tag values, in a reverse process compared to send
2008  n1 = TLBack.get_n();
2009  double* ptrVal = &TLBack.vr_rd[0]; //
2010  for( int i = 0; i < n1; i++ )
2011  {
2012  int gid = TLBack.vi_rd[3 * i + 1]; // marker
2013  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
2014  if( mapIt == eh_by_gid.end() ) continue;
2015  EntityHandle eh = mapIt->second;
2016  // now loop over tags
2017 
2018  for( size_t j = 0; j < tagList.size(); j++ )
2019  {
2020  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval );
2021  // advance the pointer/index
2022  ptrVal += tagLengths[j]; // at the end of tag data per call
2023  }
2024  }
2025  }
2026 #endif
2027  return MB_SUCCESS;
2028 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::begin(), context, moab::ProcConfig::crystal_router(), moab::TupleList::enableWriteAccess(), moab::Range::end(), ErrorCode, moab::TupleList::get_n(), moab::Interface::globalId_tag(), moab::TupleList::inc_n(), moab::TupleList::initialize(), MB_CHK_ERR, MB_SET_ERR, MB_SUCCESS, MB_TYPE_DOUBLE, GlobalContext::MBI, appData::primary_elems, moab::ParallelComm::proc_config(), moab::TupleList::reserve(), moab::TupleList::buffer::reset(), moab::Range::size(), moab::ParallelComm::size(), moab::TupleList::sort(), split_tag_names(), moab::Interface::tag_get_data(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), moab::Interface::tag_set_data(), appData::tagMap, moab::TupleList::vi_rd, moab::TupleList::vi_wr, moab::TupleList::vr_rd, and moab::TupleList::vr_wr.

◆ iMOAB_SetGlobalInfo()

ErrCode iMOAB_SetGlobalInfo ( iMOAB_AppID  pid,
int *  num_global_verts,
int *  num_global_elems 
)

Set global information for number of vertices and number of elements; It is usually available from the h5m file, or it can be computed with a MPI_Reduce.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_global_verts(int*) The number of total vertices in the mesh.
[in]num_global_elems(int*) The number of total elements in the mesh.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2298 of file iMOAB.cpp.

2299 {
2300  appData& data = context.appDatas[*pid];
2301  data.num_global_vertices = *num_global_verts;
2302  data.num_global_elements = *num_global_elems;
2303  return moab::MB_SUCCESS;
2304 }

References GlobalContext::appDatas, context, MB_SUCCESS, appData::num_global_elements, and appData::num_global_vertices.

Referenced by iMOAB_UpdateMeshInfo().

◆ iMOAB_SetIntTagStorage()

ErrCode iMOAB_SetIntTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  num_tag_storage_length,
int *  entity_type,
int *  tag_storage_data 
)

Store the specified values in a MOAB integer Tag.

Values are set on vertices or elements, depending on entity_type

Note
we could change the api to accept as input the tag index, as returned by iMOAB_DefineTagStorage.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag name to store/retrieve the data in MOAB.
[in]num_tag_storage_length(int*) The size of tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity).
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[in]tag_storage_data(int*) The array data of type int used to replace the internal tag memory; the data is assumed to be contiguous over the local set of visible entities (either vertices or elements).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1627 of file iMOAB.cpp.

1632 {
1633  std::string tag_name( tag_storage_name );
1634 
1635  // Get the application data
1636  appData& data = context.appDatas[*pid];
1637  // check if tag is defined
1638  if( data.tagMap.find( tag_name ) == data.tagMap.end() ) return moab::MB_FAILURE;
1639 
1640  Tag tag = data.tagMap[tag_name];
1641 
1642  int tagLength = 0;
1643  MB_CHK_ERR( context.MBI->tag_get_length( tag, tagLength ) );
1644 
1645  DataType dtype;
1646  MB_CHK_ERR( context.MBI->tag_get_data_type( tag, dtype ) );
1647 
1648  if( dtype != MB_TYPE_INTEGER )
1649  {
1650  MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." );
1651  }
1652 
1653  // set it on a subset of entities, based on type and length
1654  // if *entity_type = 0, then use vertices; else elements
1655  Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1656  int nents_to_be_set = *num_tag_storage_length / tagLength;
1657 
1658  if( nents_to_be_set > (int)ents_to_set->size() )
1659  {
1660  return moab::MB_FAILURE;
1661  } // to many entities to be set or too few
1662 
1663  // now set the tag data
1664  MB_CHK_ERR( context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data ) );
1665 
1666  return moab::MB_SUCCESS; // no error
1667 }

References appData::all_verts, GlobalContext::appDatas, context, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, MB_TYPE_INTEGER, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), moab::Interface::tag_set_data(), and appData::tagMap.

◆ iMOAB_SynchronizeTags()

ErrCode iMOAB_SynchronizeTags ( iMOAB_AppID  pid,
int *  num_tag,
int *  tag_indices,
int *  ent_type 
)

Exchange tag values for the given tags across process boundaries.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_tag(int*) Number of tags to exchange.
[in]tag_indices(int*) Array with tag indices of interest (size = *num_tag).
[in]ent_type(int*) The type of entity for tag exchange.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2089 of file iMOAB.cpp.

2090 {
2091 #ifdef MOAB_HAVE_MPI
2092  appData& data = context.appDatas[*pid];
2093  Range ent_exchange;
2094  std::vector< Tag > tags;
2095 
2096  for( int i = 0; i < *num_tag; i++ )
2097  {
2098  if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
2099  {
2100  return moab::MB_FAILURE;
2101  } // error in tag index
2102 
2103  tags.push_back( data.tagList[tag_indices[i]] );
2104  }
2105 
2106  if( *ent_type == 0 )
2107  {
2108  ent_exchange = data.all_verts;
2109  }
2110  else if( *ent_type == 1 )
2111  {
2112  ent_exchange = data.primary_elems;
2113  }
2114  else
2115  {
2116  return moab::MB_FAILURE;
2117  } // unexpected type
2118 
2119  ParallelComm* pco = context.appDatas[*pid].pcomm;
2120 
2121  ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
2122 
2123 #else
2124  /* do nothing if serial */
2125  // just silence the warning
2126  // do not call sync tags in serial!
2127  int k = *pid + *num_tag + *tag_indices + *ent_type;
2128  k++;
2129 #endif
2130 
2131  return moab::MB_SUCCESS;
2132 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, moab::ParallelComm::exchange_tags(), MB_CHK_ERR, MB_SUCCESS, appData::primary_elems, and appData::tagList.

◆ iMOAB_UpdateMeshInfo()

ErrCode iMOAB_UpdateMeshInfo ( iMOAB_AppID  pid)

Update local mesh data structure, from file information.

Note
The method should be called after mesh modifications, for example reading a file or creating mesh in memory

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
Returns
ErrCode The error code indicating success or failure.

Definition at line 785 of file iMOAB.cpp.

786 {
787  // this will include ghost elements info
788  appData& data = context.appDatas[*pid];
789  EntityHandle fileSet = data.file_set;
790 
791  // first clear all data ranges; this can be called after ghosting
792  data.all_verts.clear();
793  data.primary_elems.clear();
794  data.local_verts.clear();
795  data.owned_verts.clear();
796  data.ghost_vertices.clear();
797  data.owned_elems.clear();
798  data.ghost_elems.clear();
799  data.mat_sets.clear();
800  data.neu_sets.clear();
801  data.diri_sets.clear();
802 
803  // Let us get all the vertex entities
804  ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval ); // recursive
805 
806  // Let us check first entities of dimension = 3
807  data.dimension = 3;
808  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
809 
810  if( data.primary_elems.empty() )
811  {
812  // Now 3-D elements. Let us check entities of dimension = 2
813  data.dimension = 2;
814  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
815 
816  if( data.primary_elems.empty() )
817  {
818  // Now 3-D/2-D elements. Let us check entities of dimension = 1
819  data.dimension = 1;
820  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
821 
822  if( data.primary_elems.empty() )
823  {
824  // no elements of dimension 1 or 2 or 3; it could happen for point clouds
825  data.dimension = 0;
826  }
827  }
828  }
829 
830  // check if the current mesh is just a point cloud
831  data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
832 
833 #ifdef MOAB_HAVE_MPI
834 
836  {
837  ParallelComm* pco = context.appDatas[*pid].pcomm;
838 
839  // filter ghost vertices, from local
840  rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
841 
842  // Store handles for all ghosted entities
843  data.ghost_vertices = subtract( data.all_verts, data.local_verts );
844 
845  // filter ghost elements, from local
846  rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
847 
848  data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
849  // now update global number of primary cells and global number of vertices
850  // determine first number of owned vertices
851  // Get local owned vertices
852  rval = pco->filter_pstatus( data.all_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &data.owned_verts );MB_CHK_ERR( rval );
853  int local[2], global[2];
854  local[0] = data.owned_verts.size();
855  local[1] = data.owned_elems.size();
856  MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
857  rval = iMOAB_SetGlobalInfo( pid, &( global[0] ), &( global[1] ) );MB_CHK_ERR( rval );
858  }
859  else
860  {
861  data.local_verts = data.all_verts;
862  data.owned_elems = data.primary_elems;
863  }
864 
865 #else
866 
867  data.local_verts = data.all_verts;
868  data.owned_elems = data.primary_elems;
869 
870 #endif
871 
872  // Get the references for some standard internal tags such as material blocks, BCs, etc
874  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
875 
877  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
878 
880  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
881 
882  return moab::MB_SUCCESS;
883 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::clear(), moab::ParallelComm::comm(), context, appData::dimension, appData::diri_sets, GlobalContext::dirichlet_tag, moab::Range::empty(), ErrorCode, appData::file_set, moab::ParallelComm::filter_pstatus(), moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type(), moab::Interface::get_entities_by_type_and_tag(), appData::ghost_elems, appData::ghost_vertices, iMOAB_SetGlobalInfo(), appData::local_verts, appData::mat_sets, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, MBVERTEX, GlobalContext::MPI_initialized, appData::neu_sets, GlobalContext::neumann_tag, appData::owned_elems, appData::owned_verts, appData::point_cloud, appData::primary_elems, PSTATUS_GHOST, PSTATUS_NOT, PSTATUS_NOT_OWNED, moab::Range::size(), moab::subtract(), and moab::Interface::UNION.

Referenced by iMOAB_DetermineGhostEntities(), iMOAB_LoadMesh(), iMOAB_MergeVertices(), and iMOAB_ReceiveMesh().

◆ iMOAB_WriteLocalMesh()

ErrCode iMOAB_WriteLocalMesh ( iMOAB_AppID  pid,
iMOAB_String  prefix 
)

Write a local MOAB mesh copy.

Note
The interface will write one file per task. Only the local mesh set and solution data associated to the application will be written to the file. Very convenient method used for debugging.

Operations: Not Collective (independent operation).

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]prefix(iMOAB_String) The MOAB file prefix that will be used with task ID in parallel.
Returns
ErrCode The error code indicating success or failure.

Definition at line 766 of file iMOAB.cpp.

767 {
768  IMOAB_CHECKPOINTER( prefix, 2 );
769  IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
770 
771  std::ostringstream file_name;
772  int rank = 0, size = 1;
773 #ifdef MOAB_HAVE_MPI
774  ParallelComm* pcomm = context.appDatas[*pid].pcomm;
775  rank = pcomm->rank();
776  size = pcomm->size();
777 #endif
778  file_name << prefix << "_" << size << "_" << rank << ".h5m";
779  // Now let us actually write the file to disk with appropriate options
780  ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
781 
782  return moab::MB_SUCCESS;
783 }

References GlobalContext::appDatas, context, ErrorCode, IMOAB_ASSERT, IMOAB_CHECKPOINTER, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::ParallelComm::rank(), size, moab::ParallelComm::size(), and moab::Interface::write_file().

◆ iMOAB_WriteMesh()

ErrCode iMOAB_WriteMesh ( iMOAB_AppID  pid,
const iMOAB_String  filename,
const iMOAB_String  write_options 
)

Write a MOAB mesh along with the solution tags to a file.

Note
The interface will write one single file (H5M) and for serial files (VTK/Exodus), it will write one file per task. Write options include parallel write options, if needed. Only the mesh set and solution data associated to the application will be written to the file.

Operations: Collective for parallel write, non collective for serial write.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]filename(iMOAB_String) The MOAB mesh file (H5M) to write all the entities contained in the internal application mesh set.
[in]write_options(iMOAB_String) Additional options for writing the MOAB mesh in parallel.
Returns
ErrCode The error code indicating success or failure.

Definition at line 761 of file iMOAB.cpp.

762 {
763  return internal_WriteMesh( pid, filename, write_options );
764 }

References internal_WriteMesh().

◆ internal_WriteMesh()

static ErrCode internal_WriteMesh ( iMOAB_AppID  pid,
const iMOAB_String  filename,
const iMOAB_String  write_options,
bool  primary_set = true 
)
static

Definition at line 679 of file iMOAB.cpp.

683 {
684  IMOAB_CHECKPOINTER( filename, 2 );
685  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
686 
687  appData& data = context.appDatas[*pid];
688  EntityHandle fileSet = data.file_set;
689 
690  std::ostringstream newopts;
691 #ifdef MOAB_HAVE_MPI
692  std::string write_opts( ( write_options ? write_options : "" ) );
693  std::string pcid( "PARALLEL_COMM=" );
694 
695  if( write_opts.find( pcid ) != std::string::npos )
696  {
697  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
698  return moab::MB_FAILURE;
699  }
700 
701  // if write in parallel, add pc option, to be sure about which ParallelComm instance is used
702  std::string pw( "PARALLEL=WRITE_PART" );
703  if( write_opts.find( pw ) != std::string::npos )
704  {
705  ParallelComm* pco = data.pcomm;
706  newopts << "PARALLEL_COMM=" << pco->get_id() << ";";
707  }
708 
709 #endif
710 
711 #ifdef MOAB_HAVE_TEMPESTREMAP
712  if( !primary_set )
713  {
714  if( data.tempestData.remapper != nullptr )
715  fileSet = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
716  else if( data.file_set != data.secondary_file_set )
717  fileSet = data.secondary_file_set;
718  else
719  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid secondary file set handle" );
720  }
721 #endif
722 
723  // append user write options to the one we have built
724  if( write_options ) newopts << write_options;
725 
726  std::vector< Tag > copyTagList = data.tagList;
727  // append Global ID and Parallel Partition
728  std::string gid_name_tag( "GLOBAL_ID" );
729 
730  // export global id tag, we need it always
731  if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
732  {
733  Tag gid = context.MBI->globalId_tag();
734  copyTagList.push_back( gid );
735  }
736  // also Parallel_Partition PARALLEL_PARTITION
737  std::string pp_name_tag( "PARALLEL_PARTITION" );
738 
739  // write parallel part tag too, if it exists
740  if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
741  {
742  Tag ptag;
743  context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
744  if( ptag ) copyTagList.push_back( ptag );
745  }
746 
747  // Now let us actually write the file to disk with appropriate options
748  if( primary_set )
749  {
750  MB_CHK_ERR( context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, copyTagList.data(),
751  copyTagList.size() ) );
752  }
753  else
754  {
755  MB_CHK_ERR( context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1 ) );
756  }
757 
758  return moab::MB_SUCCESS;
759 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, appData::file_set, moab::ParallelComm::get_id(), moab::Interface::globalId_tag(), IMOAB_ASSERT, IMOAB_CHECKPOINTER, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, GlobalContext::MBI, appData::pcomm, moab::Interface::tag_get_handle(), appData::tagList, appData::tagMap, and moab::Interface::write_file().

Referenced by iMOAB_WriteMesh().

◆ split_tag_names()

static void split_tag_names ( std::string  input_names,
std::string &  separator,
std::vector< std::string > &  list_tag_names 
)
static

Definition at line 1475 of file iMOAB.cpp.

1478 {
1479  size_t pos = 0;
1480  std::string token;
1481  while( ( pos = input_names.find( separator ) ) != std::string::npos )
1482  {
1483  token = input_names.substr( 0, pos );
1484  if( !token.empty() ) list_tag_names.push_back( token );
1485  // std::cout << token << std::endl;
1486  input_names.erase( 0, pos + separator.length() );
1487  }
1488  if( !input_names.empty() )
1489  {
1490  // if leftover something, or if not ended with delimiter
1491  list_tag_names.push_back( input_names );
1492  }
1493  return;
1494 }

Referenced by iMOAB_DefineTagStorage(), iMOAB_GetDoubleTagStorage(), iMOAB_ReceiveElementTag(), iMOAB_SendElementTag(), iMOAB_SetDoubleTagStorage(), and iMOAB_SetDoubleTagStorageWithGid().

Variable Documentation

◆ context