Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
iMOAB.cpp File Reference
#include "moab/MOABConfig.h"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "moab/ParCommGraph.hpp"
#include "moab/ParallelMergeMesh.hpp"
#include "moab/IntxMesh/IntxUtils.hpp"
#include "DebugOutput.hpp"
#include "moab/iMOAB.h"
#include "mhdf.h"
#include <H5Tpublic.h>
#include "moab/CartVect.hpp"
#include "MBTagConventions.hpp"
#include "moab/MeshTopoUtil.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/MergeMesh.hpp"
#include <cassert>
#include <sstream>
#include <iostream>

Go to the source code of this file.

Classes

struct  appData
 
struct  GlobalContext
 

Functions

ErrCode iMOAB_Initialize (int argc, iMOAB_String *argv)
 Initialize the iMOAB interface implementation. More...
 
ErrCode iMOAB_InitializeFortran ()
 Initialize the iMOAB interface implementation from Fortran driver. More...
 
ErrCode iMOAB_Finalize ()
 Finalize the iMOAB interface implementation. More...
 
static int apphash (const iMOAB_String str, int identifier=0)
 
ErrCode iMOAB_RegisterApplication (const iMOAB_String app_name, MPI_Comm *comm, int *compid, iMOAB_AppID pid)
 
ErrCode iMOAB_RegisterApplicationFortran (const iMOAB_String app_name, int *comm, int *compid, iMOAB_AppID pid)
 
ErrCode iMOAB_DeregisterApplication (iMOAB_AppID pid)
 De-Register application: delete mesh (set) associated with the application ID. More...
 
ErrCode iMOAB_DeregisterApplicationFortran (iMOAB_AppID pid)
 De-Register the Fortran based application: delete mesh (set) associated with the application ID. More...
 
ErrCode iMOAB_ReadHeaderInfo (const iMOAB_String filename, int *num_global_vertices, int *num_global_elements, int *num_dimension, int *num_parts)
 It should be called on the master task only; the information obtained can then be broadcast by the user. It is a fast lookup in the header of the file. More...
 
ErrCode iMOAB_LoadMesh (iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String read_options, int *num_ghost_layers)
 Load a MOAB mesh file in parallel and exchange ghost layers as requested. More...
 
static ErrCode internal_WriteMesh (iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options, bool primary_set=true)
 
ErrCode iMOAB_WriteMesh (iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options)
 Write a MOAB mesh along with the solution tags to a file. More...
 
ErrCode iMOAB_WriteLocalMesh (iMOAB_AppID pid, iMOAB_String prefix)
 Write a local MOAB mesh copy. More...
 
ErrCode iMOAB_UpdateMeshInfo (iMOAB_AppID pid)
 Update local mesh data structure, from file information. More...
 
ErrCode iMOAB_GetMeshInfo (iMOAB_AppID pid, int *num_visible_vertices, int *num_visible_elements, int *num_visible_blocks, int *num_visible_surfaceBC, int *num_visible_vertexBC)
 Retrieve all the important mesh information related to vertices, elements, vertex and surface boundary conditions. More...
 
ErrCode iMOAB_GetVertexID (iMOAB_AppID pid, int *vertices_length, iMOAB_GlobalID *global_vertex_ID)
 Get the global vertex IDs for all locally visible (owned and shared/ghosted) vertices. More...
 
ErrCode iMOAB_GetVertexOwnership (iMOAB_AppID pid, int *vertices_length, int *visible_global_rank_ID)
 Get vertex ownership information. More...
 
ErrCode iMOAB_GetVisibleVerticesCoordinates (iMOAB_AppID pid, int *coords_length, double *coordinates)
 Get vertex coordinates for all local (owned and ghosted) vertices. More...
 
ErrCode iMOAB_GetBlockID (iMOAB_AppID pid, int *block_length, iMOAB_GlobalID *global_block_IDs)
 Get the global block IDs for all locally visible (owned and shared/ghosted) blocks. More...
 
ErrCode iMOAB_GetBlockInfo (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *vertices_per_element, int *num_elements_in_block)
 Get the global block information and number of visible elements belonging to a block (MATERIAL SET). More...
 
ErrCode iMOAB_GetVisibleElementsInfo (iMOAB_AppID pid, int *num_visible_elements, iMOAB_GlobalID *element_global_IDs, int *ranks, iMOAB_GlobalID *block_IDs)
 Get the visible elements information. More...
 
ErrCode iMOAB_GetBlockElementConnectivities (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *connectivity_length, int *element_connectivity)
 Get the connectivities for elements contained within a certain block. More...
 
ErrCode iMOAB_GetElementConnectivity (iMOAB_AppID pid, iMOAB_LocalID *elem_index, int *connectivity_length, int *element_connectivity)
 Get the connectivity for one element only. More...
 
ErrCode iMOAB_GetElementOwnership (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *num_elements_in_block, int *element_ownership)
 Get the element ownership within a certain block, i.e., the processor ID of the element owner. More...
 
ErrCode iMOAB_GetElementID (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *num_elements_in_block, iMOAB_GlobalID *global_element_ID, iMOAB_LocalID *local_element_ID)
 Get the global IDs for all locally visible elements belonging to a particular block. More...
 
ErrCode iMOAB_GetPointerToSurfaceBC (iMOAB_AppID pid, int *surface_BC_length, iMOAB_LocalID *local_element_ID, int *reference_surface_ID, int *boundary_condition_value)
 Get the surface boundary condition information. More...
 
ErrCode iMOAB_GetPointerToVertexBC (iMOAB_AppID pid, int *vertex_BC_length, iMOAB_LocalID *local_vertex_ID, int *boundary_condition_value)
 Get the vertex boundary condition information. More...
 
static void split_tag_names (std::string input_names, std::string &separator, std::vector< std::string > &list_tag_names)
 
ErrCode iMOAB_DefineTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_name, int *tag_type, int *components_per_entity, int *tag_index)
 Define a MOAB Tag corresponding to the application depending on requested types. More...
 
ErrCode iMOAB_SetIntTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_name, int *num_tag_storage_length, int *ent_type, int *tag_storage_data)
 Store the specified values in a MOAB integer Tag. More...
 
ErrCode iMOAB_GetIntTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_name, int *num_tag_storage_length, int *ent_type, int *tag_storage_data)
 Get the specified values in a MOAB integer Tag. More...
 
ErrCode iMOAB_SetDoubleTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_names, int *num_tag_storage_length, int *ent_type, double *tag_storage_data)
 Store the specified values in a MOAB double Tag. More...
 
ErrCode iMOAB_SetDoubleTagStorageWithGid (iMOAB_AppID pid, const iMOAB_String tag_storage_names, int *num_tag_storage_length, int *ent_type, double *tag_storage_data, int *globalIds)
 Store the specified values in a MOAB double Tag, for given ids. More...
 
ErrCode iMOAB_GetDoubleTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_names, int *num_tag_storage_length, int *ent_type, double *tag_storage_data)
 Retrieve the specified values in a MOAB double Tag. More...
 
ErrCode iMOAB_SynchronizeTags (iMOAB_AppID pid, int *num_tag, int *tag_indices, int *ent_type)
 Exchange tag values for the given tags across process boundaries. More...
 
ErrCode iMOAB_ReduceTagsMax (iMOAB_AppID pid, int *tag_index, int *ent_type)
 Perform global reductions with the processes in the current application's communicator. Specifically, this routine performs reductions on the maximum value of the tag indicated by tag_index. More...
 
ErrCode iMOAB_GetNeighborElements (iMOAB_AppID pid, iMOAB_LocalID *local_index, int *num_adjacent_elements, iMOAB_LocalID *adjacent_element_IDs)
 Retrieve the adjacencies for the element entities. More...
 
ErrCode iMOAB_CreateVertices (iMOAB_AppID pid, int *coords_len, int *dim, double *coordinates)
 Create vertices for an app; it assumes no other vertices exist. More...
 
ErrCode iMOAB_CreateElements (iMOAB_AppID pid, int *num_elem, int *type, int *num_nodes_per_element, int *connectivity, int *block_ID)
 Create elements for an app; it assumes connectivity from local vertices, in order. More...
 
ErrCode iMOAB_SetGlobalInfo (iMOAB_AppID pid, int *num_global_verts, int *num_global_elems)
 Set global information for number of vertices and number of elements; it is usually available from the h5m file, or it can be computed with an MPI_Reduce. More...
 
ErrCode iMOAB_GetGlobalInfo (iMOAB_AppID pid, int *num_global_verts, int *num_global_elems)
 Get global information about number of vertices and number of elements. More...
 
ErrCode iMOAB_ResolveSharedEntities (iMOAB_AppID pid, int *num_verts, int *marker)
 Resolve shared entities using global markers on shared vertices. More...
 
ErrCode iMOAB_DetermineGhostEntities (iMOAB_AppID pid, int *ghost_dim, int *num_ghost_layers, int *bridge_dim)
 Create the requested number of ghost layers for the parallel mesh. More...
 
ErrCode iMOAB_SendMesh (iMOAB_AppID pid, MPI_Comm *joint_communicator, MPI_Group *receivingGroup, int *rcompid, int *method)
 
ErrCode iMOAB_ReceiveMesh (iMOAB_AppID pid, MPI_Comm *joint_communicator, MPI_Group *sendingGroup, int *scompid)
 
ErrCode iMOAB_SendElementTag (iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm *joint_communicator, int *context_id)
 
ErrCode iMOAB_ReceiveElementTag (iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm *joint_communicator, int *context_id)
 
ErrCode iMOAB_FreeSenderBuffers (iMOAB_AppID pid, int *context_id)
 
ErrCode iMOAB_ComputeCommGraph (iMOAB_AppID pid1, iMOAB_AppID pid2, MPI_Comm *joint_communicator, MPI_Group *group1, MPI_Group *group2, int *type1, int *type2, int *comp1, int *comp2)
 
ErrCode iMOAB_MergeVertices (iMOAB_AppID pid)
 

Variables

static struct GlobalContext context
 

Function Documentation

◆ apphash()

static int apphash ( const iMOAB_String  str,
int  identifier = 0 
)
static

Definition at line 194 of file iMOAB.cpp.

195 {
196  // A simple deterministic string hash based on FNV-1a (not a true minimal perfect hash)
197  std::string appstr( str );
198  // Fowler–Noll–Vo (or FNV) is a non-cryptographic hash function created
199  // by Glenn Fowler, Landon Curt Noll, and Kiem-Phong Vo.
200  // Ref: https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
201  unsigned int h = 2166136261u; // FNV offset basis
202  for( char c : appstr )
203  {
204  h ^= static_cast< unsigned char >( c );
205  h *= 16777619u; // FNV prime
206  }
207  // Incorporate the integer into the hash
208  if( identifier )
209  {
210  h ^= static_cast< unsigned int >( identifier & 0xFFFFFFFF );
211  h *= 16777619u; // FNV prime again
212  }
213  return static_cast< int >( h & 0x7FFFFFFF ); // Mask to ensure positive value
214 }

Referenced by iMOAB_RegisterApplication().
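
For experimentation, here is a standalone, compilable sketch of the same FNV-1a style hashing; the function name and main() driver are hypothetical and not part of iMOAB:

    #include <iostream>
    #include <string>

    // Mirrors the body of apphash() above: FNV-1a over the name,
    // then the integer identifier is mixed in and the sign bit masked off.
    static int app_hash_sketch( const std::string& appstr, int identifier = 0 )
    {
        unsigned int h = 2166136261u; // FNV offset basis
        for( char c : appstr )
        {
            h ^= static_cast< unsigned char >( c );
            h *= 16777619u; // FNV prime
        }
        if( identifier )
        {
            h ^= static_cast< unsigned int >( identifier );
            h *= 16777619u; // FNV prime again
        }
        return static_cast< int >( h & 0x7FFFFFFF ); // non-negative result
    }

    int main()
    {
        // the same (name, identifier) pair always yields the same id
        std::cout << app_hash_sketch( "ATM", 5 ) << "\n";
        return 0;
    }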

◆ iMOAB_ComputeCommGraph()

ErrCode iMOAB_ComputeCommGraph ( iMOAB_AppID  pid1,
iMOAB_AppID  pid2,
MPI_Comm *  joint_communicator,
MPI_Group *  group1,
MPI_Group *  group2,
int *  type1,
int *  type2,
int *  comp1,
int *  comp2 
)

Definition at line 2794 of file iMOAB.cpp.

2803 {
2804  assert( joint_communicator );
2805  assert( group1 );
2806  assert( group2 );
2807  ErrorCode rval = MB_SUCCESS;
2808  int localRank = 0, numProcs = 1;
2809 
2810  bool isFortran = false;
2811  if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
2812  if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
2813 
2814  MPI_Comm global =
2815  ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator );
2816  MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
2817  MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
2818 
2819  MPI_Comm_rank( global, &localRank );
2820  MPI_Comm_size( global, &numProcs );
2821  // instantiate the par comm graph
2822 
2823  // we should search if we have another pcomm with the same comp ids in the list already
2824  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2825  ParCommGraph* cgraph = nullptr;
2826  ParCommGraph* cgraph_rev = nullptr;
2827  if( *pid1 >= 0 )
2828  {
2829  appData& data = context.appDatas[*pid1];
2830  auto mt = data.pgraph.find( *comp2 );
2831  if( mt != data.pgraph.end() ) data.pgraph.erase( mt );
2832  // now let us compute the new communication graph
2833  cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
2834  context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
2835  }
2836  if( *pid2 >= 0 )
2837  {
2838  appData& data = context.appDatas[*pid2];
2839  auto mt = data.pgraph.find( *comp1 );
2840  if( mt != data.pgraph.end() ) data.pgraph.erase( mt );
2841  // now let us compute the new communication graph
2842  cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
2843  context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev; // from 2 to 1
2844  }
2845 
2846  // each model has a list of global ids that will need to be sent by gs to rendezvous the other
2847  // model on the joint comm
2848  TupleList TLcomp1;
2849  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2850  TupleList TLcomp2;
2851  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2852  // will push_back a new tuple, if needed
2853 
2854  TLcomp1.enableWriteAccess();
2855 
2856  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
2857  Tag gdsTag;
2858  // find the values on first cell
2859  int lenTagType1 = 1;
2860  if( 1 == *type1 || 1 == *type2 )
2861  {
2862  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
2863  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
2864  }
2865  Tag gidTag = context.MBI->globalId_tag();
2866 
2867  std::vector< int > valuesComp1;
2868  // populate first tuple
2869  if( *pid1 >= 0 )
2870  {
2871  appData& data1 = context.appDatas[*pid1];
2872  EntityHandle fset1 = data1.file_set;
2873  // in case of tempest remap, get the coverage set
2874 #ifdef MOAB_HAVE_TEMPESTREMAP
2875  if( data1.tempestData.remapper != nullptr ) // this is the case this is part of intx;
2876  fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2877 #endif
2878  Range ents_of_interest;
2879  if( *type1 == 1 )
2880  {
2881  assert( gdsTag );
2882  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2883  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
2884  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
2885  }
2886  else if( *type1 == 2 )
2887  {
2888  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2889  valuesComp1.resize( ents_of_interest.size() );
2890  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2891  }
2892  else if( *type1 == 3 ) // for FV meshes, just get the global id of cell
2893  {
2894  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
2895  valuesComp1.resize( ents_of_interest.size() );
2896  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2897  }
2898  else
2899  {
2900  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
2901  }
2902  // now fill the tuple list with info and markers
2903  // because we will send only the ids, order and compress the list
2904  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
2905  TLcomp1.resize( uniq.size() );
2906  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
2907  {
2908  // to proc, marker, element local index, index in el
2909  int marker = *sit;
2910  int to_proc = marker % numProcs;
2911  int n = TLcomp1.get_n();
2912  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
2913  TLcomp1.vi_wr[2 * n + 1] = marker;
2914  TLcomp1.inc_n();
2915  }
2916  }
2917 
2918  ProcConfig pc( global ); // proc config does the crystal router
2919  pc.crystal_router()->gs_transfer( 1, TLcomp1,
2920  0 ); // communication towards joint tasks, with markers
2921  // sort by value (key 1)
2922 #ifdef VERBOSE
2923  std::stringstream ff1;
2924  ff1 << "TLcomp1_" << localRank << ".txt";
2925  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2926 #endif
2927  moab::TupleList::buffer sort_buffer;
2928  sort_buffer.buffer_init( TLcomp1.get_n() );
2929  TLcomp1.sort( 1, &sort_buffer );
2930  sort_buffer.reset();
2931 #ifdef VERBOSE
2932  // after sorting
2933  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2934 #endif
2935  // do the same, for the other component, number2, with type2
2936  // start copy
2937  TLcomp2.enableWriteAccess();
2938  // populate second tuple
2939  std::vector< int > valuesComp2;
2940  if( *pid2 >= 0 )
2941  {
2942  appData& data2 = context.appDatas[*pid2];
2943  EntityHandle fset2 = data2.file_set;
2944  // in case of tempest remap, get the coverage set
2945 #ifdef MOAB_HAVE_TEMPESTREMAP
2946  if( data2.tempestData.remapper != nullptr ) // this is the case this is part of intx;
2947  fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2948 #endif
2949 
2950  Range ents_of_interest;
2951  if( *type2 == 1 )
2952  {
2953  assert( gdsTag );
2954  rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2955  valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
2956  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
2957  }
2958  else if( *type2 == 2 )
2959  {
2960  rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2961  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
2962  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
2963  }
2964  else if( *type2 == 3 )
2965  {
2966  rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
2967  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
2968  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
2969  }
2970  else
2971  {
2972  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
2973  }
2974  // now fill the tuple list with info and markers
2975  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
2976  TLcomp2.resize( uniq.size() );
2977  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
2978  {
2979  // to proc, marker, element local index, index in el
2980  int marker = *sit;
2981  int to_proc = marker % numProcs;
2982  int n = TLcomp2.get_n();
2983  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
2984  TLcomp2.vi_wr[2 * n + 1] = marker;
2985  TLcomp2.inc_n();
2986  }
2987  }
2988  pc.crystal_router()->gs_transfer( 1, TLcomp2,
2989  0 ); // communication towards joint tasks, with markers
2990  // sort by value (key 1)
2991 #ifdef VERBOSE
2992  std::stringstream ff2;
2993  ff2 << "TLcomp2_" << localRank << ".txt";
2994  TLcomp2.print_to_file( ff2.str().c_str() );
2995 #endif
2996  sort_buffer.buffer_reserve( TLcomp2.get_n() );
2997  TLcomp2.sort( 1, &sort_buffer );
2998  sort_buffer.reset();
2999  // end copy
3000 #ifdef VERBOSE
3001  TLcomp2.print_to_file( ff2.str().c_str() );
3002 #endif
3003  // need to send back the info, from the rendezvous point, for each of the values
3004  /* so go over each value, on local process in joint communicator
3005 
3006  now have to send back the info needed for communication;
3007  loop in sync over both TLComp1 and TLComp2, in local process;
3008  So, build new tuple lists, to send synchronous communication
3009  populate them at the same time, based on marker, that is indexed
3010  */
3011 
3012  TupleList TLBackToComp1;
3013  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
3014  TLBackToComp1.enableWriteAccess();
3015 
3016  TupleList TLBackToComp2;
3017  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
3018  TLBackToComp2.enableWriteAccess();
3019 
3020  int n1 = TLcomp1.get_n();
3021  int n2 = TLcomp2.get_n();
3022 
3023  int indexInTLComp1 = 0;
3024  int indexInTLComp2 = 0; // advance both, according to the marker
3025  if( n1 > 0 && n2 > 0 )
3026  {
3027  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
3028  {
3029  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3030  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3031  if( currentValue1 < currentValue2 )
3032  {
3033  // we have a big problem; basically, we are saying that
3034  // dof currentValue is on one model and not on the other
3035  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
3036  indexInTLComp1++;
3037  continue;
3038  }
3039  if( currentValue1 > currentValue2 )
3040  {
3041  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
3042  indexInTLComp2++;
3043  continue;
3044  }
3045  int size1 = 1;
3046  int size2 = 1;
3047  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3048  size1++;
3049  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3050  size2++;
3051  // must be found in both lists, find the start and end indices
3052  for( int i1 = 0; i1 < size1; i1++ )
3053  {
3054  for( int i2 = 0; i2 < size2; i2++ )
3055  {
3056  // send the info back to components
3057  int n = TLBackToComp1.get_n();
3058  TLBackToComp1.reserve();
3059  TLBackToComp1.vi_wr[3 * n] =
3060  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
3061  // came from, info from comp2
3062  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3063  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
3064  n = TLBackToComp2.get_n();
3065  TLBackToComp2.reserve();
3066  TLBackToComp2.vi_wr[3 * n] =
3067  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
3068  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3069  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
3070  // what if there are repeated markers in TLcomp2? increase just index2
3071  }
3072  }
3073  indexInTLComp1 += size1;
3074  indexInTLComp2 += size2;
3075  }
3076  }
3077  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
3078  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3079 
3080  if( *pid1 >= 0 )
3081  {
3082  // we are on original comp 1 tasks
3083  // before ordering
3084  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3085  // processors it communicates with
3086 #ifdef VERBOSE
3087  std::stringstream f1;
3088  f1 << "TLBack1_" << localRank << ".txt";
3089  TLBackToComp1.print_to_file( f1.str().c_str() );
3090 #endif
3091  sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
3092  TLBackToComp1.sort( 1, &sort_buffer );
3093  sort_buffer.reset();
3094 #ifdef VERBOSE
3095  TLBackToComp1.print_to_file( f1.str().c_str() );
3096 #endif
3097  // so we are now on pid1; we now know where each marker has to go
3098  // add a new method to ParCommGraph, to set up the involved_IDs_map
3099  cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
3100  }
3101  if( *pid2 >= 0 )
3102  {
3103  // we are on original comp 2 tasks
3104  // before ordering
3105  // now for each value in TLBackToComp2.vi_rd[3*i+1], on current proc, we know the
3106  // processors it communicates with
3107 #ifdef VERBOSE
3108  std::stringstream f2;
3109  f2 << "TLBack2_" << localRank << ".txt";
3110  TLBackToComp2.print_to_file( f2.str().c_str() );
3111 #endif
3112  sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
3113  TLBackToComp2.sort( 2, &sort_buffer );
3114  sort_buffer.reset();
3115 #ifdef VERBOSE
3116  TLBackToComp2.print_to_file( f2.str().c_str() );
3117 #endif
3118  cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
3119  }
3120  return moab::MB_SUCCESS;
3121 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, moab::ProcConfig::crystal_router(), moab::TupleList::enableWriteAccess(), ErrorCode, appData::file_set, moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type(), moab::TupleList::get_n(), moab::Interface::globalId_tag(), moab::TupleList::inc_n(), moab::TupleList::initialize(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, MBQUAD, MBVERTEX, appData::pgraph, moab::TupleList::print_to_file(), moab::TupleList::reserve(), moab::TupleList::buffer::reset(), moab::TupleList::resize(), moab::ParCommGraph::settle_comm_by_ids(), moab::Range::size(), moab::TupleList::sort(), moab::Interface::tag_get_data(), moab::Interface::tag_get_handle(), moab::Interface::tag_get_length(), moab::TupleList::vi_rd, and moab::TupleList::vi_wr.
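
A hedged usage sketch for this routine: on a joint communicator, a coupler computes the graph once between two registered applications before migrating tags between them. The component ids, types, and MPI objects below are illustrative assumptions, not values mandated by the API.

    // Assumed context: pidAtm and pidCpl come from iMOAB_RegisterApplication on
    // their respective sides (an app id is negative on ranks not in that group),
    // jointComm spans both groups, atmGroup/cplGroup are the two MPI groups.
    int typeAtm = 2;                // vertex/point-cloud global ids (see type handling above)
    int typeCpl = 3;                // FV mesh: cell global ids
    int compAtm = 5, compCpl = 100; // illustrative component ids
    ErrCode rc = iMOAB_ComputeCommGraph( pidAtm, pidCpl, &jointComm, &atmGroup,
                                         &cplGroup, &typeAtm, &typeCpl,
                                         &compAtm, &compCpl );
    if( rc != 0 ) { /* handle failure */ }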

◆ iMOAB_CreateElements()

ErrCode iMOAB_CreateElements ( iMOAB_AppID  pid,
int *  num_elem,
int *  type,
int *  num_nodes_per_element,
int *  connectivity,
int *  block_ID 
)

Create elements for an app; it assumes connectivity from local vertices, in order.

Note
Operations: Not collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_elem(int*) Number of elements.
[in]type(int*) Type of element (moab type).
[in]num_nodes_per_element(int*) Number of nodes per element.
[in]connectivity(int *) Connectivity array, with respect to vertices (assumed contiguous).
[in]block_ID(int *) The local block identifier, which will now contain the elements.
Returns
ErrCode The error code indicating success or failure.

add the new ents to the block set

Definition at line 2213 of file iMOAB.cpp.

2219 {
2220  // Create elements
2221  appData& data = context.appDatas[*pid];
2222 
2223  ReadUtilIface* read_iface;
2224  ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
2225 
2226  EntityType mbtype = (EntityType)( *type );
2227  EntityHandle actual_start_handle;
2228  EntityHandle* array = nullptr;
2229  rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
2230 
2231  // fill up with actual connectivity from input; assume the vertices are in order, and start
2232  // vertex is the first in the current data vertex range
2233  EntityHandle firstVertex = data.local_verts[0];
2234 
2235  for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
2236  {
2237  array[j] = connectivity[j] + firstVertex - 1;
2238  } // assumes connectivity uses 1 based array (from fortran, mostly)
2239 
2240  Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
2241 
2242  rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
2243 
2244  data.primary_elems.merge( new_elems );
2245 
2246  // add to adjacency
2247  rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
2248  // organize all new elements in block, with the given block ID; if the block set is not
2249  // existing, create a new mesh set;
2250  Range sets;
2251  int set_no = *block_ID;
2252  const void* setno_ptr = &set_no;
2253  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
2254  sets );
2255  EntityHandle block_set;
2256 
2257  if( MB_FAILURE == rval || sets.empty() )
2258  {
2259  // create a new set, with this block ID
2260  rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
2261 
2262  rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
2263 
2264  // add the material set to file set
2265  rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
2266  }
2267  else
2268  {
2269  block_set = sets[0];
2270  } // first set is the one we want
2271 
2272  /// add the new ents to the block set
2273  rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
2274 
2275  return moab::MB_SUCCESS;
2276 }

References moab::Interface::add_entities(), GlobalContext::appDatas, context, moab::Interface::create_meshset(), moab::Range::empty(), ErrorCode, appData::file_set, moab::ReadUtilIface::get_element_connect(), moab::Interface::get_entities_by_type_and_tag(), appData::local_verts, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, moab::Range::merge(), MESHSET_SET, appData::primary_elems, moab::Interface::query_interface(), moab::Interface::tag_set_data(), and moab::ReadUtilIface::update_adjacencies().

◆ iMOAB_CreateVertices()

ErrCode iMOAB_CreateVertices ( iMOAB_AppID  pid,
int *  coords_len,
int *  dim,
double *  coordinates 
)

Create vertices for an app; it assumes no other vertices.

Note
Operations: Not collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]coords_len(int*) Size of the coordinates array (nverts * dim).
[in]dim(int*) Dimension of the vertex coordinates (usually 3).
[in]coordinates(double*) Coordinates of all vertices, interleaved.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2192 of file iMOAB.cpp.

2193 {
2194  ErrorCode rval;
2195  appData& data = context.appDatas[*pid];
2196 
2197  if( !data.local_verts.empty() ) // we should have no vertices in the app
2198  {
2199  return moab::MB_FAILURE;
2200  }
2201 
2202  int nverts = *coords_len / *dim;
2203 
2204  rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
2205 
2206  rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
2207 
2208  // also add the vertices to the all_verts range
2209  data.all_verts.merge( data.local_verts );
2210  return moab::MB_SUCCESS;
2211 }

References moab::Interface::add_entities(), appData::all_verts, GlobalContext::appDatas, context, moab::Interface::create_vertices(), dim, moab::Range::empty(), ErrorCode, appData::file_set, appData::local_verts, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, and moab::Range::merge().
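
Taken together with iMOAB_CreateElements() above, a minimal single-rank sketch that builds one quad; pid is assumed to come from a prior iMOAB_RegisterApplication call, and note the 1-based connectivity expected by iMOAB_CreateElements:

    double coords[] = { 0., 0., 0., 1., 0., 0., 1., 1., 0., 0., 1., 0. };
    int coords_len = 12; // nverts * dim
    int dim = 3;
    ErrCode rc = iMOAB_CreateVertices( pid, &coords_len, &dim, coords );

    int num_elem = 1;
    int type = 3;                // moab::MBQUAD in the MOAB EntityType enum
    int nodes_per_elem = 4;
    int conn[] = { 1, 2, 3, 4 }; // 1-based, relative to the vertices just created
    int block_ID = 100;          // block (MATERIAL_SET) that receives the quad
    rc = iMOAB_CreateElements( pid, &num_elem, &type, &nodes_per_elem, conn, &block_ID );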

◆ iMOAB_DefineTagStorage()

ErrCode iMOAB_DefineTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  tag_type,
int *  components_per_entity,
int *  tag_index 
)

Define a MOAB Tag corresponding to the application depending on requested types.

Note
In MOAB, for most solution vectors, we only need to create a "Dense", "Double" Tag. A sparse tag can be created too. If the tag already exists in the file, it will not be created. If it is a new tag, memory will be allocated when setting the values. Default values are 0 for integer tags, 0.0 for double tags, 0 for entity handle tags.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag name to store/retrieve the data in MOAB.
[in]tag_type(int*) The type of MOAB tag (Dense/Sparse, Double/Int/EntityHandle), enum MOAB_TAG_TYPE.
[in]components_per_entity(int*) The total size of vector dimension per entity for the tag (e.g., number of doubles per entity).
[out]tag_index(int*) The tag index which can be used as identifier in synchronize methods.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1488 of file iMOAB.cpp.

1493 {
1494  // we have 6 types of tags supported so far
1495  // check if tag type is valid
1496  if( *tag_type < 0 || *tag_type > 5 ) return moab::MB_FAILURE;
1497 
1498  DataType tagDataType;
1499  TagType tagType;
1500  void* defaultVal = nullptr;
1501  int* defInt = new int[*components_per_entity];
1502  double* defDouble = new double[*components_per_entity];
1503  EntityHandle* defHandle = new EntityHandle[*components_per_entity];
1504 
1505  for( int i = 0; i < *components_per_entity; i++ )
1506  {
1507  defInt[i] = 0;
1508  defDouble[i] = -1e+10;
1509  defHandle[i] = static_cast< EntityHandle >( 0 );
1510  }
1511 
1512  switch( *tag_type )
1513  {
1514  case 0:
1515  tagDataType = MB_TYPE_INTEGER;
1516  tagType = MB_TAG_DENSE;
1517  defaultVal = defInt;
1518  break;
1519 
1520  case 1:
1521  tagDataType = MB_TYPE_DOUBLE;
1522  tagType = MB_TAG_DENSE;
1523  defaultVal = defDouble;
1524  break;
1525 
1526  case 2:
1527  tagDataType = MB_TYPE_HANDLE;
1528  tagType = MB_TAG_DENSE;
1529  defaultVal = defHandle;
1530  break;
1531 
1532  case 3:
1533  tagDataType = MB_TYPE_INTEGER;
1534  tagType = MB_TAG_SPARSE;
1535  defaultVal = defInt;
1536  break;
1537 
1538  case 4:
1539  tagDataType = MB_TYPE_DOUBLE;
1540  tagType = MB_TAG_SPARSE;
1541  defaultVal = defDouble;
1542  break;
1543 
1544  case 5:
1545  tagDataType = MB_TYPE_HANDLE;
1546  tagType = MB_TAG_SPARSE;
1547  defaultVal = defHandle;
1548  break;
1549 
1550  default: {
1551  delete[] defInt;
1552  delete[] defDouble;
1553  delete[] defHandle;
1554  return moab::MB_FAILURE;
1555  } // error
1556  }
1557 
1558  // split storage names if separated list
1559  std::string tag_name( tag_storage_name );
1560  // first separate the names of the tags
1561  // we assume that there are separators ":" between the tag names
1562  std::vector< std::string > tagNames;
1563  std::string separator( ":" );
1564  split_tag_names( tag_name, separator, tagNames );
1565 
1566  ErrorCode rval = moab::MB_SUCCESS; // assume success already :)
1567  appData& data = context.appDatas[*pid];
1568  int already_defined_tags = 0;
1569 
1570  Tag tagHandle;
1571  for( size_t i = 0; i < tagNames.size(); i++ )
1572  {
1573  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
1574  tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal );
1575 
1576  if( MB_ALREADY_ALLOCATED == rval )
1577  {
1578  std::map< std::string, Tag >& mTags = data.tagMap;
1579  std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
1580 
1581  if( mit == mTags.end() )
1582  {
1583  // add it to the map
1584  mTags[tagNames[i]] = tagHandle;
1585  // push it to the list of tags, too
1586  *tag_index = (int)data.tagList.size();
1587  data.tagList.push_back( tagHandle );
1588  }
1589  rval = MB_SUCCESS;
1590  already_defined_tags++;
1591  }
1592  else if( MB_SUCCESS == rval )
1593  {
1594  data.tagMap[tagNames[i]] = tagHandle;
1595  *tag_index = (int)data.tagList.size();
1596  data.tagList.push_back( tagHandle );
1597  }
1598  else
1599  {
1600  rval = moab::MB_FAILURE; // some tags were not created
1601  }
1602  }
1603  // we don't need default values anymore, avoid leaks
1604  int rankHere = 0;
1605 #ifdef MOAB_HAVE_MPI
1606  ParallelComm* pco = context.appDatas[*pid].pcomm;
1607  rankHere = pco->rank();
1608 #endif
1609  if( !rankHere && already_defined_tags )
1610  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
1611  << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
1612  << " tags \n";
1613  delete[] defInt;
1614  delete[] defDouble;
1615  delete[] defHandle;
1616  return rval;
1617 }

References GlobalContext::appDatas, context, ErrorCode, appData::global_id, MB_ALREADY_ALLOCATED, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_DENSE, MB_TAG_EXCL, MB_TAG_SPARSE, MB_TYPE_DOUBLE, MB_TYPE_HANDLE, MB_TYPE_INTEGER, GlobalContext::MBI, appData::name, moab::ParallelComm::rank(), split_tag_names(), moab::Interface::tag_get_handle(), appData::tagList, appData::tagMap, and TagType.

Referenced by iMOAB_ReceiveMesh().
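
A hedged sketch defining two dense double tags in one call, relying on the ":" separator handled by split_tag_names() (the tag names are illustrative):

    char tagNames[] = "a2oTbot:a2oUbot"; // two tags, ":"-separated
    int tag_type = 1;                    // case 1 above: dense, double
    int components_per_entity = 1;
    int tag_index = -1;                  // receives the index of the last tag defined
    ErrCode rc = iMOAB_DefineTagStorage( pid, tagNames, &tag_type,
                                         &components_per_entity, &tag_index );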

◆ iMOAB_DeregisterApplication()

ErrCode iMOAB_DeregisterApplication ( iMOAB_AppID  pid)

De-Register application: delete mesh (set) associated with the application ID.

Note
The associated communicator will be released, and all associated mesh entities and sets will be deleted from the mesh data structure. Associated tag storage data will be freed too.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID
Returns
ErrCode The error code indicating success or failure.

Definition at line 334 of file iMOAB.cpp.

335 {
336  // the file set, parallel comm are all in vectors indexed by *pid
337  // assume we did not delete anything yet
338  // *pid will not be reused if we register another application
339  auto appIterator = context.appDatas.find( *pid );
340  // ensure that the application exists before we attempt to delete it
341  if( appIterator == context.appDatas.end() ) return MB_FAILURE;
342 
343  // we found the application
344  appData& data = appIterator->second;
345  int rankHere = 0;
346 #ifdef MOAB_HAVE_MPI
347  rankHere = data.pcomm->rank();
348 #endif
349  if( !rankHere )
350  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
351  << " is de-registered now \n";
352 
353  EntityHandle fileSet = data.file_set;
354  // get all entities part of the file set
355  Range fileents;
356  MB_CHK_ERR( context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true ) );
357  fileents.insert( fileSet );
358  MB_CHK_ERR( context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents ) ); // append all mesh sets
359 
360 #ifdef MOAB_HAVE_TEMPESTREMAP
361  if( data.tempestData.remapper ) delete data.tempestData.remapper;
362  if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
363 #endif
364 
365 #ifdef MOAB_HAVE_MPI
366  // NOTE: we could get the pco also with the following workflow.
367  // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
368 
369  auto& pargs = data.pgraph;
370  // free the parallel comm graphs associated with this app
371  for( auto mt = pargs.begin(); mt != pargs.end(); ++mt )
372  {
373  ParCommGraph* pgr = mt->second;
374  if( pgr != nullptr )
375  {
376  delete pgr;
377  pgr = nullptr;
378  }
379  }
380  // now free the ParallelComm resources
381  if( data.pcomm )
382  {
383  delete data.pcomm;
384  data.pcomm = nullptr;
385  }
386 #endif
387 
388  // delete first all except vertices
389  Range vertices = fileents.subset_by_type( MBVERTEX );
390  Range noverts = subtract( fileents, vertices );
391 
392  MB_CHK_ERR( context.MBI->delete_entities( noverts ) );
393  // now retrieve connected elements that still exist (maybe in other sets, pids?)
394  Range adj_ents_left;
395  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION ) );
396  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION ) );
397  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION ) );
398 
399  if( !adj_ents_left.empty() )
400  {
401  Range conn_verts;
402  MB_CHK_ERR( context.MBI->get_connectivity( adj_ents_left, conn_verts ) );
403  vertices = subtract( vertices, conn_verts );
404  }
405 
406  MB_CHK_ERR( context.MBI->delete_entities( vertices ) );
407 
408  for( auto mit = context.appIdMap.begin(); mit != context.appIdMap.end(); ++mit )
409  {
410  if( *pid == mit->second )
411  {
412 #ifdef MOAB_HAVE_MPI
413  if( data.pcomm )
414  {
415  delete data.pcomm;
416  data.pcomm = nullptr;
417  }
418 #endif
419  context.appIdMap.erase( mit );
420  break;
421  }
422  }
423 
424  // now we can finally delete the application data itself
425  context.appDatas.erase( appIterator );
426 
427  return moab::MB_SUCCESS;
428 }

References GlobalContext::appDatas, GlobalContext::appIdMap, context, moab::Interface::delete_entities(), moab::Range::empty(), appData::file_set, moab::Interface::get_adjacencies(), moab::Interface::get_connectivity(), moab::Interface::get_entities_by_handle(), moab::Interface::get_entities_by_type(), appData::global_id, moab::Range::insert(), MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, MBVERTEX, appData::name, appData::pcomm, appData::pgraph, moab::ParallelComm::rank(), moab::Range::subset_by_type(), moab::subtract(), and moab::Interface::UNION.

Referenced by iMOAB_DeregisterApplicationFortran().
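
A hedged end-to-end lifecycle sketch, from registration to de-registration; the component name and id are illustrative, and iMOAB_AppID is assumed to be a pointer to int, per iMOAB.h:

    MPI_Init( &argc, &argv );
    ErrCode rc = iMOAB_Initialize( argc, argv );

    char appName[] = "OCN";
    int compid = 10; // illustrative component id
    int appID = -1;
    iMOAB_AppID pid = &appID;
    MPI_Comm comm = MPI_COMM_WORLD;
    rc = iMOAB_RegisterApplication( appName, &comm, &compid, pid );

    // ... load or create a mesh, define tags, couple, write output ...

    rc = iMOAB_DeregisterApplication( pid );
    rc = iMOAB_Finalize();
    MPI_Finalize();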

◆ iMOAB_DeregisterApplicationFortran()

ErrCode iMOAB_DeregisterApplicationFortran ( iMOAB_AppID  pid)

De-Register the Fortran based application: delete mesh (set) associated with the application ID.

Note
The associated communicator will be released, and all associated mesh entities and sets will be deleted from the mesh data structure. Associated tag storage data will be freed too.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID
Returns
ErrCode The error code indicating success or failure.

Definition at line 430 of file iMOAB.cpp.

431 {
432  // release any Fortran specific allocations here before we pass it on
433  context.appDatas[*pid].is_fortran = false;
434 
435  // release all datastructure allocations
436  return iMOAB_DeregisterApplication( pid );
437 }

References GlobalContext::appDatas, context, and iMOAB_DeregisterApplication().

◆ iMOAB_DetermineGhostEntities()

ErrCode iMOAB_DetermineGhostEntities ( iMOAB_AppID  pid,
int *  ghost_dim,
int *  num_ghost_layers,
int *  bridge_dim 
)

Create the requested number of ghost layers for the parallel mesh.

Note
The routine assumes that the shared entities were resolved successfully, and that the mesh is properly distributed on separate tasks.

Operations: Collective.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]ghost_dim(int*) Desired ghost dimension (2 or 3, most of the time).
[in]num_ghost_layers(int*) Number of ghost layers requested.
[in]bridge_dim(int*) Bridge dimension (0 for vertices, 1 for edges, 2 for faces).
Returns
ErrCode The error code indicating success or failure.

Definition at line 2355 of file iMOAB.cpp.

2356 {
2357  ErrorCode rval;
2358 
2359  // verify we have valid ghost layers input specified. If invalid, exit quick.
2360  if( num_ghost_layers && *num_ghost_layers <= 0 )
2361  {
2362  return moab::MB_SUCCESS;
2363  } // nothing to do
2364 
2365  appData& data = context.appDatas[*pid];
2366  ParallelComm* pco = context.appDatas[*pid].pcomm;
2367 
2368  // maybe we should be passing this too; most of the time we do not need additional ents collective call
2369  constexpr int addl_ents = 0;
2370  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, 1, // get only one layer of ghost entities
2371  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2372  for( int i = 2; i <= *num_ghost_layers; i++ )
2373  {
2374  rval = pco->correct_thin_ghost_layers();MB_CHK_ERR( rval ); // correct for thin layers
2375  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, i, // iteratively get one extra layer
2376  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2377  }
2378 
2379  // Update ghost layer information
2380  data.num_ghost_layers = *num_ghost_layers;
2381 
2382  // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
2383  return iMOAB_UpdateMeshInfo( pid );
2384 }

References GlobalContext::appDatas, context, moab::ParallelComm::correct_thin_ghost_layers(), ErrorCode, moab::ParallelComm::exchange_ghost_cells(), appData::file_set, iMOAB_UpdateMeshInfo(), MB_CHK_ERR, MB_SUCCESS, and appData::num_ghost_layers.
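
A hedged usage sketch, to be called after shared entities have been resolved (the values are typical choices, not requirements):

    int ghost_dim = 2;        // exchange 2D cells/faces
    int num_ghost_layers = 2; // two layers, obtained iteratively as in the listing
    int bridge_dim = 0;       // bridge through vertices
    ErrCode rc = iMOAB_DetermineGhostEntities( pid, &ghost_dim, &num_ghost_layers, &bridge_dim );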

◆ iMOAB_Finalize()

ErrCode iMOAB_Finalize ( void  )

Finalize the iMOAB interface implementation.

It will delete the internally reference counted MOAB instance, if the reference count reaches 0.

Operations: Collective

Returns
ErrCode The error code indicating success or failure.

Definition at line 180 of file iMOAB.cpp.

181 {
182  context.refCountMB--;
183 
184  if( 0 == context.refCountMB )
185  {
186  delete context.MBI;
187  }
188 
189  return MB_SUCCESS;
190 }

References context, MB_SUCCESS, GlobalContext::MBI, and GlobalContext::refCountMB.

◆ iMOAB_FreeSenderBuffers()

ErrCode iMOAB_FreeSenderBuffers ( iMOAB_AppID  pid,
int *  context_id 
)

Definition at line 2781 of file iMOAB.cpp.

2782 {
2783  // need first to find the pgraph that holds the information we need
2784  // this will be called on sender side only
2785  appData& data = context.appDatas[*pid];
2786  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2787  if( mt == data.pgraph.end() ) return moab::MB_FAILURE; // error
2788 
2789  mt->second->release_send_buffers();
2790  return moab::MB_SUCCESS;
2791 }

References GlobalContext::appDatas, context, MB_SUCCESS, and appData::pgraph.
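
In the tag migration pattern (see iMOAB_SendElementTag and iMOAB_ReceiveElementTag in the function list above), the sender frees its buffers only after the paired receive has completed. A hedged sketch, where context_id selects the pgraph toward the other component (values and names illustrative):

    char tagName[] = "a2oTbot";
    // on the sender tasks
    ErrCode rc = iMOAB_SendElementTag( pidSrc, tagName, &jointComm, &context_id );
    // on the receiver tasks
    rc = iMOAB_ReceiveElementTag( pidTgt, tagName, &jointComm, &context_id );
    // back on the sender tasks, once the transfer has completed
    rc = iMOAB_FreeSenderBuffers( pidSrc, &context_id );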

◆ iMOAB_GetBlockElementConnectivities()

ErrCode iMOAB_GetBlockElementConnectivities ( iMOAB_AppID  pid,
iMOAB_GlobalID global_block_ID,
int *  connectivity_length,
int *  element_connectivity 
)

Get the connectivities for elements contained within a certain block.

Note
Input is the block ID. Should we change to the visible block local index?

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID*) The global block ID of the set being queried.
[in]connectivity_length(int*) The allocated size of array. (typical size := vertices_per_element*num_elements_in_block)
[out]element_connectivity(int*) The connectivity array to store element ordering in MOAB canonical numbering scheme (array allocated by user); the array contains vertex indices in the local numbering order for vertices; elements are in the same order as provided by GetElementOwnership and GetElementID.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1158 of file iMOAB.cpp.

1162 {
1163  assert( global_block_ID ); // ensure global block ID argument is not null
1164  assert( connectivity_length ); // ensure connectivity length argument is not null
1165 
1166  appData& data = context.appDatas[*pid];
1167  std::map< int, int >& matMap = data.matIndex;
1168  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1169 
1170  if( it == matMap.end() )
1171  {
1172  return moab::MB_FAILURE;
1173  } // error in finding block with id
1174 
1175  int blockIndex = matMap[*global_block_ID];
1176  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1177  std::vector< EntityHandle > elems;
1178 
1179  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1180 
1181  if( elems.empty() )
1182  {
1183  return moab::MB_FAILURE;
1184  }
1185 
1186  std::vector< EntityHandle > vconnect;
1187  rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
1188 
1189  if( *connectivity_length != (int)vconnect.size() )
1190  {
1191  return moab::MB_FAILURE;
1192  } // mismatched sizes
1193 
1194  for( int i = 0; i < *connectivity_length; i++ )
1195  {
1196  int inx = data.all_verts.index( vconnect[i] );
1197 
1198  if( -1 == inx )
1199  {
1200  return moab::MB_FAILURE;
1201  } // error, vertex not in local range
1202 
1203  element_connectivity[i] = inx;
1204  }
1205 
1206  return moab::MB_SUCCESS;
1207 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, moab::Interface::get_connectivity(), moab::Interface::get_entities_by_handle(), moab::Range::index(), appData::mat_sets, appData::matIndex, MB_CHK_ERR, MB_SUCCESS, and GlobalContext::MBI.

◆ iMOAB_GetBlockID()

ErrCode iMOAB_GetBlockID ( iMOAB_AppID  pid,
int *  block_length,
iMOAB_GlobalID global_block_IDs 
)

Get the global block IDs for all locally visible (owned and shared/ghosted) blocks.

Note
Block IDs correspond to MATERIAL_SET tags for material sets. Usually the block ID is exported from Cubit as a unique integer value. First blocks are local, and next blocks are fully ghosted: first blocks have at least one owned cell/element, while ghost blocks have only ghost cells. Internally, a block corresponds to a mesh set with a MATERIAL_SET tag value equal to the block ID.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]block_length(int*) The allocated size of array (typical size := num_visible_blocks).
[out]global_block_IDs(iMOAB_GlobalID*) The global IDs for all locally visible blocks (array allocated by user).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1035 of file iMOAB.cpp.

1036 {
1037  // local id blocks? they are counted from 0 to number of visible blocks ...
1038  // will actually return material set tag value for global
1039  Range& matSets = context.appDatas[*pid].mat_sets;
1040 
1041  if( *block_length != (int)matSets.size() )
1042  {
1043  return moab::MB_FAILURE;
1044  }
1045 
1046  // return material set tag values; the material set tag value is the global block ID
1047  ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
1048 
1049  // populate map with index
1050  std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
1051  for( unsigned i = 0; i < matSets.size(); i++ )
1052  {
1053  matIdx[global_block_IDs[i]] = i;
1054  }
1055 
1056  return moab::MB_SUCCESS;
1057 }

References GlobalContext::appDatas, context, ErrorCode, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::Range::size(), and moab::Interface::tag_get_data().

◆ iMOAB_GetBlockInfo()

ErrCode iMOAB_GetBlockInfo ( iMOAB_AppID  pid,
iMOAB_GlobalID global_block_ID,
int *  vertices_per_element,
int *  num_elements_in_block 
)

Get the global block information and number of visible elements of belonging to a block (MATERIAL SET).

Note
A block has to be homogeneous, it can contain elements of a single type.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID) The global block ID of the set to be queried.
[out]vertices_per_element(int*) The number of vertices per element.
[out]num_elements_in_block(int*) The number of elements in block.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1059 of file iMOAB.cpp.

1063 {
1064  assert( global_block_ID );
1065 
1066  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1067  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1068 
1069  if( it == matMap.end() )
1070  {
1071  return moab::MB_FAILURE;
1072  } // error in finding block with id
1073 
1074  int blockIndex = matMap[*global_block_ID];
1075  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1076  Range blo_elems;
1077  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
1078 
1079  if( MB_SUCCESS != rval || blo_elems.empty() )
1080  {
1081  return moab::MB_FAILURE;
1082  }
1083 
1084  EntityType type = context.MBI->type_from_handle( blo_elems[0] );
1085 
1086  if( !blo_elems.all_of_type( type ) )
1087  {
1088  return moab::MB_FAILURE;
1089  } // not all of same type
1090 
1091  const EntityHandle* conn = nullptr;
1092  int num_verts = 0;
1093  rval = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
1094 
1095  *vertices_per_element = num_verts;
1096  *num_elements_in_block = (int)blo_elems.size();
1097 
1098  return moab::MB_SUCCESS;
1099 }

References moab::Range::all_of_type(), GlobalContext::appDatas, context, moab::Range::empty(), ErrorCode, moab::Interface::get_connectivity(), moab::Interface::get_entities_by_handle(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::Range::size(), and moab::Interface::type_from_handle().
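
A hedged sketch combining the block queries above (iMOAB_GetBlockID, iMOAB_GetBlockInfo, iMOAB_GetBlockElementConnectivities). The 3-entry counters passed to iMOAB_GetMeshInfo follow the owned/ghost/total convention used in the MOAB examples; that layout is an assumption here.

    #include <vector>

    int nv[3], ne[3], nb[3], nsbc[3], nvbc[3];
    ErrCode rc = iMOAB_GetMeshInfo( pid, nv, ne, nb, nsbc, nvbc );

    int block_length = nb[2]; // total visible blocks (assumed index)
    std::vector< iMOAB_GlobalID > blockIDs( block_length );
    rc = iMOAB_GetBlockID( pid, &block_length, blockIDs.data() );

    for( int b = 0; b < block_length; b++ )
    {
        int vpe = 0, nelb = 0;
        rc = iMOAB_GetBlockInfo( pid, &blockIDs[b], &vpe, &nelb );

        std::vector< int > conn( vpe * nelb );
        int conn_len = (int)conn.size();
        rc = iMOAB_GetBlockElementConnectivities( pid, &blockIDs[b], &conn_len, conn.data() );
    }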

◆ iMOAB_GetDoubleTagStorage()

ErrCode iMOAB_GetDoubleTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  num_tag_storage_length,
int *  entity_type,
double *  tag_storage_data 
)

Retrieve the specified values in a MOAB double Tag.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag names to retrieve the data in MOAB.
[in]num_tag_storage_length(int*) The size of total tag storage data (e.g., num_visible_vertices*components_per_entity*num_tags or num_visible_elements*components_per_entity*num_tags)
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[out]tag_storage_data(double*) The array data of type double to be filled from the internal tag memory; the data is assumed to be contiguous over the local set of visible entities (either vertices or elements), unrolled by tags.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2009 of file iMOAB.cpp.

2014 {
2015  ErrorCode rval;
2016  // exactly the same code, except tag type check
2017  std::string tag_names( tag_storage_names );
2018  // exactly the same code as for int tag :) maybe should check the type of tag too
2019  std::vector< std::string > tagNames;
2020  std::vector< Tag > tagHandles;
2021  std::string separator( ":" );
2022  split_tag_names( tag_names, separator, tagNames );
2023 
2024  appData& data = context.appDatas[*pid];
2025 
2026  // set it on a subset of entities, based on type and length
2027  Range* ents_to_get = nullptr;
2028 
2029  if( *ent_type == 0 ) // vertices
2030  {
2031  ents_to_get = &data.all_verts;
2032  }
2033  else if( *ent_type == 1 )
2034  {
2035  ents_to_get = &data.primary_elems;
2036  }
2037  int nents_to_get = (int)ents_to_get->size();
2038  int position = 0;
2039  for( size_t i = 0; i < tagNames.size(); i++ )
2040  {
2041  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
2042  {
2043  return moab::MB_FAILURE;
2044  } // tag not defined
2045 
2046  Tag tag = data.tagMap[tagNames[i]];
2047 
2048  int tagLength = 0;
2049  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
2050 
2051  DataType dtype;
2052  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
2053 
2054  if( dtype != MB_TYPE_DOUBLE )
2055  {
2056  return moab::MB_FAILURE;
2057  }
2058 
2059  if( position + nents_to_get * tagLength > *num_tag_storage_length )
2060  return moab::MB_FAILURE; // too many entity values to get
2061 
2062  rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
2063  position = position + nents_to_get * tagLength;
2064  }
2065 
2066  return moab::MB_SUCCESS; // no error
2067 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, MB_TYPE_DOUBLE, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), split_tag_names(), moab::Interface::tag_get_data(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), and appData::tagMap.
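
A hedged retrieval sketch for two double tags on primary elements (tag names illustrative; the storage is unrolled by tags, as described above):

    #include <vector>

    char names[] = "a2oTbot:a2oUbot"; // two tags, ":"-separated
    int ent_type = 1;                 // primary elements
    int num_tags = 2, comps = 1;
    // num_visible_elements obtained earlier, e.g. from iMOAB_GetMeshInfo (assumption)
    int storage_len = num_visible_elements * comps * num_tags;
    std::vector< double > vals( storage_len );
    ErrCode rc = iMOAB_GetDoubleTagStorage( pid, names, &storage_len, &ent_type, vals.data() );
    // vals now holds tag 1 for all elements, followed by tag 2 for all elements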

◆ iMOAB_GetElementConnectivity()

ErrCode iMOAB_GetElementConnectivity ( iMOAB_AppID  pid,
iMOAB_LocalID elem_index,
int *  connectivity_length,
int *  element_connectivity 
)

Get the connectivity for one element only.

Note
This is a convenience method and in general should not be used unless necessary. You should use coalesced calls for multiple elements for efficiency.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]elem_index(iMOAB_LocalID *) Local element index.
[in,out]connectivity_length(int *) On input, maximum length of connectivity. On output, actual length.
[out]element_connectivity(int*) The connectivity array to store connectivity in MOAB canonical numbering scheme. Array contains vertex indices in the local numbering order for vertices.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1209 of file iMOAB.cpp.

1213 {
1214  assert( elem_index ); // ensure element index argument is not null
1215  assert( connectivity_length ); // ensure connectivity length argument is not null
1216 
1217  appData& data = context.appDatas[*pid];
1218  assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
1219 
1220  int num_nodes;
1221  const EntityHandle* conn;
1222 
1223  EntityHandle eh = data.primary_elems[*elem_index];
1224 
1225  ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
1226 
1227  if( *connectivity_length < num_nodes )
1228  {
1229  return moab::MB_FAILURE;
1230  } // wrong number of vertices
1231 
1232  for( int i = 0; i < num_nodes; i++ )
1233  {
1234  int index = data.all_verts.index( conn[i] );
1235 
1236  if( -1 == index )
1237  {
1238  return moab::MB_FAILURE;
1239  }
1240 
1241  element_connectivity[i] = index;
1242  }
1243 
1244  *connectivity_length = num_nodes;
1245 
1246  return moab::MB_SUCCESS;
1247 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, moab::Interface::get_connectivity(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, and moab::Range::size().
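
Because connectivity_length is an in/out argument here, a hedged per-element loop looks like this (the buffer capacity of 27 is an arbitrary upper bound chosen for the sketch):

    int conn[27];
    for( iMOAB_LocalID e = 0; e < num_visible_elements; e++ )
    {
        int len = 27; // on input: capacity of conn
        ErrCode rc = iMOAB_GetElementConnectivity( pid, &e, &len, conn );
        // on output, len is the actual number of nodes of element e
    }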

◆ iMOAB_GetElementID()

ErrCode iMOAB_GetElementID ( iMOAB_AppID  pid,
iMOAB_GlobalID global_block_ID,
int *  num_elements_in_block,
iMOAB_GlobalID global_element_ID,
iMOAB_LocalID local_element_ID 
)

Get the global IDs for all locally visible elements belonging to a particular block.

Note
The method will also return the local index of each element, within the local range that contains all visible elements.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID*) The global block ID of the set being queried.
[in]num_elements_in_block(int*) The allocated size of global element ID array, same as num_elements_in_block returned from GetBlockInfo().
[out]global_element_ID(iMOAB_GlobalID*) The global IDs for all locally visible elements (array allocated by user).
[out]local_element_ID(iMOAB_LocalID*) (Optional) The local IDs for all locally visible elements (index in the range of all primary elements in the rank)
Returns
ErrCode The error code indicating success or failure.

Definition at line 1299 of file iMOAB.cpp.

1304 {
1305  assert( global_block_ID ); // ensure global block ID argument is not null
1306  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1307 
1308  appData& data = context.appDatas[*pid];
1309  std::map< int, int >& matMap = data.matIndex;
1310 
1311  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1312 
1313  if( it == matMap.end() )
1314  {
1315  return moab::MB_FAILURE;
1316  } // error in finding block with id
1317 
1318  int blockIndex = matMap[*global_block_ID];
1319  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1320  Range elems;
1321  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1322 
1323  if( elems.empty() )
1324  {
1325  return moab::MB_FAILURE;
1326  }
1327 
1328  if( *num_elements_in_block != (int)elems.size() )
1329  {
1330  return moab::MB_FAILURE;
1331  } // bad memory allocation
1332 
1333  rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
1334 
1335  // check that elems are among primary_elems in data
1336  for( int i = 0; i < *num_elements_in_block; i++ )
1337  {
1338  local_element_ID[i] = data.primary_elems.index( elems[i] );
1339 
1340  if( -1 == local_element_ID[i] )
1341  {
1342  return moab::MB_FAILURE;
1343  } // error, not in local primary elements
1344  }
1345 
1346  return moab::MB_SUCCESS;
1347 }

References GlobalContext::appDatas, context, moab::Range::empty(), ErrorCode, moab::Interface::get_entities_by_handle(), GlobalContext::globalID_tag, moab::Range::index(), appData::mat_sets, appData::matIndex, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), and moab::Interface::tag_get_data().
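
A hedged usage sketch (the block ID 100 is hypothetical; num_elements_in_block would come from iMOAB_GetBlockInfo):

    iMOAB_GlobalID block_id = 100;
    std::vector< iMOAB_GlobalID > gids( num_elements_in_block );
    std::vector< iMOAB_LocalID >  lids( num_elements_in_block );
    ErrCode rc = iMOAB_GetElementID( appID, &block_id, &num_elements_in_block,
                                     gids.data(), lids.data() );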

◆ iMOAB_GetElementOwnership()

ErrCode iMOAB_GetElementOwnership ( iMOAB_AppID  pid,
iMOAB_GlobalID global_block_ID,
int *  num_elements_in_block,
int *  element_ownership 
)

Get the element ownership within a certain block i.e., processor ID of the element owner.

Note
Should we use the local block index for input instead of the global block ID?

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID*) The global block ID of the set being queried.
[in]num_elements_in_block(int*) The allocated size of ownership array, same as num_elements_in_block returned from GetBlockInfo().
[out]element_ownership(int*) The ownership array to store processor ID for all elements (array allocated by user).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1249 of file iMOAB.cpp.

1253 {
1254  assert( global_block_ID ); // ensure global block ID argument is not null
1255  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1256 
1257  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1258 
1259  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1260 
1261  if( it == matMap.end() )
1262  {
1263  return moab::MB_FAILURE;
1264  } // error in finding block with id
1265 
1266  int blockIndex = matMap[*global_block_ID];
1267  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1268  Range elems;
1269 
1270  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1271 
1272  if( elems.empty() )
1273  {
1274  return moab::MB_FAILURE;
1275  }
1276 
1277  if( *num_elements_in_block != (int)elems.size() )
1278  {
1279  return moab::MB_FAILURE;
1280  } // bad memory allocation
1281 
1282  int i = 0;
1283 #ifdef MOAB_HAVE_MPI
1284  ParallelComm* pco = context.appDatas[*pid].pcomm;
1285 #endif
1286 
1287  for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
1288  {
1289 #ifdef MOAB_HAVE_MPI
1290  rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
1291 #else
1292  element_ownership[i] = 0; /* owned by 0 */
1293 #endif
1294  }
1295 
1296  return moab::MB_SUCCESS;
1297 }

References GlobalContext::appDatas, moab::Range::begin(), context, moab::Range::empty(), moab::Range::end(), ErrorCode, moab::Interface::get_entities_by_handle(), moab::ParallelComm::get_owner(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, and moab::Range::size().
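
The corresponding ownership query, sketched under the same assumptions as the iMOAB_GetElementID example above:

    std::vector< int > owners( num_elements_in_block );
    ErrCode rc = iMOAB_GetElementOwnership( appID, &block_id, &num_elements_in_block,
                                            owners.data() );
    // owners[i] is the MPI rank owning element i of the block (always 0 in serial builds)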

◆ iMOAB_GetGlobalInfo()

ErrCode iMOAB_GetGlobalInfo ( iMOAB_AppID  pid,
int *  num_global_verts,
int *  num_global_elems 
)

Get global information about number of vertices and number of elements.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[out]num_global_verts(int*) The total number of vertices in the mesh.
[out]num_global_elems(int*) The total number of elements in the mesh.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2286 of file iMOAB.cpp.

2287 {
2288  appData& data = context.appDatas[*pid];
2289  if( nullptr != num_global_verts )
2290  {
2291  *num_global_verts = data.num_global_vertices;
2292  }
2293  if( nullptr != num_global_elems )
2294  {
2295  *num_global_elems = data.num_global_elements;
2296  }
2297 
2298  return moab::MB_SUCCESS;
2299 }

References GlobalContext::appDatas, context, MB_SUCCESS, appData::num_global_elements, and appData::num_global_vertices.
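
A minimal sketch; per the implementation above, either output pointer may be passed as null if that count is not needed:

    int num_global_verts = 0, num_global_elems = 0;
    ErrCode rc = iMOAB_GetGlobalInfo( appID, &num_global_verts, &num_global_elems );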

◆ iMOAB_GetIntTagStorage()

ErrCode iMOAB_GetIntTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  num_tag_storage_length,
int *  entity_type,
int *  tag_storage_data 
)

Get the specified values in a MOAB integer Tag.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag name to store/retrieve the data in MOAB.
[in]num_tag_storage_length(int*) The size of tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity).
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[out]tag_storage_data(int*) The array data of type int to be copied from the internal tag memory; The data is assumed to be contiguous over the local set of visible entities (either vertices or elements).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1661 of file iMOAB.cpp.

1666 {
1667  ErrorCode rval;
1668  std::string tag_name( tag_storage_name );
1669 
1670  appData& data = context.appDatas[*pid];
1671 
1672  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1673  {
1674  return moab::MB_FAILURE;
1675  } // tag not defined
1676 
1677  Tag tag = data.tagMap[tag_name];
1678 
1679  int tagLength = 0;
1680  MB_CHK_ERR( context.MBI->tag_get_length( tag, tagLength ) );
1681 
1682  DataType dtype;
1683  MB_CHK_ERR( context.MBI->tag_get_data_type( tag, dtype ) );
1684 
1685  if( dtype != MB_TYPE_INTEGER )
1686  {
1687  MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." );
1688  }
1689 
1690  // set it on a subset of entities, based on type and length
1691  // if *entity_type = 0, then use vertices; else elements
1692  Range* ents_to_get = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1693  int nents_to_get = *num_tag_storage_length / tagLength;
1694 
1695  if( nents_to_get > (int)ents_to_get->size() )
1696  {
1697  return moab::MB_FAILURE;
1698  } // too many entities to get, or too few
1699 
1700  // now set the tag data
1701  rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
1702 
1703  return moab::MB_SUCCESS; // no error
1704 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, MB_TYPE_INTEGER, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), moab::Interface::tag_get_data(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), and appData::tagMap.
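
A usage sketch for reading back an integer tag (the tag name "INT_FIELD" is hypothetical and must have been defined earlier, e.g. through iMOAB_DefineTagStorage):

    int ent_type = 1;                        // 0 = vertices, 1 = primary elements
    int length = num_visible_elements * 1;   // entity count times components per entity
    std::vector< int > values( length );
    ErrCode rc = iMOAB_GetIntTagStorage( appID, "INT_FIELD", &length, &ent_type,
                                         values.data() );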

◆ iMOAB_GetMeshInfo()

ErrCode iMOAB_GetMeshInfo ( iMOAB_AppID  pid,
int *  num_visible_vertices,
int *  num_visible_elements,
int *  num_visible_blocks,
int *  num_visible_surfaceBC,
int *  num_visible_vertexBC 
)

Retrieve all the important mesh information related to vertices, elements, vertex and surface boundary conditions.

Note
The numbers of visible vertices and cells include ghost entities. All returned arrays have size 3: local entities first, then ghost entities. Shared vertices can be owned, in the MOAB sense, by different tasks. Ghost vertices and cells are always owned by other tasks.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[out]num_visible_vertices(int*) The number of vertices in the current partition/process arranged as: owned/shared only, ghosted, total_visible (array allocated by user, size := 3).
[out]num_visible_elements(int*) The number of elements in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
[out]num_visible_blocks(int*) The number of material sets in local mesh in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
[out]num_visible_surfaceBC(int*) The number of mesh surfaces that have a NEUMANN_SET BC defined in local mesh in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
[out]num_visible_vertexBC(int*) The number of vertices that have a DIRICHLET_SET BC defined in local mesh in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
Returns
ErrCode The error code indicating success or failure.

Definition at line 877 of file iMOAB.cpp.

883 {
884  ErrorCode rval;
885  appData& data = context.appDatas[*pid];
886  EntityHandle fileSet = data.file_set;
887 
888  // this will include ghost elements
889  // first clear all data ranges; this can be called after ghosting
890  if( num_visible_elements )
891  {
892  num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
893  // separate ghost and local/owned primary elements
894  num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
895  num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
896  }
897  if( num_visible_vertices )
898  {
899  num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
900  num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
901  // local are those that are not ghosts; they may include shared, not owned vertices
902  num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
903  }
904 
905  if( num_visible_blocks )
906  {
907  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
908  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
909 
910  num_visible_blocks[2] = data.mat_sets.size();
911  num_visible_blocks[0] = num_visible_blocks[2];
912  num_visible_blocks[1] = 0;
913  }
914 
915  if( num_visible_surfaceBC )
916  {
917  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
918  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
919 
920  num_visible_surfaceBC[2] = 0;
921  // count how many faces are in each neu set, and how many regions are
922  // adjacent to them;
923  int numNeuSets = (int)data.neu_sets.size();
924 
925  for( int i = 0; i < numNeuSets; i++ )
926  {
927  Range subents;
928  EntityHandle nset = data.neu_sets[i];
929  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
930 
931  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
932  {
933  EntityHandle subent = *it;
934  Range adjPrimaryEnts;
935  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
936 
937  num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
938  }
939  }
940 
941  num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
942  num_visible_surfaceBC[1] = 0;
943  }
944 
945  if( num_visible_vertexBC )
946  {
947  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
948  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
949 
950  num_visible_vertexBC[2] = 0;
951  int numDiriSets = (int)data.diri_sets.size();
952 
953  for( int i = 0; i < numDiriSets; i++ )
954  {
955  Range verts;
956  EntityHandle diset = data.diri_sets[i];
957  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
958 
959  num_visible_vertexBC[2] += (int)verts.size();
960  }
961 
962  num_visible_vertexBC[0] = num_visible_vertexBC[2];
963  num_visible_vertexBC[1] = 0;
964  }
965 
966  return moab::MB_SUCCESS;
967 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::begin(), context, appData::dimension, appData::diri_sets, GlobalContext::dirichlet_tag, moab::Range::end(), ErrorCode, appData::file_set, moab::Interface::get_adjacencies(), moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type_and_tag(), appData::ghost_elems, appData::ghost_vertices, appData::mat_sets, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, appData::neu_sets, GlobalContext::neumann_tag, appData::owned_elems, appData::primary_elems, moab::Range::size(), and moab::Interface::UNION.
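
A usage sketch; each output array is user-allocated with size 3, as noted above:

    int nverts[3], nelems[3], nblocks[3], nsbc[3], nvbc[3];
    ErrCode rc = iMOAB_GetMeshInfo( appID, nverts, nelems, nblocks, nsbc, nvbc );
    // e.g., nverts[0] = owned/shared, nverts[1] = ghost, nverts[2] = total visible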

◆ iMOAB_GetNeighborElements()

ErrCode iMOAB_GetNeighborElements ( iMOAB_AppID  pid,
iMOAB_LocalID local_index,
int *  num_adjacent_elements,
iMOAB_LocalID adjacent_element_IDs 
)

Retrieve the adjacencies for the element entities.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]local_index(iMOAB_LocalID*) The local element ID for which adjacency information is needed.
[out]num_adjacent_elements(int*) The total number of adjacent elements.
[out]adjacent_element_IDs(iMOAB_LocalID*) The local element IDs of all adjacent elements to the current one (typically, num_total_sides for internal elements or num_total_sides-num_sides_on_boundary for boundary elements).
Returns
ErrCode The error code indicating success or failure.

Definition at line 2154 of file iMOAB.cpp.

2158 {
2159  ErrorCode rval;
2160 
2161  // one neighbor for each subentity of dimension-1
2162  MeshTopoUtil mtu( context.MBI );
2163  appData& data = context.appDatas[*pid];
2164  EntityHandle eh = data.primary_elems[*local_index];
2165  Range adjs;
2166  rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
2167 
2168  if( *num_adjacent_elements < (int)adjs.size() )
2169  {
2170  return moab::MB_FAILURE;
2171  } // not dimensioned correctly
2172 
2173  *num_adjacent_elements = (int)adjs.size();
2174 
2175  for( int i = 0; i < *num_adjacent_elements; i++ )
2176  {
2177  adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
2178  }
2179 
2180  return moab::MB_SUCCESS;
2181 }

References GlobalContext::appDatas, context, appData::dimension, ErrorCode, moab::MeshTopoUtil::get_bridge_adjacencies(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, and moab::Range::size().
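
A sketch for one element (the capacity of 6 assumes hexahedral cells, which have at most 6 face neighbors):

    iMOAB_LocalID elem = 0;
    int num_adjacent = 6;       // in: array capacity; out: actual neighbor count
    iMOAB_LocalID adjacent[6];
    ErrCode rc = iMOAB_GetNeighborElements( appID, &elem, &num_adjacent, adjacent );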

◆ iMOAB_GetPointerToSurfaceBC()

ErrCode iMOAB_GetPointerToSurfaceBC ( iMOAB_AppID  pid,
int *  surface_BC_length,
iMOAB_LocalID local_element_ID,
int *  reference_surface_ID,
int *  boundary_condition_value 
)

Get the surface boundary condition information.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]surface_BC_length(int*) The allocated size of surface boundary condition array, same as num_visible_surfaceBC returned by GetMeshInfo().
[out]local_element_ID(iMOAB_LocalID*) The local element IDs that contains the side with the surface BC.
[out]reference_surface_ID(int*) The surface number with the BC in the canonical reference element (e.g., 1 to 6 for HEX, 1-4 for TET).
[out]boundary_condition_value(int*) The boundary condition type as obtained from the mesh description (value of the NeumannSet defined on the element).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1349 of file iMOAB.cpp.

1354 {
1355  // we have to fill bc data for neumann sets;/
1356  ErrorCode rval;
1357 
1358  // it was counted above, in GetMeshInfo
1359  appData& data = context.appDatas[*pid];
1360  int numNeuSets = (int)data.neu_sets.size();
1361 
1362  int index = 0; // index [0, surface_BC_length) for the arrays returned
1363 
1364  for( int i = 0; i < numNeuSets; i++ )
1365  {
1366  Range subents;
1367  EntityHandle nset = data.neu_sets[i];
1368  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
1369 
1370  int neuVal;
1371  rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
1372 
1373  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
1374  {
1375  EntityHandle subent = *it;
1376  Range adjPrimaryEnts;
1377  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
1378 
1379  // get global id of the primary ents, and side number of the quad/subentity
1380  // this is moab ordering
1381  for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
1382  {
1383  EntityHandle primaryEnt = *pit;
1384  // get global id
1385  /*int globalID;
1386  rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
1387  if (MB_SUCCESS!=rval)
1388  return moab::MB_FAILURE;
1389  global_element_ID[index] = globalID;*/
1390 
1391  // get local element id
1392  local_element_ID[index] = data.primary_elems.index( primaryEnt );
1393 
1394  if( -1 == local_element_ID[index] )
1395  {
1396  return moab::MB_FAILURE;
1397  } // did not find the element locally
1398 
1399  int side_number, sense, offset;
1400  rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
1401 
1402  reference_surface_ID[index] = side_number + 1; // moab is from 0 to 5, it needs 1 to 6
1403  boundary_condition_value[index] = neuVal;
1404  index++;
1405  }
1406  }
1407  }
1408 
1409  if( index != *surface_BC_length )
1410  {
1411  return moab::MB_FAILURE;
1412  } // error in array allocations
1413 
1414  return moab::MB_SUCCESS;
1415 }

References GlobalContext::appDatas, moab::Range::begin(), context, appData::dimension, moab::Range::end(), ErrorCode, moab::Interface::get_adjacencies(), moab::Interface::get_entities_by_dimension(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::neu_sets, GlobalContext::neumann_tag, appData::primary_elems, moab::Interface::side_number(), moab::side_number(), moab::Range::size(), and moab::Interface::tag_get_data().
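
A sketch sized by the num_visible_surfaceBC counts from iMOAB_GetMeshInfo (nsbc as in the sketch shown there):

    int sbc_length = nsbc[2];
    std::vector< iMOAB_LocalID > bc_elems( sbc_length );
    std::vector< int > ref_sides( sbc_length ), bc_values( sbc_length );
    ErrCode rc = iMOAB_GetPointerToSurfaceBC( appID, &sbc_length, bc_elems.data(),
                                              ref_sides.data(), bc_values.data() );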

◆ iMOAB_GetPointerToVertexBC()

ErrCode iMOAB_GetPointerToVertexBC ( iMOAB_AppID  pid,
int *  vertex_BC_length,
iMOAB_LocalID local_vertex_ID,
int *  boundary_condition_value 
)

Get the vertex boundary condition information.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]vertex_BC_length(int*) The allocated size of vertex boundary condition array, same as num_visible_vertexBC returned by GetMeshInfo().
[out]local_vertex_ID(iMOAB_LocalID*) The local vertex ID that has Dirichlet BC defined.
[out]boundary_condition_value(int*) The boundary condition type as obtained from the mesh description (value of the Dirichlet_Set tag defined on the vertex).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1417 of file iMOAB.cpp.

1421 {
1422  ErrorCode rval;
1423 
1424  // it was counted above, in GetMeshInfo
1425  appData& data = context.appDatas[*pid];
1426  int numDiriSets = (int)data.diri_sets.size();
1427  int index = 0; // index [0, *vertex_BC_length) for the arrays returned
1428 
1429  for( int i = 0; i < numDiriSets; i++ )
1430  {
1431  Range verts;
1432  EntityHandle diset = data.diri_sets[i];
1433  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
1434 
1435  int diriVal;
1436  rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
1437 
1438  for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
1439  {
1440  EntityHandle vt = *vit;
1441  /*int vgid;
1442  rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
1443  if (MB_SUCCESS!=rval)
1444  return moab::MB_FAILURE;
1445  global_vertext_ID[index] = vgid;*/
1446  local_vertex_ID[index] = data.all_verts.index( vt );
1447 
1448  if( -1 == local_vertex_ID[index] )
1449  {
1450  return moab::MB_FAILURE;
1451  } // vertex was not found
1452 
1453  boundary_condition_value[index] = diriVal;
1454  index++;
1455  }
1456  }
1457 
1458  if( *vertex_BC_length != index )
1459  {
1460  return moab::MB_FAILURE;
1461  } // array allocation issue
1462 
1463  return moab::MB_SUCCESS;
1464 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::begin(), context, appData::diri_sets, GlobalContext::dirichlet_tag, moab::Range::end(), ErrorCode, moab::Interface::get_entities_by_dimension(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::Range::size(), and moab::Interface::tag_get_data().
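
Similarly, a sketch sized by the num_visible_vertexBC counts from iMOAB_GetMeshInfo:

    int vbc_length = nvbc[2];
    std::vector< iMOAB_LocalID > bc_verts( vbc_length );
    std::vector< int > diri_values( vbc_length );
    ErrCode rc = iMOAB_GetPointerToVertexBC( appID, &vbc_length, bc_verts.data(),
                                             diri_values.data() );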

◆ iMOAB_GetVertexID()

ErrCode iMOAB_GetVertexID ( iMOAB_AppID  pid,
int *  vertices_length,
iMOAB_GlobalID global_vertex_ID 
)

Get the global vertex IDs for all locally visible (owned and shared/ghosted) vertices.

Note
The array should be allocated by the user, sized with the total number of visible vertices from the iMOAB_GetMeshInfo method.

Operations: Not collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]vertices_length(int*) The allocated size of array (typical size := num_visible_vertices).
[out]global_vertex_ID(iMOAB_GlobalID*) The global IDs for all locally visible vertices (array allocated by user).
Returns
ErrCode The error code indicating success or failure.

Definition at line 969 of file iMOAB.cpp.

970 {
971  IMOAB_CHECKPOINTER( vertices_length, 2 );
972  IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
973 
974  const Range& verts = context.appDatas[*pid].all_verts;
975  // check for problems with array length
976  IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
977 
978  // global id tag is context.globalID_tag
979  return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
980 }

References GlobalContext::appDatas, context, GlobalContext::globalID_tag, IMOAB_ASSERT, IMOAB_CHECKPOINTER, GlobalContext::MBI, moab::Range::size(), and moab::Interface::tag_get_data().
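
A minimal sketch, sized by the total visible vertex count from iMOAB_GetMeshInfo:

    int nv = nverts[2];
    std::vector< iMOAB_GlobalID > vertex_gids( nv );
    ErrCode rc = iMOAB_GetVertexID( appID, &nv, vertex_gids.data() );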

◆ iMOAB_GetVertexOwnership()

ErrCode iMOAB_GetVertexOwnership ( iMOAB_AppID  pid,
int *  vertices_length,
int *  visible_global_rank_ID 
)

Get vertex ownership information.

For each vertex based on the local ID, return the process that owns the vertex (local, shared or ghost)

Note
Shared vertices could be owned by different tasks. Local and shared vertices are first, ghost vertices are next in the array. Ghost vertices are always owned by a different process ID. Array allocated by the user with total size of visible vertices.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]vertices_length(int*) The allocated size of array (typically size := num_visible_vertices).
[out]visible_global_rank_ID(int*) The processor rank owning each of the local vertices.
Returns
ErrCode The error code indicating success or failure.

Definition at line 982 of file iMOAB.cpp.

983 {
984  assert( vertices_length && *vertices_length );
985  assert( visible_global_rank_ID );
986 
987  Range& verts = context.appDatas[*pid].all_verts;
988  int i = 0;
989 #ifdef MOAB_HAVE_MPI
990  ParallelComm* pco = context.appDatas[*pid].pcomm;
991 
992  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
993  {
994  ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
995  }
996 
997  if( i != *vertices_length )
998  {
999  return moab::MB_FAILURE;
1000  } // warning array allocation problem
1001 
1002 #else
1003 
1004  /* everything owned by proc 0 */
1005  if( (int)verts.size() != *vertices_length )
1006  {
1007  return moab::MB_FAILURE;
1008  } // warning array allocation problem
1009 
1010  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
1011  {
1012  visible_global_rank_ID[i] = 0;
1013  } // all vertices are owned by processor 0, as this is serial run
1014 
1015 #endif
1016 
1017  return moab::MB_SUCCESS;
1018 }

References GlobalContext::appDatas, moab::Range::begin(), context, moab::Range::end(), ErrorCode, moab::ParallelComm::get_owner(), MB_CHK_ERR, MB_SUCCESS, and moab::Range::size().
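
The matching ownership query, under the same sizing assumption:

    std::vector< int > vertex_owner( nv );
    ErrCode rc = iMOAB_GetVertexOwnership( appID, &nv, vertex_owner.data() );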

◆ iMOAB_GetVisibleElementsInfo()

ErrCode iMOAB_GetVisibleElementsInfo ( iMOAB_AppID  pid,
int *  num_visible_elements,
iMOAB_GlobalID element_global_IDs,
int *  ranks,
iMOAB_GlobalID block_IDs 
)

Get the visible elements information.

For all visible elements, return the global IDs, the owning ranks, and the block IDs they belong to.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_visible_elements(int*) The number of visible elements (returned by GetMeshInfo).
[out]element_global_IDs(iMOAB_GlobalID*) The global IDs of all visible elements (array allocated by user).
[out]ranks(int*) The owning ranks of elements.
[out]block_IDs(iMOAB_GlobalID*) The block ids containing the elements.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1101 of file iMOAB.cpp.

1106 {
1107  appData& data = context.appDatas[*pid];
1108 #ifdef MOAB_HAVE_MPI
1109  ParallelComm* pco = context.appDatas[*pid].pcomm;
1110 #endif
1111 
1112  ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
1113 
1114  int i = 0;
1115 
1116  for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
1117  {
1118 #ifdef MOAB_HAVE_MPI
1119  rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
1120 
1121 #else
1122  /* everything owned by task 0 */
1123  ranks[i] = 0;
1124 #endif
1125  }
1126 
1127  for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
1128  {
1129  EntityHandle matMeshSet = *mit;
1130  Range elems;
1131  rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1132 
1133  int valMatTag;
1134  rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
1135 
1136  for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
1137  {
1138  EntityHandle eh = *eit;
1139  int index = data.primary_elems.index( eh );
1140 
1141  if( -1 == index )
1142  {
1143  return moab::MB_FAILURE;
1144  }
1145 
1146  if( -1 >= *num_visible_elements )
1147  {
1148  return moab::MB_FAILURE;
1149  }
1150 
1151  block_IDs[index] = valMatTag;
1152  }
1153  }
1154 
1155  return moab::MB_SUCCESS;
1156 }

References GlobalContext::appDatas, moab::Range::begin(), context, moab::Range::end(), ErrorCode, moab::Interface::get_entities_by_handle(), moab::ParallelComm::get_owner(), GlobalContext::globalID_tag, moab::Range::index(), appData::mat_sets, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, and moab::Interface::tag_get_data().
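
A sketch sized by the total visible element count from iMOAB_GetMeshInfo:

    int ne = nelems[2];
    std::vector< iMOAB_GlobalID > elem_gids( ne ), elem_blocks( ne );
    std::vector< int > elem_ranks( ne );
    ErrCode rc = iMOAB_GetVisibleElementsInfo( appID, &ne, elem_gids.data(),
                                               elem_ranks.data(), elem_blocks.data() );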

◆ iMOAB_GetVisibleVerticesCoordinates()

ErrCode iMOAB_GetVisibleVerticesCoordinates ( iMOAB_AppID  pid,
int *  coords_length,
double *  coordinates 
)

Get vertex coordinates for all local (owned and ghosted) vertices.

Note
Coordinates are returned interleaved, in an array allocated by the user. (Do we need an option for blocked coordinates?) The size of the array is the dimension times the number of visible vertices. The local ordering is implicit: owned/shared vertices are first, then ghosts.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]coords_length(int*) The size of the allocated coordinate array (array allocated by user, size := 3*num_visible_vertices).
[out]coordinates(double*) The pointer to user allocated memory that will be filled with interleaved coordinates.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1020 of file iMOAB.cpp.

1021 {
1022  Range& verts = context.appDatas[*pid].all_verts;
1023 
1024  // interleaved coordinates, so that means deep copy anyway
1025  if( *coords_length != 3 * (int)verts.size() )
1026  {
1027  return moab::MB_FAILURE;
1028  }
1029 
1030  ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
1031 
1032  return moab::MB_SUCCESS;
1033 }

References GlobalContext::appDatas, context, ErrorCode, moab::Interface::get_coords(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, and moab::Range::size().
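
A sketch for retrieving the interleaved coordinates:

    int coords_length = 3 * nverts[2];
    std::vector< double > coords( coords_length );  // x0,y0,z0, x1,y1,z1, ...
    ErrCode rc = iMOAB_GetVisibleVerticesCoordinates( appID, &coords_length, coords.data() );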

◆ iMOAB_Initialize()

ErrCode iMOAB_Initialize ( int  argc,
iMOAB_String argv 
)

Initialize the iMOAB interface implementation.

Note
Will create the MOAB instance, if not created already (reference counted).

Operations: Collective

Parameters
[in]argc(int) Number of command line arguments
[in]argv(iMOAB_String*) Command line arguments
Returns
ErrCode

Definition at line 131 of file iMOAB.cpp.

132 {
133  if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
134 
135  context.iArgc = argc;
136  context.iArgv = argv; // shallow copy
137 
138  if( 0 == context.refCountMB )
139  {
140  context.MBI = new( std::nothrow ) moab::Core;
141  // retrieve the default tags
142  const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
143  DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
144  // materials (blocks), surface-BC (neumann), vertex-BC (Dirichlet), global-id
145  Tag gtags[4];
146  for( int i = 0; i < 4; i++ )
147  {
148  MB_CHK_ERR(
149  context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY ) );
150  }
151  // now set the relevant (standard) tags for future use
152  context.material_tag = gtags[0];
153  context.neumann_tag = gtags[1];
154  context.dirichlet_tag = gtags[2];
155  context.globalID_tag = gtags[3];
156 
157  context.MPI_initialized = false;
158 #ifdef MOAB_HAVE_MPI
159  int flagInit;
160  MPI_Initialized( &flagInit );
161 
162  if( flagInit && !context.MPI_initialized )
163  {
164  MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
165  MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
166  context.MPI_initialized = true;
167  }
168 #endif
169  }
170 
171  context.refCountMB++;
172  return moab::MB_SUCCESS;
173 }

References context, DIRICHLET_SET_TAG_NAME, GlobalContext::dirichlet_tag, GLOBAL_ID_TAG_NAME, GlobalContext::globalID_tag, GlobalContext::globalrank, GlobalContext::iArgc, GlobalContext::iArgv, IMOAB_CHECKPOINTER, MATERIAL_SET_TAG_NAME, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MB_TAG_ANY, MB_TYPE_INTEGER, GlobalContext::MBI, GlobalContext::MPI_initialized, NEUMANN_SET_TAG_NAME, GlobalContext::neumann_tag, GlobalContext::refCountMB, moab::Interface::tag_get_handle(), and GlobalContext::worldprocs.

Referenced by iMOAB_InitializeFortran().
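
A minimal driver sketch for an MPI build (the application name "EXAMPLE_APP" and the component id are hypothetical):

    #include "moab/iMOAB.h"
    #include "moab_mpi.h"

    int main( int argc, char** argv )
    {
        MPI_Init( &argc, &argv );
        ErrCode rc = iMOAB_Initialize( argc, argv );
        int app_id = -1;                  // storage for the application handle
        iMOAB_AppID appID = &app_id;      // iMOAB_AppID is a pointer to int
        int compid = 10;                  // external component id, positive by convention
        MPI_Comm comm = MPI_COMM_WORLD;
        rc = iMOAB_RegisterApplication( "EXAMPLE_APP", &comm, &compid, appID );
        // ... load a mesh and use the query functions documented on this page ...
        rc = iMOAB_DeregisterApplication( appID );
        rc = iMOAB_Finalize();
        MPI_Finalize();
        return 0;
    }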

◆ iMOAB_InitializeFortran()

ErrCode iMOAB_InitializeFortran ( void  )

Initialize the iMOAB interface implementation from Fortran driver.

It will create the MOAB instance, if not created already (reference counted).

Operations: Collective

Returns
ErrCode The error code indicating success or failure.

Definition at line 175 of file iMOAB.cpp.

176 {
177  return iMOAB_Initialize( 0, 0 );
178 }

References iMOAB_Initialize().

◆ iMOAB_LoadMesh()

ErrCode iMOAB_LoadMesh ( iMOAB_AppID  pid,
const iMOAB_String  filename,
const iMOAB_String  read_options,
int *  num_ghost_layers 
)

Load a MOAB mesh file in parallel and exchange ghost layers as requested.

Note
All communication is MPI-based, and the read options include parallel loading information and resolution of shared entities. The local MOAB instance is populated with the mesh cells and vertices of the corresponding local partition.

This will also exchange ghost cells and vertices, as requested. The default bridge dimension is 0 (vertices), and all additional lower-dimensional sub-entities are exchanged (mesh edges and faces). Tags in the file are not exchanged by default; only the standard GLOBAL_ID, MATERIAL_SET, NEUMANN_SET and DIRICHLET_SET tag information is exchanged. The global ID tag is exchanged for all cells and vertices, and the material, Neumann and Dirichlet sets are augmented with the ghost entities.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]filename(iMOAB_String) The MOAB mesh file (H5M) to load onto the internal application mesh set.
[in]read_options(iMOAB_String) Additional options for reading the MOAB mesh file in parallel.
[in]num_ghost_layers(int*) The total number of ghost layers to exchange during mesh loading.
Returns
ErrCode The error code indicating success or failure.

Definition at line 583 of file iMOAB.cpp.

587 {
588  IMOAB_CHECKPOINTER( filename, 2 );
589  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
590  IMOAB_CHECKPOINTER( num_ghost_layers, 4 );
591 
592  // make sure we use the file set and pcomm associated with the *pid
593  std::ostringstream newopts;
594  if( read_options ) newopts << read_options;
595 
596 #ifdef MOAB_HAVE_MPI
597 
598  if( context.MPI_initialized )
599  {
600  if( context.worldprocs > 1 )
601  {
602  std::string opts( ( read_options ? read_options : "" ) );
603  std::string pcid( "PARALLEL_COMM=" );
604  std::size_t found = opts.find( pcid );
605 
606  if( found != std::string::npos )
607  {
608  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
609  return moab::MB_FAILURE;
610  }
611 
612  // apply the PARALLEL_COMM option only for h5m and nc files; it does not work for .g
613  // files (used in test_remapping)
614  std::string filen( filename );
615  std::string::size_type idx = filen.rfind( '.' );
616 
617  if( idx != std::string::npos )
618  {
619  ParallelComm* pco = context.appDatas[*pid].pcomm;
620  std::string extension = filen.substr( idx + 1 );
621  if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
622  newopts << ";;PARALLEL_COMM=" << pco->get_id();
623  }
624 
625  if( *num_ghost_layers >= 1 )
626  {
627  // if we want ghosts, we will want additional entities: the trailing .3,
628  // because the addl ents can be edges and faces that are part of the neumann sets
629  std::string pcid2( "PARALLEL_GHOSTS=" );
630  std::size_t found2 = opts.find( pcid2 );
631 
632  if( found2 != std::string::npos )
633  {
634  std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
635  "number of layers \n";
636  }
637  else
638  {
639  // dimension of primary entities is 3 here, but it could be 2 for climate
640  // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
641  // example: ";PARALLEL_GHOSTS=2.0.1"
642  newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
643  }
644  }
645  }
646  }
647 #else
648  IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
649 #endif
650 
651  // Now let us actually load the MOAB file with the appropriate read options
652  ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
653 
654 #ifdef VERBOSE
655  // some debugging stuff
656  std::ostringstream outfile;
657 #ifdef MOAB_HAVE_MPI
658  ParallelComm* pco = context.appDatas[*pid].pcomm;
659  int rank = pco->rank();
660  int nprocs = pco->size();
661  outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
662 #else
663  outfile << "TaskMesh_n1.0.h5m";
664 #endif
665  // the mesh contains ghosts too, but they are not part of mat/neumann set
666  // write in serial the file, to see what tags are missing
667  rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval ); // everything on current task, written in serial
668 #endif
669 
670  // Update ghost layer information
671  context.appDatas[*pid].num_ghost_layers = *num_ghost_layers;
672 
673  // Update mesh information
674  return iMOAB_UpdateMeshInfo( pid );
675 }

References GlobalContext::appDatas, context, ErrorCode, moab::ParallelComm::get_id(), IMOAB_ASSERT, IMOAB_CHECKPOINTER, iMOAB_UpdateMeshInfo(), moab::Interface::load_file(), MB_CHK_ERR, GlobalContext::MBI, GlobalContext::MPI_initialized, moab::ParallelComm::rank(), moab::ParallelComm::size(), GlobalContext::worldprocs, and moab::Interface::write_file().
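
A hedged load sketch; the file name is illustrative, and the read options shown are typical MOAB parallel options to be adapted to the mesh at hand:

    int num_ghost_layers = 1;
    ErrCode rc = iMOAB_LoadMesh( appID, "mesh.h5m",
                                 "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;"
                                 "PARALLEL_RESOLVE_SHARED_ENTS",
                                 &num_ghost_layers );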

◆ iMOAB_MergeVertices()

ErrCode iMOAB_MergeVertices ( iMOAB_AppID  pid)

Definition at line 3123 of file iMOAB.cpp.

3124 {
3125  appData& data = context.appDatas[*pid];
3126  ParallelComm* pco = context.appDatas[*pid].pcomm;
3127  // collapse vertices and transform cells into triangles/quads /polys
3128  // tags we care about: area, frac, global id
3129  std::vector< Tag > tagsList;
3130  Tag tag;
3131  ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
3132  if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE; // fatal error, abort
3133  tagsList.push_back( tag );
3134  rval = context.MBI->tag_get_handle( "area", tag );
3135  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3136  rval = context.MBI->tag_get_handle( "frac", tag );
3137  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3138  double tol = 1.0e-9;
3139  rval = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
3140 
3141  // clean material sets of cells that were deleted
3142  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
3143  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
3144 
3145  if( !data.mat_sets.empty() )
3146  {
3147  EntityHandle matSet = data.mat_sets[0];
3148  Range elems;
3149  rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
3150  rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
3151  // put back only cells from data.file_set
3152  elems.clear();
3153  rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
3154  rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
3155  }
3156  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
3157 
3158  ParallelMergeMesh pmm( pco, tol );
3159  rval = pmm.merge( data.file_set,
3160  /* do not do local merge*/ false,
3161  /* 2d cells*/ 2 );MB_CHK_ERR( rval );
3162 
3163  // assign global ids only for vertices, cells have them fine
3164  rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
3165 
3166  // set the partition tag on the file set
3167  Tag part_tag;
3168  int dum_id = -1;
3169  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
3170  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
3171 
3172  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
3173  {
3174  std::cout << " can't get par part tag.\n";
3175  return moab::MB_FAILURE;
3176  }
3177 
3178  int rank = pco->rank();
3179  rval = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
3180 
3181  return moab::MB_SUCCESS;
3182 }

References moab::Interface::add_entities(), GlobalContext::appDatas, moab::ParallelComm::assign_global_ids(), moab::Range::clear(), context, moab::Range::empty(), ErrorCode, appData::file_set, moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type_and_tag(), iMOAB_UpdateMeshInfo(), appData::mat_sets, GlobalContext::material_tag, MB_ALREADY_ALLOCATED, MB_CHK_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_SPARSE, MB_TYPE_INTEGER, MBENTITYSET, GlobalContext::MBI, moab::ParallelMergeMesh::merge(), moab::ParallelComm::rank(), moab::IntxUtils::remove_duplicate_vertices(), moab::Interface::remove_entities(), moab::Interface::tag_get_handle(), moab::Interface::tag_set_data(), and moab::Interface::UNION.

◆ iMOAB_ReadHeaderInfo()

ErrCode iMOAB_ReadHeaderInfo ( const iMOAB_String  filename,
int *  num_global_vertices,
int *  num_global_elements,
int *  num_dimension,
int *  num_parts 
)

It should be called on the master task only; the information obtained can then be broadcast by the user. It is a fast lookup in the header of the file.

Note
Operations: Not collective
Parameters
[in]filename(iMOAB_String) The MOAB mesh file (H5M) to probe for header information.
[out]num_global_vertices(int*) The total number of vertices in the mesh file.
[out]num_global_elements(int*) The total number of elements (of highest dimension only).
[out]num_dimension(int*) The highest dimension of elements in the mesh (Edge=1, Tri/Quad=2, Tet/Hex/Prism/Pyramid=3).
[out]num_parts(int*) The total number of partitions available in the mesh file, typically partitioned with mbpart during pre-processing.
Returns
ErrCode The error code indicating success or failure.

Definition at line 439 of file iMOAB.cpp.

444 {
445  IMOAB_CHECKPOINTER( filename, 1 );
446  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
447 
448 #ifdef MOAB_HAVE_HDF5
449  std::string filen( filename );
450 
451  int edges = 0;
452  int faces = 0;
453  int regions = 0;
454  if( num_global_vertices ) *num_global_vertices = 0;
455  if( num_global_elements ) *num_global_elements = 0;
456  if( num_dimension ) *num_dimension = 0;
457  if( num_parts ) *num_parts = 0;
458 
459  mhdf_FileHandle file;
460  mhdf_Status status;
461  unsigned long max_id;
462  struct mhdf_FileDesc* data;
463 
464  file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
465 
466  if( mhdf_isError( &status ) )
467  {
468  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
469  return moab::MB_FAILURE;
470  }
471 
472  data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
473  1 ); // will use extra set info; will get parallel partition tag info too!
474 
475  if( mhdf_isError( &status ) )
476  {
477  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
478  return moab::MB_FAILURE;
479  }
480 
481  if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
482  if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
483 
484  for( int i = 0; i < data->num_elem_desc; i++ )
485  {
486  struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
487  struct mhdf_EntDesc* ent_d = &( el_desc->desc );
488 
489  if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
490  {
491  edges += ent_d->count;
492  }
493 
494  if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
495  {
496  faces += ent_d->count;
497  }
498 
499  if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
500  {
501  faces += ent_d->count;
502  }
503 
504  if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
505  {
506  faces += ent_d->count;
507  }
508 
509  if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
510  {
511  regions += ent_d->count;
512  }
513 
514  if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
515  {
516  regions += ent_d->count;
517  }
518 
519  if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
520  {
521  regions += ent_d->count;
522  }
523 
524  if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
525  {
526  regions += ent_d->count;
527  }
528 
529  if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
530  {
531  regions += ent_d->count;
532  }
533 
534  if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
535  {
536  regions += ent_d->count;
537  }
538 
539  if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
540  {
541  regions += ent_d->count;
542  }
543  }
544 
545  if( num_parts ) *num_parts = data->numEntSets[0];
546 
547  // is this required?
548  if( edges > 0 )
549  {
550  if( num_dimension ) *num_dimension = 1; // I don't think it will ever return 1
551  if( num_global_elements ) *num_global_elements = edges;
552  }
553 
554  if( faces > 0 )
555  {
556  if( num_dimension ) *num_dimension = 2;
557  if( num_global_elements ) *num_global_elements = faces;
558  }
559 
560  if( regions > 0 )
561  {
562  if( num_dimension ) *num_dimension = 3;
563  if( num_global_elements ) *num_global_elements = regions;
564  }
565 
566  mhdf_closeFile( file, &status );
567 
568  free( data );
569 
570 #else
571  std::cout << filename
572  << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
573  "formats other than a h5m file.\n";
574  if( num_global_vertices ) *num_global_vertices = 0;
575  if( num_global_elements ) *num_global_elements = 0;
576  if( num_dimension ) *num_dimension = 0;
577  if( num_parts ) *num_parts = 0;
578 #endif
579 
580  return moab::MB_SUCCESS;
581 }

References mhdf_EntDesc::count, mhdf_ElemDesc::desc, mhdf_FileDesc::elems, IMOAB_ASSERT, IMOAB_CHECKPOINTER, MB_SUCCESS, mdhf_HEX_TYPE_NAME, mdhf_KNIFE_TYPE_NAME, mhdf_closeFile(), mhdf_EDGE_TYPE_NAME, mhdf_getFileSummary(), mhdf_isError(), mhdf_message(), mhdf_openFile(), mhdf_POLYGON_TYPE_NAME, mhdf_POLYHEDRON_TYPE_NAME, mhdf_PRISM_TYPE_NAME, mhdf_PYRAMID_TYPE_NAME, mhdf_QUAD_TYPE_NAME, mhdf_SEPTAHEDRON_TYPE_NAME, mhdf_TET_TYPE_NAME, mhdf_TRI_TYPE_NAME, mhdf_FileDesc::nodes, mhdf_FileDesc::num_elem_desc, mhdf_FileDesc::numEntSets, mhdf_ElemDesc::type, and mhdf_EntDesc::vals_per_ent.
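
A sketch of the intended master-task usage (rank would come from MPI_Comm_rank; the broadcast is the caller's responsibility):

    int ngv = 0, nge = 0, dim = 0, nparts = 0;
    if( 0 == rank )  // master task only
    {
        ErrCode rc = iMOAB_ReadHeaderInfo( "mesh.h5m", &ngv, &nge, &dim, &nparts );
    }
    // the four counts can then be broadcast to the other tasks, e.g. with MPI_Bcast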

◆ iMOAB_ReceiveElementTag()

ErrCode iMOAB_ReceiveElementTag ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
MPI_Comm *  joint_communicator,
int *  context_id 
)

Definition at line 2715 of file iMOAB.cpp.

2719 {
2720  appData& data = context.appDatas[*pid];
2721  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2722  if( mt == data.pgraph.end() ) return moab::MB_FAILURE;
2723 
2724  ParCommGraph* cgraph = mt->second;
2725  ParallelComm* pco = context.appDatas[*pid].pcomm;
2726  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2727  : *joint_communicator );
2728  Range owned = ( data.point_cloud ? data.local_verts : data.owned_elems );
2729 
2730  // how do I know if this receiver already participated in an intersection driven by coupler?
2731  // also, what if this was the "source" mesh in intx?
2732  // in that case, the elements might have been instantiated in the coverage set locally, the
2733  // "owned" range can be different the elements are now in tempestRemap coverage_set
2734  EntityHandle cover_set = cgraph->get_cover_set();
2735  if( 0 != cover_set ) MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2736 
2737  // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2738  // e.g., after atm-ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the cover set
2739  // from the intx remapper
2740 #ifdef MOAB_HAVE_TEMPESTREMAP
2741  if( data.tempestData.remapper != nullptr ) // this is the case this is part of intx;;
2742  {
2743  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2744  MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2745  }
2746 #endif
2747 
2748  std::string tag_name( tag_storage_name );
2749  // we assume that there are separators ":" between the tag names
2750  std::vector< std::string > tagNames;
2751  std::vector< Tag > tagHandles;
2752  std::string separator( ":" );
2753  split_tag_names( tag_name, separator, tagNames );
2754  for( size_t i = 0; i < tagNames.size(); i++ )
2755  {
2756  Tag tagHandle;
2757  ErrorCode rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2758  if( MB_SUCCESS != rval || nullptr == tagHandle )
2759  {
2760  MB_CHK_SET_ERR( moab::MB_FAILURE,
2761  " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i );
2762  }
2763  tagHandles.push_back( tagHandle );
2764  }
2765 
2766 #ifdef VERBOSE
2767  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
2768  << " and file set = " << ( data.file_set ) << "\n";
2769 #endif
2770  // pco is needed to pack, and for moab instance, not for communication!
2771  // still use nonblocking communication
2772  MB_CHK_SET_ERR( cgraph->receive_tag_values( global, pco, owned, tagHandles ), "failed to receive tag values" );
2773 
2774 #ifdef VERBOSE
2775  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
2776 #endif
2777 
2778  return moab::MB_SUCCESS;
2779 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, ErrorCode, appData::file_set, moab::ParCommGraph::get_cover_set(), moab::Interface::get_entities_by_dimension(), appData::is_fortran, appData::local_verts, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, GlobalContext::MBI, appData::owned_elems, appData::pgraph, appData::point_cloud, moab::ParallelComm::rank(), moab::ParCommGraph::receive_tag_values(), split_tag_names(), and moab::Interface::tag_get_handle().

◆ iMOAB_ReceiveMesh()

ErrCode iMOAB_ReceiveMesh ( iMOAB_AppID  pid,
MPI_Comm *  joint_communicator,
MPI_Group *  sendingGroup,
int *  scompid 
)

Definition at line 2482 of file iMOAB.cpp.

2483 {
2484  assert( joint_communicator != nullptr );
2485  assert( sendingGroup != nullptr );
2486  assert( scompid != nullptr );
2487 
2488  ErrorCode rval;
2489  appData& data = context.appDatas[*pid];
2490  ParallelComm* pco = context.appDatas[*pid].pcomm;
2491  MPI_Comm receive = pco->comm();
2492  EntityHandle local_set = data.file_set;
2493 
2494  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2495  : *joint_communicator );
2496  MPI_Group sendGroup =
2497  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
2498 
2499  // first see what are the processors in each group; get the sender group too, from the sender
2500  // communicator
2501  MPI_Group receiverGroup;
2502  int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
2503 
2504  // instantiate the par comm graph
2505  ParCommGraph* cgraph =
2506  new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
2507  // TODO we should search if we have another pcomm with the same comp ids in the list already
2508  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2509  context.appDatas[*pid].pgraph[*scompid] = cgraph;
2510 
2511  int receiver_rank = -1;
2512  MPI_Comm_rank( receive, &receiver_rank );
2513 
2514  // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
2515  // knows what data to expect
2516  std::vector< int > pack_array;
2517  rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
2518 
2519  // senders across for the current receiver
2520  int current_receiver = cgraph->receiver( receiver_rank );
2521 
2522  std::vector< int > senders_local;
2523  size_t n = 0;
2524 
2525  while( n < pack_array.size() )
2526  {
2527  if( current_receiver == pack_array[n] )
2528  {
2529  for( int j = 0; j < pack_array[n + 1]; j++ )
2530  {
2531  senders_local.push_back( pack_array[n + 2 + j] );
2532  }
2533 
2534  break;
2535  }
2536 
2537  n = n + 2 + pack_array[n + 1];
2538  }
2539 
2540 #ifdef VERBOSE
2541  std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
2542  << senders_local.size() << " tasks: ";
2543 
2544  for( int k = 0; k < (int)senders_local.size(); k++ )
2545  {
2546  std::cout << " " << senders_local[k];
2547  }
2548 
2549  std::cout << "\n";
2550 #endif
2551 
2552  if( senders_local.empty() )
2553  {
2554  std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
2555  }
2556  rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
2557 
2558  // after we are done, we could merge vertices that come from different senders, but
2559  // have the same global id
2560  Tag idtag;
2561  rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
2562 
2563  // data.point_cloud = false;
2564  Range local_ents;
2565  rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
2566 
2567  // do not do merge if point cloud
2568  if( !local_ents.all_of_type( MBVERTEX ) )
2569  {
2570  if( (int)senders_local.size() >= 2 ) // need to remove duplicate vertices
2571  // that might come from different senders
2572  {
2573 
2574  Range local_verts = local_ents.subset_by_type( MBVERTEX );
2575  Range local_elems = subtract( local_ents, local_verts );
2576 
2577  // remove from local set the vertices
2578  rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
2579 
2580 #ifdef VERBOSE
2581  std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
2582 #endif
2583  MergeMesh mm( context.MBI );
2584 
2585  rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
2586 
2587  Range new_verts; // local elems are local entities without vertices
2588  rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
2589 
2590 #ifdef VERBOSE
2591  std::cout << "after merging: new verts: " << new_verts.size() << "\n";
2592 #endif
2593  rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
2594  }
2595  }
2596  else
2597  data.point_cloud = true;
2598 
2599  if( !data.point_cloud )
2600  {
2601  // still need to resolve shared entities (in this case, vertices )
2602  rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
2603  }
2604  else
2605  {
2606  // if partition tag exists, set it to current rank; just to make it visible in VisIt
2607  Tag densePartTag;
2608  rval = context.MBI->tag_get_handle( "partition", densePartTag );
2609  if( nullptr != densePartTag && MB_SUCCESS == rval )
2610  {
2611  Range local_verts;
2612  rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
2613  std::vector< int > vals;
2614  int rank = pco->rank();
2615  vals.resize( local_verts.size(), rank );
2616  rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
2617  }
2618  }
2619  // set the parallel partition tag
2620  Tag part_tag;
2621  int dum_id = -1;
2622  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2623  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2624 
2625  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2626  {
2627  std::cout << " can't get par part tag.\n";
2628  return moab::MB_FAILURE;
2629  }
2630 
2631  int rank = pco->rank();
2632  rval = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
2633 
2634  // make sure that the GLOBAL_ID is defined as a tag
2635  int tagtype = 0; // dense, integer
2636  int numco = 1; // size
2637  int tagindex; // not used
2638  rval = iMOAB_DefineTagStorage( pid, "GLOBAL_ID", &tagtype, &numco, &tagindex );MB_CHK_ERR( rval );
2639  // populate the mesh with current data info
2640  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
2641 
2642  // mark for deletion
2643  MPI_Group_free( &receiverGroup );
2644 
2645  return moab::MB_SUCCESS;
2646 }

References moab::Interface::add_entities(), moab::Range::all_of_type(), GlobalContext::appDatas, CHK_MPI_ERR, moab::ParallelComm::comm(), context, ErrorCode, appData::file_set, moab::Interface::get_connectivity(), moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_handle(), iMOAB_DefineTagStorage(), iMOAB_UpdateMeshInfo(), appData::is_fortran, MB_ALREADY_ALLOCATED, MB_CHK_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_SPARSE, MB_TYPE_INTEGER, GlobalContext::MBI, MBVERTEX, moab::MergeMesh::merge_using_integer_tag(), appData::point_cloud, moab::ParallelComm::rank(), moab::ParCommGraph::receive_comm_graph(), moab::ParCommGraph::receive_mesh(), moab::ParCommGraph::receiver(), moab::Interface::remove_entities(), moab::ParallelComm::resolve_shared_ents(), moab::Range::size(), moab::Range::subset_by_type(), moab::subtract(), moab::Interface::tag_get_handle(), and moab::Interface::tag_set_data().

◆ iMOAB_ReduceTagsMax()

ErrCode iMOAB_ReduceTagsMax ( iMOAB_AppID  pid,
int *  tag_index,
int *  ent_type 
)

Perform a global reduction over the processes in the current application's communicator. Specifically, this routine reduces the tag indicated by tag_index to its maximum value across processes.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_index(int*) The tag index of interest.
[in]ent_type(int*) The type of entity for tag reduction operation (vertices = 0, elements = 1)
Returns
ErrCode The error code indicating success or failure.
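
A minimal usage sketch (illustrative, not from the source; assumes pid holds an application ID from iMOAB_RegisterApplication and tagIndex was returned earlier by iMOAB_DefineTagStorage):

    int tagIndex = 0; // hypothetical index of the tag of interest
    int entType = 1;  // 1 = primary elements, 0 = vertices
    ErrCode ierr = iMOAB_ReduceTagsMax( &pid, &tagIndex, &entType );
    if( ierr != 0 ) { /* handle failure */ }

After the call, shared entities hold the maximum tag value across the tasks that share them.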

Definition at line 2114 of file iMOAB.cpp.

2115 {
2116 #ifdef MOAB_HAVE_MPI
2117  appData& data = context.appDatas[*pid];
2118  Range ent_exchange;
2119 
2120  if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
2121  {
2122  return moab::MB_FAILURE;
2123  } // error in tag index
2124 
2125  Tag tagh = data.tagList[*tag_index];
2126 
2127  if( *ent_type == 0 )
2128  {
2129  ent_exchange = data.all_verts;
2130  }
2131  else if( *ent_type == 1 )
2132  {
2133  ent_exchange = data.primary_elems;
2134  }
2135  else
2136  {
2137  return moab::MB_FAILURE;
2138  } // unexpected type
2139 
2140  ParallelComm* pco = context.appDatas[*pid].pcomm;
2141  // we could do different MPI_Op; do not bother now, we will call from fortran
2142  ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
2143 
2144 #else
2145  /* do nothing if serial */
2146  // just silence the warning
2147  // do not call sync tags in serial!
2148  int k = *pid + *tag_index + *ent_type;
2149  k++; // just do junk, to avoid complaints
2150 #endif
2151  return moab::MB_SUCCESS;
2152 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, appData::primary_elems, moab::ParallelComm::reduce_tags(), and appData::tagList.

◆ iMOAB_RegisterApplication()

ErrCode iMOAB_RegisterApplication ( const iMOAB_String  app_name,
MPI_Comm *  comm,
int *  compid,
iMOAB_AppID  pid 
)

Definition at line 216 of file iMOAB.cpp.

222 {
223  IMOAB_CHECKPOINTER( app_name, 1 );
224 #ifdef MOAB_HAVE_MPI
225  IMOAB_CHECKPOINTER( comm, 2 );
226  IMOAB_CHECKPOINTER( compid, 3 );
227 #else
228  IMOAB_CHECKPOINTER( compid, 2 );
229 #endif
230 
231  // will create a parallel comm for this application too, so there will be a
232  // mapping from *pid to file set and to parallel comm instances
233  std::string name( app_name );
234 
235  if( context.appIdMap.find( name ) != context.appIdMap.end() )
236  {
237  std::cout << " application " << name << " already registered \n";
238  return moab::MB_FAILURE;
239  }
240 
241  // trivial hash: just use the id that the user provided and assume it is unique
242  // *pid = *compid;
243  // hash: compute a unique hash as a combination of the appname and the component id
244  *pid = apphash( app_name, *compid );
245  // store map of application name and the ID we just generated
246  context.appIdMap[name] = *pid;
247  int rankHere = 0;
248 #ifdef MOAB_HAVE_MPI
249  MPI_Comm_rank( *comm, &rankHere );
250 #endif
251  if( !rankHere )
252  std::cout << " application " << name << " with ID = " << *pid << " and external id: " << *compid
253  << " is registered now \n";
254  if( *compid <= 0 )
255  {
256  std::cout << " convention for external application is to have its id positive \n";
257  return moab::MB_FAILURE;
258  }
259 
260  // create now the file set that will be used for loading the model in
261  EntityHandle file_set;
262  ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
263 
264  appData app_data;
265  app_data.file_set = file_set;
266  app_data.global_id = *compid; // will be used mostly for par comm graph
267  app_data.name = name; // save the name of application
268 
269 #ifdef MOAB_HAVE_TEMPESTREMAP
270  app_data.tempestData.remapper = nullptr; // Only allocate as needed
271  app_data.tempestData.num_src_ghost_layers = 0;
272  app_data.tempestData.num_tgt_ghost_layers = 0;
273 #endif
274 
275  // set some default values
276  app_data.num_ghost_layers = 0;
277  app_data.point_cloud = false;
278  app_data.is_fortran = false;
279 #ifdef MOAB_HAVE_TEMPESTREMAP
280  app_data.secondary_file_set = app_data.file_set;
281 #endif
282 
283 #ifdef MOAB_HAVE_MPI
284  if( *comm ) app_data.pcomm = new ParallelComm( context.MBI, *comm );
285 #endif
 286  context.appDatas[*pid] = app_data; // it will correspond to app_FileSets[*pid], which will be the file set of interest
287  return moab::MB_SUCCESS;
288 }

References GlobalContext::appDatas, apphash(), GlobalContext::appIdMap, context, moab::Interface::create_meshset(), ErrorCode, appData::file_set, appData::global_id, IMOAB_CHECKPOINTER, appData::is_fortran, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, MESHSET_SET, appData::name, appData::num_ghost_layers, appData::pcomm, and appData::point_cloud.

Referenced by iMOAB_RegisterApplicationFortran().
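
A minimal registration sketch (illustrative; the application name and component id are hypothetical, and iMOAB_Initialize and MPI_Init are assumed to have been called already):

    MPI_Comm comm = MPI_COMM_WORLD;
    int compid = 10;           // external component id; by convention it must be positive
    int pid;                   // receives the generated application ID
    char appName[] = "SRCAPP"; // hypothetical application name
    ErrCode ierr = iMOAB_RegisterApplication( appName, &comm, &compid, &pid );
    if( ierr != 0 ) { /* duplicate name, non-positive compid, or meshset creation failed */ }

The returned pid is then passed by address to all subsequent iMOAB calls for this application.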

◆ iMOAB_RegisterApplicationFortran()

ErrCode iMOAB_RegisterApplicationFortran ( const iMOAB_String  app_name,
int *  comm,
int *  compid,
iMOAB_AppID  pid 
)

Definition at line 290 of file iMOAB.cpp.

296 {
297  IMOAB_CHECKPOINTER( app_name, 1 );
298 #ifdef MOAB_HAVE_MPI
299  IMOAB_CHECKPOINTER( comm, 2 );
300  IMOAB_CHECKPOINTER( compid, 3 );
301 #else
302  IMOAB_CHECKPOINTER( compid, 2 );
303 #endif
304 
305  ErrCode err;
306  assert( app_name != nullptr );
307  std::string name( app_name );
308 
309 #ifdef MOAB_HAVE_MPI
310  MPI_Comm ccomm;
311  if( comm )
312  {
313  // Convert from Fortran communicator to a C communicator
314  // NOTE: see transfer of handles at
315  // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
316  ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
317  }
318 #endif
319 
320  // now call C style registration function:
321  err = iMOAB_RegisterApplication( app_name,
322 #ifdef MOAB_HAVE_MPI
323  &ccomm,
324 #endif
325  compid, pid );
326 
327  // Now that we have created the application context, store that
328  // the application being registered is from a Fortran context
329  context.appDatas[*pid].is_fortran = true;
330 
331  return err;
332 }

References GlobalContext::appDatas, context, ErrCode, IMOAB_CHECKPOINTER, iMOAB_RegisterApplication(), and MOAB_HAVE_MPI.

◆ iMOAB_ResolveSharedEntities()

ErrCode iMOAB_ResolveSharedEntities ( iMOAB_AppID  pid,
int *  num_verts,
int *  marker 
)

Resolve shared entities using global markers on shared vertices.

Note
Global markers can be a global node id, for example, or a global DoF number (as for HOMME)

Operations: Collective .

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_verts(int*) Number of vertices.
[in]marker(int*) Resolving marker (global id marker).
Returns
ErrCode The error code indicating success or failure.
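
A minimal sketch (illustrative; gids is assumed to hold one global id per locally visible vertex, for example as returned by iMOAB_GetVertexID):

    std::vector< int > gids; // one global id marker per local vertex, filled earlier
    int numVerts = (int)gids.size();
    ErrCode ierr = iMOAB_ResolveSharedEntities( &pid, &numVerts, gids.data() );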

Definition at line 2304 of file iMOAB.cpp.

2305 {
2306  appData& data = context.appDatas[*pid];
2307  ParallelComm* pco = context.appDatas[*pid].pcomm;
2308  EntityHandle cset = data.file_set;
2309  int dum_id = 0;
2310  ErrorCode rval;
2311  if( data.primary_elems.empty() )
2312  {
2313  // skip the actual resolve; assume vertices are already distributed,
2314  // no need to share them
2315  }
2316  else
2317  {
2318  // create an integer tag for resolving ; maybe it can be a long tag in the future
2319  // (more than 2 B vertices;)
2320 
2321  Tag stag;
2322  rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
2323  &dum_id );MB_CHK_ERR( rval );
2324 
2325  if( *num_verts > (int)data.local_verts.size() )
2326  {
2327  return moab::MB_FAILURE;
2328  } // we are not setting the size
2329 
2330  rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval ); // assumes integer tag
2331 
2332  rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
2333 
2334  rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
2335  }
2336  // provide partition tag equal to rank
2337  Tag part_tag;
2338  dum_id = -1;
2339  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2340  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2341 
2342  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2343  {
2344  std::cout << " can't get par part tag.\n";
2345  return moab::MB_FAILURE;
2346  }
2347 
2348  int rank = pco->rank();
2349  rval = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
2350 
2351  return moab::MB_SUCCESS;
2352 }

References GlobalContext::appDatas, context, moab::Range::empty(), ErrorCode, appData::file_set, appData::local_verts, MB_ALREADY_ALLOCATED, MB_CHK_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_DENSE, MB_TAG_SPARSE, MB_TYPE_INTEGER, GlobalContext::MBI, appData::primary_elems, moab::ParallelComm::rank(), moab::ParallelComm::resolve_shared_ents(), moab::Range::size(), moab::Interface::tag_delete(), moab::Interface::tag_get_handle(), and moab::Interface::tag_set_data().

◆ iMOAB_SendElementTag()

ErrCode iMOAB_SendElementTag ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
MPI_Comm *  joint_communicator,
int *  context_id 
)

Definition at line 2648 of file iMOAB.cpp.

2652 {
2653  appData& data = context.appDatas[*pid];
2654  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2655  if( mt == data.pgraph.end() )
2656  {
2657  std::cout << " no par com graph for context_id:" << *context_id << " available contexts:";
2658  for( auto mit = data.pgraph.begin(); mit != data.pgraph.end(); mit++ )
2659  std::cout << " " << mit->first;
2660  std::cout << "\n";
2661  return moab::MB_FAILURE;
2662  }
2663 
2664  ParCommGraph* cgraph = mt->second;
2665  ParallelComm* pco = context.appDatas[*pid].pcomm;
2666  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2667  : *joint_communicator );
2668  Range owned = ( data.point_cloud ? data.local_verts : data.owned_elems );
2669 
2670 #ifdef MOAB_HAVE_TEMPESTREMAP
2671  if( data.tempestData.remapper != nullptr ) // this is the case this is part of intx;;
2672  {
2673  EntityHandle cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2674  MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2675  }
2676 #else
2677  // now, in case we are sending from intx between ocn and atm, we involve coverage set
2678  // how do I know if this receiver already participated in an intersection driven by coupler?
2679  // also, what if this was the "source" mesh in intx?
2680  // in that case, the elements might have been instantiated in the coverage set locally, the
2681  // "owned" range can be different; the elements are now in the tempestRemap coverage_set
2682  EntityHandle cover_set = cgraph->get_cover_set(); // this will be non null only for intx app ?
2683  if( 0 != cover_set ) MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2684 #endif
2685 
2686  std::string tag_name( tag_storage_name );
2687 
2688  // basically, we assume everything is defined already on the tag,
2689  // and we can get the tags just by its name
2690  // we assume that there are separators ":" between the tag names
2691  std::vector< std::string > tagNames;
2692  std::vector< Tag > tagHandles;
2693  std::string separator( ":" );
2694  split_tag_names( tag_name, separator, tagNames );
2695  for( size_t i = 0; i < tagNames.size(); i++ )
2696  {
2697  Tag tagHandle;
2698  ErrorCode rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2699  if( MB_SUCCESS != rval || nullptr == tagHandle )
2700  {
2701  MB_CHK_SET_ERR( moab::MB_FAILURE,
2702  "can't get tag handle with name: " << tagNames[i].c_str() << " at index " << i );
2703  }
2704  tagHandles.push_back( tagHandle );
2705  }
2706 
2707  // pco is needed to pack, and for moab instance, not for communication!
2708  // still use nonblocking communication, over the joint comm
2709  MB_CHK_ERR( cgraph->send_tag_values( global, pco, owned, tagHandles ) );
2710  // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
2711 
2712  return moab::MB_SUCCESS;
2713 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, ErrorCode, moab::ParCommGraph::get_cover_set(), moab::Interface::get_entities_by_dimension(), appData::is_fortran, appData::local_verts, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, GlobalContext::MBI, appData::owned_elems, appData::pgraph, appData::point_cloud, moab::ParCommGraph::send_tag_values(), split_tag_names(), and moab::Interface::tag_get_handle().
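
A sender-side sketch (illustrative; jointComm is assumed to span both sender and receiver tasks, and a par comm graph for contextId must already exist, e.g. created by iMOAB_SendMesh or by the coupler; the tag names are hypothetical and use the ':' separator convention visible in the code above):

    char tagNames[] = "a2oTbot:a2oUbot"; // hypothetical tags, defined on both components
    int contextId = 20;                  // hypothetical component id of the receiver
    ErrCode ierr = iMOAB_SendElementTag( &pid, tagNames, &jointComm, &contextId );

A matching iMOAB_ReceiveElementTag call is expected on the receiving component.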

◆ iMOAB_SendMesh()

ErrCode iMOAB_SendMesh ( iMOAB_AppID  pid,
MPI_Comm *  joint_communicator,
MPI_Group *  receivingGroup,
int *  rcompid,
int *  method 
)

Definition at line 2386 of file iMOAB.cpp.

2391 {
2392  assert( joint_communicator != nullptr );
2393  assert( receivingGroup != nullptr );
2394  assert( rcompid != nullptr );
2395 
2396  ErrorCode rval;
2397  int ierr;
2398  appData& data = context.appDatas[*pid];
2399  ParallelComm* pco = context.appDatas[*pid].pcomm;
2400 
2401  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2402  : *joint_communicator );
2403  MPI_Group recvGroup =
2404  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
2405  MPI_Comm sender = pco->comm(); // the sender comm is obtained from parallel comm in moab; no need to pass it along
2406  // first see which processors are in each group; get the sender group too, from the sender communicator
2407  MPI_Group senderGroup;
2408  ierr = MPI_Comm_group( sender, &senderGroup );
2409  if( ierr != 0 ) return moab::MB_FAILURE;
2410 
2411  // instantiate the par comm graph
2412  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2413  // int coid2)
2414  ParCommGraph* cgraph =
2415  new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
2416  // we should search if we have another pcomm with the same comp ids in the list already
2417  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2418  context.appDatas[*pid].pgraph[*rcompid] = cgraph;
2419 
2420  int sender_rank = -1;
2421  MPI_Comm_rank( sender, &sender_rank );
2422 
2423  // decide how to distribute elements to each processor
2424  // now, get the entities on local processor, and pack them into a buffer for various processors
2425  // we will do trivial partition: first get the total number of elements from "sender"
2426  std::vector< int > number_elems_per_part;
2427  // how to distribute local elements to receiving tasks?
2428  // trivial partition: first compute the total number of elements that need to be sent
2429  Range owned = context.appDatas[*pid].owned_elems;
2430  if( owned.size() == 0 )
2431  {
2432  // must be vertices that we want to send then
2433  owned = context.appDatas[*pid].local_verts;
2434  // we should have some vertices here
2435  }
2436 
2437  if( *method == 0 ) // trivial partitioning, old method
2438  {
2439  int local_owned_elem = (int)owned.size();
2440  int size = pco->size();
2441  int rank = pco->rank();
2442  number_elems_per_part.resize( size ); //
2443  number_elems_per_part[rank] = local_owned_elem;
2444 #if( MPI_VERSION >= 2 )
2445  // Use "in place" option
2446  ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
2447 #else
2448  {
2449  std::vector< int > all_tmp( size );
2450  ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
2451  number_elems_per_part = all_tmp;
2452  }
2453 #endif
2454 
2455  if( ierr != 0 )
2456  {
2457  return moab::MB_FAILURE;
2458  }
2459 
2460  // every sender computes the trivial partition, it is cheap, and we need to send it anyway
2461  // to each sender
2462  rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
2463 
2464  rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
2465  }
2466  else // *method != 0, so it is either graph or geometric, parallel
2467  {
2468  // owned are the primary elements on this app
2469  rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
2470 
2471  // basically, send the graph to the receiver side, with unblocking send
2472  rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
2473  }
2474  // pco is needed to pack, not for communication
2475  rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
2476 
2477  // mark for deletion
2478  MPI_Group_free( &senderGroup );
2479  return moab::MB_SUCCESS;
2480 }

References GlobalContext::appDatas, moab::ParallelComm::comm(), moab::ParCommGraph::compute_partition(), moab::ParCommGraph::compute_trivial_partition(), context, ErrorCode, appData::is_fortran, MB_CHK_ERR, MB_SUCCESS, moab::ParallelComm::rank(), moab::ParCommGraph::send_graph(), moab::ParCommGraph::send_graph_partition(), moab::ParCommGraph::send_mesh_parts(), size, moab::Range::size(), and moab::ParallelComm::size().
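
A sender-side sketch (illustrative; the joint communicator, the receiving group, and the receiver component id are assumed to have been set up by the driver):

    int rcompid = 20; // hypothetical component id of the receiving application
    int method = 0;   // 0 = trivial partition; nonzero = graph/geometric partition
    ErrCode ierr = iMOAB_SendMesh( &pid, &jointComm, &recvGroup, &rcompid, &method );

The tasks of the receiving group are expected to call iMOAB_ReceiveMesh over the same joint communicator.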

◆ iMOAB_SetDoubleTagStorage()

ErrCode iMOAB_SetDoubleTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_names,
int *  num_tag_storage_length,
int *  ent_type,
double *  tag_storage_data 
)

Store the specified values in a MOAB double Tag.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_names(iMOAB_String) The tag names, separated by ':', under which to store the data.
[in]num_tag_storage_length(int*) The size of the total tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity*num_tags).
[in]ent_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[in]tag_storage_data(double*) The array data of type double to replace the internal tag memory; the data is assumed to be contiguous over the local set of visible entities (either vertices or elements), unrolled by tags.
Returns
ErrCode The error code indicating success or failure.
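
A minimal sketch for a single one-component double tag on primary elements (illustrative; the tag name is hypothetical, and the tag is assumed to have been defined already, e.g. via iMOAB_DefineTagStorage):

    std::vector< double > vals( nVisibleElems, 0.0 ); // one value per visible element
    int length = (int)vals.size();
    int entType = 1;            // 1 = elements, 0 = vertices
    char tagName[] = "a2oTbot"; // hypothetical tag with one component per entity
    ErrCode ierr = iMOAB_SetDoubleTagStorage( &pid, tagName, &length, &entType, vals.data() );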

Definition at line 1706 of file iMOAB.cpp.

1711 {
1712  ErrorCode rval;
1713  std::string tag_names( tag_storage_names );
1714  // exactly the same code as for int tag :) maybe should check the type of tag too
1715  std::vector< std::string > tagNames;
1716  std::vector< Tag > tagHandles;
1717  std::string separator( ":" );
1718  split_tag_names( tag_names, separator, tagNames );
1719 
1720  appData& data = context.appDatas[*pid];
1721  // set it on a subset of entities, based on type and length
1722  // if *entity_type = 0, then use vertices; else elements
1723  Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1724 
1725  int nents_to_be_set = (int)( *ents_to_set ).size();
1726  int position = 0;
1727  for( size_t i = 0; i < tagNames.size(); i++ )
1728  {
1729  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1730  {
1731  return moab::MB_FAILURE;
1732  } // some tag not defined yet in the app
1733 
1734  Tag tag = data.tagMap[tagNames[i]];
1735 
1736  int tagLength = 0;
1737  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1738 
1739  DataType dtype;
1740  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1741 
1742  if( dtype != MB_TYPE_DOUBLE )
1743  {
1744  return moab::MB_FAILURE;
1745  }
1746 
1747  // set it on the subset of entities, based on type and length
1748  if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
1749  return moab::MB_FAILURE; // too many entity values to be set
1750 
1751  rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
1752  // increment position to next tag
1753  position = position + tagLength * nents_to_be_set;
1754  }
1755  return moab::MB_SUCCESS; // no error
1756 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, MB_TYPE_DOUBLE, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), split_tag_names(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), moab::Interface::tag_set_data(), and appData::tagMap.

◆ iMOAB_SetDoubleTagStorageWithGid()

ErrCode iMOAB_SetDoubleTagStorageWithGid ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_names,
int *  num_tag_storage_length,
int *  ent_type,
double *  tag_storage_data,
int *  globalIds 
)

Store the specified values in a MOAB double Tag, for given ids.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_names(iMOAB_String) The tag names, separated by ':', under which to store the data.
[in]num_tag_storage_length(int*) The size of the total tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity*num_tags).
[in]ent_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[in]tag_storage_data(double*) The array data of type double to replace the internal tag memory; the data is assumed to be permuted over the local set of visible entities (either vertices or elements) and unrolled by tags; in parallel, the order may differ, or the entities may live on other tasks.
[in]globalIds(int*) Global ids of the entities whose values are to be set.
Returns
ErrCode The error code indicating success or failure.
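
A sketch of the permuted variant (illustrative): values arrive ordered by a caller-provided global id list rather than in local Range order, and entities without a matching global id are simply skipped:

    std::vector< double > vals; // vals[i] is destined for the entity with global id gids[i]
    std::vector< int > gids;    // both filled by the caller
    int length = (int)vals.size();
    int entType = 1;
    char tagName[] = "a2oTbot"; // hypothetical one-component tag
    ErrCode ierr = iMOAB_SetDoubleTagStorageWithGid( &pid, tagName, &length, &entType,
                                                     vals.data(), gids.data() );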

Definition at line 1758 of file iMOAB.cpp.

1764 {
1765  ErrorCode rval;
1766  std::string tag_names( tag_storage_names );
1767  // exactly the same code as for int tag :) maybe should check the type of tag too
1768  std::vector< std::string > tagNames;
1769  std::vector< Tag > tagHandles;
1770  std::string separator( ":" );
1771  split_tag_names( tag_names, separator, tagNames );
1772 
1773  appData& data = context.appDatas[*pid];
1774  // set it on a subset of entities, based on type and length
1775  // if *entity_type = 0, then use vertices; else elements
1776  Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1777  int nents_to_be_set = (int)( *ents_to_set ).size();
1778 
1779  Tag gidTag = context.MBI->globalId_tag();
1780  std::vector< int > gids;
1781  gids.resize( nents_to_be_set );
1782  rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval );
1783 
1784  // so we will need to set the tags according to the global id passed;
1785  // so the order in tag_storage_data is the same as the order in globalIds, but the order
1786  // in local range is gids
1787  std::map< int, EntityHandle > eh_by_gid;
1788  int i = 0;
1789  for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i )
1790  {
1791  eh_by_gid[gids[i]] = *it;
1792  }
1793 
1794  std::vector< int > tagLengths( tagNames.size() );
1795  std::vector< Tag > tagList;
1796  size_t total_tag_len = 0;
1797  for( size_t i = 0; i < tagNames.size(); i++ )
1798  {
1799  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1800  {
1801  MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
1802  } // some tag not defined yet in the app
1803 
1804  Tag tag = data.tagMap[tagNames[i]];
1805  tagList.push_back( tag );
1806 
1807  int tagLength = 0;
1808  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1809 
1810  total_tag_len += tagLength;
1811  tagLengths[i] = tagLength;
1812  DataType dtype;
1813  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1814 
1815  if( dtype != MB_TYPE_DOUBLE )
1816  {
1817  MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
1818  }
1819  }
1820  bool serial = true;
1821 #ifdef MOAB_HAVE_MPI
1822  ParallelComm* pco = context.appDatas[*pid].pcomm;
1823  unsigned num_procs = pco->size();
1824  if( num_procs > 1 ) serial = false;
1825 #endif
1826 
1827  if( serial )
1828  {
1829  // we do not assume anymore that the number of entities has to match
1830  // we will set only what matches, and skip entities that do not have corresponding global ids
1831  //assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 );
1832  // tags are unrolled, we loop over global ids first, then careful about tags
1833  for( int i = 0; i < nents_to_be_set; i++ )
1834  {
1835  int gid = globalIds[i];
1836  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
1837  if( mapIt == eh_by_gid.end() ) continue;
1838  EntityHandle eh = mapIt->second;
1839  // now loop over tags
1840  int indexInTagValues = 0; //
1841  for( size_t j = 0; j < tagList.size(); j++ )
1842  {
1843  indexInTagValues += i * tagLengths[j];
1844  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval );
1845  // advance the pointer/index
1846  indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j]; // at the end of tag data
1847  }
1848  }
1849  }
1850 #ifdef MOAB_HAVE_MPI
1851  else // it can be not serial only if pco->size() > 1, parallel
1852  {
1853  // in this case, we have to use 2 crystal routers, to send data to the processor that needs it
1854  // we will first create a tuple list to rendezvous points, then from there send to the processor that requested it
1855  // it is a 2-hop global gather scatter
1856  // TODO: allow for tags of different length; this is wrong
1857  int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() ); // assumes all tags have the same length?
1858  // we do not expect the sizes to match
1859  //assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 );
1860  TupleList TLsend;
1861  TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals ); // to proc, marker(gid), total_tag_len doubles
1862  TLsend.enableWriteAccess();
1863  // the rendezvous processor that handles global_id is global_id % num_procs
1864 
1865  int indexInRealLocal = 0;
1866  for( int i = 0; i < nbLocalVals; i++ )
1867  {
1868  // to proc, marker, element local index, index in el
1869  int marker = globalIds[i];
1870  int to_proc = marker % num_procs;
1871  int n = TLsend.get_n();
1872  TLsend.vi_wr[2 * n] = to_proc; // send to processor
1873  TLsend.vi_wr[2 * n + 1] = marker;
1874  int indexInTagValues = 0;
1875  // tag data collect by number of tags
1876  for( size_t j = 0; j < tagList.size(); j++ )
1877  {
1878  indexInTagValues += i * tagLengths[j];
1879  for( int k = 0; k < tagLengths[j]; k++ )
1880  {
1881  TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
1882  }
1883  indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
1884  }
1885  TLsend.inc_n();
1886  }
1887 
1888  //assert( nbLocalVals * total_tag_len - indexInRealLocal == 0 );
1889  // send now requests, basically inform the rendez-vous point who needs a particular global id
1890  // send the data to the other processors:
1891  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 );
1892  TupleList TLreq;
1893  TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
1894  TLreq.enableWriteAccess();
1895  for( int i = 0; i < nents_to_be_set; i++ )
1896  {
1897  // to proc, marker
1898  int marker = gids[i];
1899  int to_proc = marker % num_procs;
1900  int n = TLreq.get_n();
1901  TLreq.vi_wr[2 * n] = to_proc; // send to processor
1902  TLreq.vi_wr[2 * n + 1] = marker;
1903  // tag data collect by number of tags
1904  TLreq.inc_n();
1905  }
1906  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
1907 
1908  // we know now that process TLreq.vi_wr[2 * n] needs tags for gid TLreq.vi_wr[2 * n + 1]
1909  // we should first order by global id, and then build the new TL with send to proc, global id and
1910  // tags for it
1911  // sort by global ids the tuple lists
1912  moab::TupleList::buffer sort_buffer;
1913  sort_buffer.buffer_init( TLreq.get_n() );
1914  TLreq.sort( 1, &sort_buffer );
1915  sort_buffer.reset();
1916  sort_buffer.buffer_init( TLsend.get_n() );
1917  TLsend.sort( 1, &sort_buffer );
1918  sort_buffer.reset();
1919  // now send the tag values to the proc that requested it
1920  // in theory, for a full partition, TLreq and TLsend should have the same size, and
1921  // each dof should have exactly one target proc. Is that true or not in general ?
1922  // how do we plan to use this? Is it better to store the comm graph for future use?
1923 
1924  // start copy from comm graph settle
1925  TupleList TLBack;
1926  TLBack.initialize( 3, 0, 0, total_tag_len, 0 ); // to proc, marker, tag from proc , tag values
1927  TLBack.enableWriteAccess();
1928 
1929  int n1 = TLreq.get_n();
1930  int n2 = TLsend.get_n();
1931 
1932  int indexInTLreq = 0;
1933  int indexInTLsend = 0; // advance both, according to the marker
1934  if( n1 > 0 && n2 > 0 )
1935  {
1936 
1937  while( indexInTLreq < n1 && indexInTLsend < n2 ) // if any is over, we are done
1938  {
1939  int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
1940  int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
1941  if( currentValue1 < currentValue2 )
1942  {
1943  // we have a big problem; basically, we are saying that
1944  // dof currentValue is on one model and not on the other
1945  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
1946  indexInTLreq++;
1947  continue;
1948  }
1949  if( currentValue1 > currentValue2 )
1950  {
1951  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
1952  indexInTLsend++;
1953  continue;
1954  }
1955  int size1 = 1;
1956  int size2 = 1;
1957  while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
1958  size1++;
1959  while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
1960  size2++;
1961  // must be found in both lists, find the start and end indices
1962  for( int i1 = 0; i1 < size1; i1++ )
1963  {
1964  for( int i2 = 0; i2 < size2; i2++ )
1965  {
1966  // send the info back to components
1967  int n = TLBack.get_n();
1968  TLBack.reserve();
1969  TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )]; // send back to the proc marker
1970  // came from, info from comp2
1971  TLBack.vi_wr[3 * n + 1] = currentValue1; // initial value (resend, just for verif ?)
1972  TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )]; // from proc on comp2
1973  // also fill tag values
1974  for( size_t k = 0; k < total_tag_len; k++ )
1975  {
1976  TLBack.vr_rd[total_tag_len * n + k] =
1977  TLsend.vr_rd[total_tag_len * indexInTLsend + k]; // deep copy of tag values
1978  }
1979  }
1980  }
1981  indexInTLreq += size1;
1982  indexInTLsend += size2;
1983  }
1984  }
1985  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
1986  // end copy from comm graph
1987  // after we are done sending, we need to set those tag values, in a reverse process compared to send
1988  n1 = TLBack.get_n();
1989  double* ptrVal = &TLBack.vr_rd[0]; //
1990  for( int i = 0; i < n1; i++ )
1991  {
1992  int gid = TLBack.vi_rd[3 * i + 1]; // marker
1993  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
1994  if( mapIt == eh_by_gid.end() ) continue;
1995  EntityHandle eh = mapIt->second;
1996  // now loop over tags
1997 
1998  for( size_t j = 0; j < tagList.size(); j++ )
1999  {
2000  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval );
2001  // advance the pointer/index
2002  ptrVal += tagLengths[j]; // at the end of tag data per call
2003  }
2004  }
2005  }
2006 #endif
2007  return MB_SUCCESS;
2008 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::begin(), context, moab::ProcConfig::crystal_router(), moab::TupleList::enableWriteAccess(), moab::Range::end(), ErrorCode, moab::TupleList::get_n(), moab::Interface::globalId_tag(), moab::TupleList::inc_n(), moab::TupleList::initialize(), MB_CHK_ERR, MB_SET_ERR, MB_SUCCESS, MB_TYPE_DOUBLE, GlobalContext::MBI, appData::primary_elems, moab::ParallelComm::proc_config(), moab::TupleList::reserve(), moab::TupleList::buffer::reset(), moab::Range::size(), moab::ParallelComm::size(), moab::TupleList::sort(), split_tag_names(), moab::Interface::tag_get_data(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), moab::Interface::tag_set_data(), appData::tagMap, moab::TupleList::vi_rd, moab::TupleList::vi_wr, moab::TupleList::vr_rd, and moab::TupleList::vr_wr.

◆ iMOAB_SetGlobalInfo()

ErrCode iMOAB_SetGlobalInfo ( iMOAB_AppID  pid,
int *  num_global_verts,
int *  num_global_elems 
)

Set global information for the number of vertices and the number of elements; it is usually available from the h5m file header, or it can be computed with an MPI_Reduce.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_global_verts(int*) The number of total vertices in the mesh.
[in]num_global_elems(int*) The number of total elements in the mesh.
Returns
ErrCode The error code indicating success or failure.
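
If the totals are not available from the file header, they can be computed from the owned counts, mirroring the logic in iMOAB_UpdateMeshInfo (a sketch; ownedVerts and ownedElems are assumed local owned counts, and comm is the application communicator):

    int local[2] = { ownedVerts, ownedElems }, global[2];
    MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, comm );
    ErrCode ierr = iMOAB_SetGlobalInfo( &pid, &global[0], &global[1] );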

Definition at line 2278 of file iMOAB.cpp.

2279 {
2280  appData& data = context.appDatas[*pid];
2281  data.num_global_vertices = *num_global_verts;
2282  data.num_global_elements = *num_global_elems;
2283  return moab::MB_SUCCESS;
2284 }

References GlobalContext::appDatas, context, MB_SUCCESS, appData::num_global_elements, and appData::num_global_vertices.

Referenced by iMOAB_UpdateMeshInfo().

◆ iMOAB_SetIntTagStorage()

ErrCode iMOAB_SetIntTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  num_tag_storage_length,
int *  ent_type,
int *  tag_storage_data 
)

Store the specified values in a MOAB integer Tag.

Values are set on vertices or elements, depending on ent_type.

Note
we could change the api to accept as input the tag index, as returned by iMOAB_DefineTagStorage.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag name under which to store the data in MOAB.
[in]num_tag_storage_length(int*) The size of tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity).
[in]ent_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[in]tag_storage_data(int*) The array data of type int to replace the internal tag memory; the data is assumed to be contiguous over the local set of visible entities (either vertices or elements).
Returns
ErrCode The error code indicating success or failure.
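
A minimal sketch for a one-component integer tag on vertices (illustrative; the tag is assumed to have been defined already for this application):

    std::vector< int > ids( nVisibleVerts ); // one integer per visible vertex, filled by the caller
    int length = (int)ids.size();
    int entType = 0; // vertices
    char tagName[] = "GLOBAL_ID";
    ErrCode ierr = iMOAB_SetIntTagStorage( &pid, tagName, &length, &entType, ids.data() );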

Definition at line 1619 of file iMOAB.cpp.

1624 {
1625  std::string tag_name( tag_storage_name );
1626 
1627  // Get the application data
1628  appData& data = context.appDatas[*pid];
1629  // check if tag is defined
1630  if( data.tagMap.find( tag_name ) == data.tagMap.end() ) return moab::MB_FAILURE;
1631 
1632  Tag tag = data.tagMap[tag_name];
1633 
1634  int tagLength = 0;
1635  MB_CHK_ERR( context.MBI->tag_get_length( tag, tagLength ) );
1636 
1637  DataType dtype;
1638  MB_CHK_ERR( context.MBI->tag_get_data_type( tag, dtype ) );
1639 
1640  if( dtype != MB_TYPE_INTEGER )
1641  {
1642  MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." );
1643  }
1644 
1645  // set it on a subset of entities, based on type and length
1646  // if *entity_type = 0, then use vertices; else elements
1647  Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1648  int nents_to_be_set = *num_tag_storage_length / tagLength;
1649 
1650  if( nents_to_be_set > (int)ents_to_set->size() )
1651  {
1652  return moab::MB_FAILURE;
1653  } // too many entities to be set, or too few
1654 
1655  // now set the tag data
1656  MB_CHK_ERR( context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data ) );
1657 
1658  return moab::MB_SUCCESS; // no error
1659 }

References appData::all_verts, GlobalContext::appDatas, context, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, MB_TYPE_INTEGER, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), moab::Interface::tag_set_data(), and appData::tagMap.

◆ iMOAB_SynchronizeTags()

ErrCode iMOAB_SynchronizeTags ( iMOAB_AppID  pid,
int *  num_tag,
int *  tag_indices,
int *  ent_type 
)

Exchange tag values for the given tags across process boundaries.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_tag(int*) Number of tags to exchange.
[in]tag_indices(int*) Array with tag indices of interest (size = *num_tag).
[in]ent_type(int*) The type of entity for tag exchange.
Returns
ErrCode The error code indicating success or failure.
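
A minimal sketch (illustrative; the tag indices are hypothetical values returned earlier by iMOAB_DefineTagStorage):

    int numTag = 2;
    int tagIndices[2] = { 0, 1 }; // hypothetical tag indices
    int entType = 0;              // exchange on vertices
    ErrCode ierr = iMOAB_SynchronizeTags( &pid, &numTag, tagIndices, &entType );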

Definition at line 2069 of file iMOAB.cpp.

2070 {
2071 #ifdef MOAB_HAVE_MPI
2072  appData& data = context.appDatas[*pid];
2073  Range ent_exchange;
2074  std::vector< Tag > tags;
2075 
2076  for( int i = 0; i < *num_tag; i++ )
2077  {
2078  if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
2079  {
2080  return moab::MB_FAILURE;
2081  } // error in tag index
2082 
2083  tags.push_back( data.tagList[tag_indices[i]] );
2084  }
2085 
2086  if( *ent_type == 0 )
2087  {
2088  ent_exchange = data.all_verts;
2089  }
2090  else if( *ent_type == 1 )
2091  {
2092  ent_exchange = data.primary_elems;
2093  }
2094  else
2095  {
2096  return moab::MB_FAILURE;
2097  } // unexpected type
2098 
2099  ParallelComm* pco = context.appDatas[*pid].pcomm;
2100 
2101  ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
2102 
2103 #else
2104  /* do nothing if serial */
2105  // just silence the warning
2106  // do not call sync tags in serial!
2107  int k = *pid + *num_tag + *tag_indices + *ent_type;
2108  k++;
2109 #endif
2110 
2111  return moab::MB_SUCCESS;
2112 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, moab::ParallelComm::exchange_tags(), MB_CHK_ERR, MB_SUCCESS, appData::primary_elems, and appData::tagList.

◆ iMOAB_UpdateMeshInfo()

ErrCode iMOAB_UpdateMeshInfo ( iMOAB_AppID  pid)

Update local mesh data structure, from file information.

Note
The method should be called after mesh modifications, for example after reading a file or creating a mesh in memory

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
Returns
ErrCode The error code indicating success or failure.
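
A typical sequence after loading or modifying a mesh (a sketch; iMOAB_GetMeshInfo is assumed to fill three-entry arrays with owned/ghost/total counts):

    ErrCode ierr = iMOAB_UpdateMeshInfo( &pid );
    // the cached entity ranges are now valid and can be queried:
    int nverts[3], nelems[3], nblocks[3], nsbc[3], nvbc[3];
    ierr = iMOAB_GetMeshInfo( &pid, nverts, nelems, nblocks, nsbc, nvbc );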

Definition at line 777 of file iMOAB.cpp.

778 {
779  // this will include ghost elements info
780  appData& data = context.appDatas[*pid];
781  EntityHandle fileSet = data.file_set;
782 
783  // first clear all data ranges; this can be called after ghosting
784  data.all_verts.clear();
785  data.primary_elems.clear();
786  data.local_verts.clear();
787  data.owned_verts.clear();
788  data.ghost_vertices.clear();
789  data.owned_elems.clear();
790  data.ghost_elems.clear();
791  data.mat_sets.clear();
792  data.neu_sets.clear();
793  data.diri_sets.clear();
794 
795  // Let us get all the vertex entities
796  ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval ); // recursive
797 
798  // Let us check first entities of dimension = 3
799  data.dimension = 3;
800  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
801 
802  if( data.primary_elems.empty() )
803  {
 804  // No 3-D elements. Let us check entities of dimension = 2
805  data.dimension = 2;
806  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
807 
808  if( data.primary_elems.empty() )
809  {
 810  // No 3-D or 2-D elements. Let us check entities of dimension = 1
811  data.dimension = 1;
812  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
813 
814  if( data.primary_elems.empty() )
815  {
816  // no elements of dimension 1 or 2 or 3; it could happen for point clouds
817  data.dimension = 0;
818  }
819  }
820  }
821 
822  // check if the current mesh is just a point cloud
823  data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
824 
825 #ifdef MOAB_HAVE_MPI
826 
 827  if( context.MPI_initialized )
 828  {
829  ParallelComm* pco = context.appDatas[*pid].pcomm;
830 
831  // filter ghost vertices, from local
832  rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
833 
834  // Store handles for all ghosted entities
835  data.ghost_vertices = subtract( data.all_verts, data.local_verts );
836 
837  // filter ghost elements, from local
838  rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
839 
840  data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
841  // now update global number of primary cells and global number of vertices
842  // determine first number of owned vertices
843  // Get local owned vertices
844  rval = pco->filter_pstatus( data.all_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &data.owned_verts );MB_CHK_ERR( rval );
845  int local[2], global[2];
846  local[0] = data.owned_verts.size();
847  local[1] = data.owned_elems.size();
848  MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
849  rval = iMOAB_SetGlobalInfo( pid, &( global[0] ), &( global[1] ) );MB_CHK_ERR( rval );
850  }
851  else
852  {
853  data.local_verts = data.all_verts;
854  data.owned_elems = data.primary_elems;
855  }
856 
857 #else
858 
859  data.local_verts = data.all_verts;
860  data.owned_elems = data.primary_elems;
861 
862 #endif
863 
 864  // Get the references for some standard internal tags such as material blocks, BCs, etc
 865  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
 866  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
 867 
 868  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
 869  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
 870 
 871  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
 872  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
873 
874  return moab::MB_SUCCESS;
875 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::clear(), moab::ParallelComm::comm(), context, appData::dimension, appData::diri_sets, GlobalContext::dirichlet_tag, moab::Range::empty(), ErrorCode, appData::file_set, moab::ParallelComm::filter_pstatus(), moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type(), moab::Interface::get_entities_by_type_and_tag(), appData::ghost_elems, appData::ghost_vertices, iMOAB_SetGlobalInfo(), appData::local_verts, appData::mat_sets, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, MBVERTEX, GlobalContext::MPI_initialized, appData::neu_sets, GlobalContext::neumann_tag, appData::owned_elems, appData::owned_verts, appData::point_cloud, appData::primary_elems, PSTATUS_GHOST, PSTATUS_NOT, PSTATUS_NOT_OWNED, moab::Range::size(), moab::subtract(), and moab::Interface::UNION.

Referenced by iMOAB_DetermineGhostEntities(), iMOAB_LoadMesh(), iMOAB_MergeVertices(), and iMOAB_ReceiveMesh().

◆ iMOAB_WriteLocalMesh()

ErrCode iMOAB_WriteLocalMesh ( iMOAB_AppID  pid,
iMOAB_String  prefix 
)

Write a local MOAB mesh copy.

Note
The interface will write one file per task. Only the local mesh set and the solution data associated with the application will be written to the file. This is a very convenient method for debugging.

Operations: Not Collective (independent operation).

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]prefix(iMOAB_String) The MOAB file prefix that will be used with task ID in parallel.
Returns
ErrCode The error code indicating success or failure.
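
A minimal sketch; with the prefix below, each task writes a file named debugmesh_<size>_<rank>.h5m, as the definition shows:

    char prefix[] = "debugmesh"; // hypothetical prefix
    ErrCode ierr = iMOAB_WriteLocalMesh( &pid, prefix );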

Definition at line 758 of file iMOAB.cpp.

759 {
760  IMOAB_CHECKPOINTER( prefix, 2 );
761  IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
762 
763  std::ostringstream file_name;
764  int rank = 0, size = 1;
765 #ifdef MOAB_HAVE_MPI
766  ParallelComm* pcomm = context.appDatas[*pid].pcomm;
767  rank = pcomm->rank();
768  size = pcomm->size();
769 #endif
770  file_name << prefix << "_" << size << "_" << rank << ".h5m";
771  // Now let us actually write the file to disk with appropriate options
772  ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
773 
774  return moab::MB_SUCCESS;
775 }

References GlobalContext::appDatas, context, ErrorCode, IMOAB_ASSERT, IMOAB_CHECKPOINTER, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::ParallelComm::rank(), size, moab::ParallelComm::size(), and moab::Interface::write_file().

◆ iMOAB_WriteMesh()

ErrCode iMOAB_WriteMesh ( iMOAB_AppID  pid,
const iMOAB_String  filename,
const iMOAB_String  write_options 
)

Write a MOAB mesh along with the solution tags to a file.

Note
The interface will write a single file for the parallel H5M format; for serial formats (VTK/Exodus), it will write one file per task. Write options can include parallel write options, if needed. Only the mesh set and the solution data associated with the application will be written to the file.

Operations: Collective for parallel write, non collective for serial write.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]filename(iMOAB_String) The MOAB mesh file (H5M) to write all the entities contained in the internal application mesh set.
[in]write_options(iMOAB_String) Additional options for writing the MOAB mesh in parallel.
Returns
ErrCode The error code indicating success or failure.
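
A minimal parallel-write sketch (the PARALLEL=WRITE_PART option is recognized by internal_WriteMesh below; PARALLEL_COMM must not be passed, since it is added implicitly):

    char outFile[] = "whole_mesh.h5m";        // hypothetical output file
    char writeOpts[] = "PARALLEL=WRITE_PART"; // requests a single parallel H5M file
    ErrCode ierr = iMOAB_WriteMesh( &pid, outFile, writeOpts );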

Definition at line 753 of file iMOAB.cpp.

754 {
755  return internal_WriteMesh( pid, filename, write_options );
756 }

References internal_WriteMesh().

◆ internal_WriteMesh()

static ErrCode internal_WriteMesh ( iMOAB_AppID  pid,
const iMOAB_String  filename,
const iMOAB_String  write_options,
bool  primary_set = true 
)
static

Definition at line 677 of file iMOAB.cpp.

681 {
682  IMOAB_CHECKPOINTER( filename, 2 );
683  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
684 
685  appData& data = context.appDatas[*pid];
686  EntityHandle fileSet = data.file_set;
687 
688  std::ostringstream newopts;
689 #ifdef MOAB_HAVE_MPI
690  std::string write_opts( ( write_options ? write_options : "" ) );
691  std::string pcid( "PARALLEL_COMM=" );
692 
693  if( write_opts.find( pcid ) != std::string::npos )
694  {
695  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
696  return moab::MB_FAILURE;
697  }
698 
699  // if write in parallel, add pc option, to be sure about which ParallelComm instance is used
700  std::string pw( "PARALLEL=WRITE_PART" );
701  if( write_opts.find( pw ) != std::string::npos )
702  {
703  ParallelComm* pco = data.pcomm;
704  newopts << "PARALLEL_COMM=" << pco->get_id() << ";";
705  }
706 
707 #endif
708 
709 #ifdef MOAB_HAVE_TEMPESTREMAP
710  if( !primary_set )
711  {
712  if( data.tempestData.remapper != nullptr )
713  fileSet = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
714  else if( data.file_set != data.secondary_file_set )
715  fileSet = data.secondary_file_set;
716  else
717  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid secondary file set handle" );
718  }
719 #endif
720 
721  // append user write options to the one we have built
722  if( write_options ) newopts << write_options;
723 
724  std::vector< Tag > copyTagList = data.tagList;
725  // append Global ID and Parallel Partition
726  std::string gid_name_tag( "GLOBAL_ID" );
727 
728  // export global id tag, we need it always
729  if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
730  {
731  Tag gid = context.MBI->globalId_tag();
732  copyTagList.push_back( gid );
733  }
734  // also Parallel_Partition PARALLEL_PARTITION
735  std::string pp_name_tag( "PARALLEL_PARTITION" );
736 
737  // write parallel part tag too, if it exists
738  if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
739  {
740  Tag ptag;
741  context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
742  if( ptag ) copyTagList.push_back( ptag );
743  }
744 
745  // Now let us actually write the file to disk with appropriate options
746  // MB_CHK_ERR( context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, copyTagList.data(),
747  // copyTagList.size() ) );
748  MB_CHK_ERR( context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1 ) );
749 
750  return moab::MB_SUCCESS;
751 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, appData::file_set, moab::ParallelComm::get_id(), moab::Interface::globalId_tag(), IMOAB_ASSERT, IMOAB_CHECKPOINTER, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, GlobalContext::MBI, appData::pcomm, moab::Interface::tag_get_handle(), appData::tagList, appData::tagMap, and moab::Interface::write_file().

Referenced by iMOAB_WriteMesh().

◆ split_tag_names()

static void split_tag_names ( std::string  input_names,
std::string &  separator,
std::vector< std::string > &  list_tag_names 
)
static

Definition at line 1467 of file iMOAB.cpp.

1470 {
1471  size_t pos = 0;
1472  std::string token;
1473  while( ( pos = input_names.find( separator ) ) != std::string::npos )
1474  {
1475  token = input_names.substr( 0, pos );
1476  if( !token.empty() ) list_tag_names.push_back( token );
1477  // std::cout << token << std::endl;
1478  input_names.erase( 0, pos + separator.length() );
1479  }
1480  if( !input_names.empty() )
1481  {
1482  // if leftover something, or if not ended with delimiter
1483  list_tag_names.push_back( input_names );
1484  }
1485  return;
1486 }

Referenced by iMOAB_DefineTagStorage(), iMOAB_GetDoubleTagStorage(), iMOAB_ReceiveElementTag(), iMOAB_SendElementTag(), iMOAB_SetDoubleTagStorage(), and iMOAB_SetDoubleTagStorageWithGid().
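
For example (a sketch with hypothetical tag names), the concatenated-name convention used by the tag transfer routines splits as follows:

    std::vector< std::string > names;
    std::string sep( ":" );
    split_tag_names( "a2oTbot:a2oUbot:a2oVbot", sep, names );
    // names now holds { "a2oTbot", "a2oUbot", "a2oVbot" }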

Variable Documentation

◆ context