Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
iMOAB.cpp File Reference
#include "moab/MOABConfig.h"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "moab/ParCommGraph.hpp"
#include "moab/ParallelMergeMesh.hpp"
#include "moab/IntxMesh/IntxUtils.hpp"
#include "DebugOutput.hpp"
#include "moab/iMOAB.h"
#include "mhdf.h"
#include <H5Tpublic.h>
#include "moab/CartVect.hpp"
#include "MBTagConventions.hpp"
#include "moab/MeshTopoUtil.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/MergeMesh.hpp"
#include <cassert>
#include <sstream>
#include <iostream>

Go to the source code of this file.

Classes

struct  appData
 
struct  GlobalContext
 

Functions

ErrCode iMOAB_Initialize (int argc, iMOAB_String *argv)
 Initialize the iMOAB interface implementation. More...
 
ErrCode iMOAB_InitializeFortran ()
 Initialize the iMOAB interface implementation from Fortran driver. More...
 
ErrCode iMOAB_Finalize ()
 Finalize the iMOAB interface implementation. More...
 
ErrCode iMOAB_RegisterApplication (const iMOAB_String app_name, MPI_Comm *comm, int *compid, iMOAB_AppID pid)
 
ErrCode iMOAB_RegisterApplicationFortran (const iMOAB_String app_name, int *comm, int *compid, iMOAB_AppID pid)
 
ErrCode iMOAB_DeregisterApplication (iMOAB_AppID pid)
 De-Register application: delete mesh (set) associated with the application ID. More...
 
ErrCode iMOAB_DeregisterApplicationFortran (iMOAB_AppID pid)
 De-Register the Fortran based application: delete mesh (set) associated with the application ID. More...
 
static void split_tag_names (std::string input_names, std::string &separator, std::vector< std::string > &list_tag_names)
 
ErrCode iMOAB_ReadHeaderInfo (const iMOAB_String filename, int *num_global_vertices, int *num_global_elements, int *num_dimension, int *num_parts)
 It should be called on the master task only; the information obtained can be broadcast to other tasks by the user. It is a fast lookup in the header of the file. More...
 
ErrCode iMOAB_LoadMesh (iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String read_options, int *num_ghost_layers)
 Load a MOAB mesh file in parallel and exchange ghost layers as requested. More...
 
ErrCode iMOAB_WriteMesh (iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options)
 Write a MOAB mesh along with the solution tags to a file. More...
 
ErrCode iMOAB_WriteLocalMesh (iMOAB_AppID pid, iMOAB_String prefix)
 Write a local MOAB mesh copy. More...
 
ErrCode iMOAB_UpdateMeshInfo (iMOAB_AppID pid)
 Update local mesh data structure, from file information. More...
 
ErrCode iMOAB_GetMeshInfo (iMOAB_AppID pid, int *num_visible_vertices, int *num_visible_elements, int *num_visible_blocks, int *num_visible_surfaceBC, int *num_visible_vertexBC)
 Retrieve all the important mesh information related to vertices, elements, vertex and surface boundary conditions. More...
 
ErrCode iMOAB_GetVertexID (iMOAB_AppID pid, int *vertices_length, iMOAB_GlobalID *global_vertex_ID)
 Get the global vertex IDs for all locally visible (owned and shared/ghosted) vertices. More...
 
ErrCode iMOAB_GetVertexOwnership (iMOAB_AppID pid, int *vertices_length, int *visible_global_rank_ID)
 Get vertex ownership information. More...
 
ErrCode iMOAB_GetVisibleVerticesCoordinates (iMOAB_AppID pid, int *coords_length, double *coordinates)
 Get vertex coordinates for all local (owned and ghosted) vertices. More...
 
ErrCode iMOAB_GetBlockID (iMOAB_AppID pid, int *block_length, iMOAB_GlobalID *global_block_IDs)
 Get the global block IDs for all locally visible (owned and shared/ghosted) blocks. More...
 
ErrCode iMOAB_GetBlockInfo (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *vertices_per_element, int *num_elements_in_block)
 Get the global block information and number of visible elements belonging to a block (MATERIAL SET). More...
 
ErrCode iMOAB_GetVisibleElementsInfo (iMOAB_AppID pid, int *num_visible_elements, iMOAB_GlobalID *element_global_IDs, int *ranks, iMOAB_GlobalID *block_IDs)
 Get the visible elements information. More...
 
ErrCode iMOAB_GetBlockElementConnectivities (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *connectivity_length, int *element_connectivity)
 Get the connectivities for elements contained within a certain block. More...
 
ErrCode iMOAB_GetElementConnectivity (iMOAB_AppID pid, iMOAB_LocalID *elem_index, int *connectivity_length, int *element_connectivity)
 Get the connectivity for one element only. More...
 
ErrCode iMOAB_GetElementOwnership (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *num_elements_in_block, int *element_ownership)
 Get the element ownership within a certain block, i.e., the processor ID of the element owner. More...
 
ErrCode iMOAB_GetElementID (iMOAB_AppID pid, iMOAB_GlobalID *global_block_ID, int *num_elements_in_block, iMOAB_GlobalID *global_element_ID, iMOAB_LocalID *local_element_ID)
 Get the global IDs for all locally visible elements belonging to a particular block. More...
 
ErrCode iMOAB_GetPointerToSurfaceBC (iMOAB_AppID pid, int *surface_BC_length, iMOAB_LocalID *local_element_ID, int *reference_surface_ID, int *boundary_condition_value)
 Get the surface boundary condition information. More...
 
ErrCode iMOAB_GetPointerToVertexBC (iMOAB_AppID pid, int *vertex_BC_length, iMOAB_LocalID *local_vertex_ID, int *boundary_condition_value)
 Get the vertex boundary condition information. More...
 
ErrCode iMOAB_DefineTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_name, int *tag_type, int *components_per_entity, int *tag_index)
 Define a MOAB Tag corresponding to the application depending on requested types. More...
 
ErrCode iMOAB_SetIntTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_name, int *num_tag_storage_length, int *ent_type, int *tag_storage_data)
 Store the specified values in a MOAB integer Tag. More...
 
ErrCode iMOAB_GetIntTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_name, int *num_tag_storage_length, int *ent_type, int *tag_storage_data)
 Get the specified values in a MOAB integer Tag. More...
 
ErrCode iMOAB_SetDoubleTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_names, int *num_tag_storage_length, int *ent_type, double *tag_storage_data)
 Store the specified values in a MOAB double Tag. More...
 
ErrCode iMOAB_SetDoubleTagStorageWithGid (iMOAB_AppID pid, const iMOAB_String tag_storage_names, int *num_tag_storage_length, int *ent_type, double *tag_storage_data, int *globalIds)
 Store the specified values in a MOAB double Tag, for given ids. More...
 
ErrCode iMOAB_GetDoubleTagStorage (iMOAB_AppID pid, const iMOAB_String tag_storage_names, int *num_tag_storage_length, int *ent_type, double *tag_storage_data)
 Retrieve the specified values in a MOAB double Tag. More...
 
ErrCode iMOAB_SynchronizeTags (iMOAB_AppID pid, int *num_tag, int *tag_indices, int *ent_type)
 Exchange tag values for the given tags across process boundaries. More...
 
ErrCode iMOAB_ReduceTagsMax (iMOAB_AppID pid, int *tag_index, int *ent_type)
 Perform global reductions with the processes in the current application's communicator. Specifically, this routine performs reductions on the maximum value of the tag indicated by tag_index. More...
 
ErrCode iMOAB_GetNeighborElements (iMOAB_AppID pid, iMOAB_LocalID *local_index, int *num_adjacent_elements, iMOAB_LocalID *adjacent_element_IDs)
 Retrieve the adjacencies for the element entities. More...
 
ErrCode iMOAB_CreateVertices (iMOAB_AppID pid, int *coords_len, int *dim, double *coordinates)
 Create vertices for an app; it assumes the application has no prior vertices. More...
 
ErrCode iMOAB_CreateElements (iMOAB_AppID pid, int *num_elem, int *type, int *num_nodes_per_element, int *connectivity, int *block_ID)
 Create elements for an app; it assumes connectivity expressed with 1-based local vertex indices, in order. More...
 
ErrCode iMOAB_SetGlobalInfo (iMOAB_AppID pid, int *num_global_verts, int *num_global_elems)
 Set global information for number of vertices and number of elements; it is usually available from the h5m file, or it can be computed with an MPI_Reduce. More...
 
ErrCode iMOAB_GetGlobalInfo (iMOAB_AppID pid, int *num_global_verts, int *num_global_elems)
 Get global information about number of vertices and number of elements. More...
 
ErrCode iMOAB_ResolveSharedEntities (iMOAB_AppID pid, int *num_verts, int *marker)
 Resolve shared entities using global markers on shared vertices. More...
 
ErrCode iMOAB_DetermineGhostEntities (iMOAB_AppID pid, int *ghost_dim, int *num_ghost_layers, int *bridge_dim)
 Create the requested number of ghost layers for the parallel mesh. More...
 
ErrCode iMOAB_SendMesh (iMOAB_AppID pid, MPI_Comm *join, MPI_Group *receivingGroup, int *rcompid, int *method)
 
ErrCode iMOAB_ReceiveMesh (iMOAB_AppID pid, MPI_Comm *join, MPI_Group *sendingGroup, int *scompid)
 
ErrCode iMOAB_SendElementTag (iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm *join, int *context_id)
 
ErrCode iMOAB_ReceiveElementTag (iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm *join, int *context_id)
 
ErrCode iMOAB_FreeSenderBuffers (iMOAB_AppID pid, int *context_id)
 
ErrCode iMOAB_ComputeCommGraph (iMOAB_AppID pid1, iMOAB_AppID pid2, MPI_Comm *join, MPI_Group *group1, MPI_Group *group2, int *type1, int *type2, int *comp1, int *comp2)
 Compute a communication graph between two MOAB apps, based on ID matching. Operations: Collective. More...
 
ErrCode iMOAB_MergeVertices (iMOAB_AppID pid)
 

Variables

static struct GlobalContext context
 

Function Documentation

◆ iMOAB_ComputeCommGraph()

ErrCode iMOAB_ComputeCommGraph ( iMOAB_AppID  pid1,
iMOAB_AppID  pid2,
MPI_Comm *  join,
MPI_Group *  group1,
MPI_Group *  group2,
int *  type1,
int *  type2,
int *  comp1,
int *  comp2 
)

Compute a communication graph between two MOAB apps, based on ID matching.

Operations: Collective

Definition at line 2836 of file iMOAB.cpp.

2845 {
2846  assert( join );
2847  assert( group1 );
2848  assert( group2 );
2849  ErrorCode rval = MB_SUCCESS;
2850  int localRank = 0, numProcs = 1;
2851 
2852  bool isFortran = false;
2853  if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
2854  if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
2855 
2856  MPI_Comm global = ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2857  MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
2858  MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
2859 
2860  MPI_Comm_rank( global, &localRank );
2861  MPI_Comm_size( global, &numProcs );
2862  // instantiate the par comm graph
2863 
2864  // we should search if we have another pcomm with the same comp ids in the list already
2865  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2866  bool already_exists = false;
2867  if( *pid1 >= 0 )
2868  {
2869  appData& data = context.appDatas[*pid1];
2870  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp2 );
2871  if( mt != data.pgraph.end() ) already_exists = true;
2872  }
2873  if( *pid2 >= 0 )
2874  {
2875  appData& data = context.appDatas[*pid2];
2876  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp1 );
2877  if( mt != data.pgraph.end() ) already_exists = true;
2878  }
2879  // nothing to do if it already exists
2880  if( already_exists )
2881  {
2882 #ifdef VERBOSE
2883  if( !localRank )
2884  std::cout << " parcomgraph already existing between components " << *comp1 << " and " << *comp2
2885  << ". Do not compute again\n";
2886 #endif
2887  return moab::MB_SUCCESS;
2888  }
2889 
2890  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2891  // int coid2)
2892  ParCommGraph* cgraph = NULL;
2893  if( *pid1 >= 0 ) cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
2894  ParCommGraph* cgraph_rev = NULL;
2895  if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
2896 
2897  if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
2898  if( *pid2 >= 0 ) context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev; // from 2 to 1
2899  // each model has a list of global ids that will need to be sent by gs to rendezvous the other
2900  // model on the joint comm
2901  TupleList TLcomp1;
2902  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2903  TupleList TLcomp2;
2904  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2905  // will push_back a new tuple, if needed
2906 
2907  TLcomp1.enableWriteAccess();
2908 
2909  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
2910  Tag gdsTag;
2911  // find the values on first cell
2912  int lenTagType1 = 1;
2913  if( 1 == *type1 || 1 == *type2 )
2914  {
2915  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
2916  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
2917  }
2918  Tag tagType2 = context.MBI->globalId_tag();
2919 
2920  std::vector< int > valuesComp1;
2921  // populate first tuple
2922  if( *pid1 >= 0 )
2923  {
2924  appData& data1 = context.appDatas[*pid1];
2925  EntityHandle fset1 = data1.file_set;
2926  // in case of tempest remap, get the coverage set
2927 #ifdef MOAB_HAVE_TEMPESTREMAP
2928  if( data1.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2929  {
2930  fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2931  // should still have only quads ?
2932  }
2933 #endif
2934  Range ents_of_interest;
2935  if( *type1 == 1 )
2936  {
2937  assert( gdsTag );
2938  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2939  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
2940  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
2941  }
2942  else if( *type1 == 2 )
2943  {
2944  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2945  valuesComp1.resize( ents_of_interest.size() );
2946  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2947  }
2948  else if( *type1 == 3 ) // for FV meshes, just get the global id of cell
2949  {
2950  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
2951  valuesComp1.resize( ents_of_interest.size() );
2952  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2953  }
2954  else
2955  {
2956  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
2957  }
2958  // now fill the tuple list with info and markers
2959  // because we will send only the ids, order and compress the list
2960  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
2961  TLcomp1.resize( uniq.size() );
2962  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
2963  {
2964  // to proc, marker, element local index, index in el
2965  int marker = *sit;
2966  int to_proc = marker % numProcs;
2967  int n = TLcomp1.get_n();
2968  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
2969  TLcomp1.vi_wr[2 * n + 1] = marker;
2970  TLcomp1.inc_n();
2971  }
2972  }
2973 
2974  ProcConfig pc( global ); // proc config does the crystal router
2975  pc.crystal_router()->gs_transfer( 1, TLcomp1,
2976  0 ); // communication towards joint tasks, with markers
2977  // sort by value (key 1)
2978 #ifdef VERBOSE
2979  std::stringstream ff1;
2980  ff1 << "TLcomp1_" << localRank << ".txt";
2981  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2982 #endif
2983  moab::TupleList::buffer sort_buffer;
2984  sort_buffer.buffer_init( TLcomp1.get_n() );
2985  TLcomp1.sort( 1, &sort_buffer );
2986  sort_buffer.reset();
2987 #ifdef VERBOSE
2988  // after sorting
2989  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2990 #endif
2991  // do the same, for the other component, number2, with type2
2992  // start copy
2993  TLcomp2.enableWriteAccess();
2994  // populate second tuple
2995  std::vector< int > valuesComp2;
2996  if( *pid2 >= 0 )
2997  {
2998  appData& data2 = context.appDatas[*pid2];
2999  EntityHandle fset2 = data2.file_set;
3000  // in case of tempest remap, get the coverage set
3001 #ifdef MOAB_HAVE_TEMPESTREMAP
3002  if( data2.tempestData.remapper != NULL ) // this is the case this is part of intx;;
3003  {
3004  fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3005  // should still have only quads ?
3006  }
3007 #endif
3008 
3009  Range ents_of_interest;
3010  if( *type2 == 1 )
3011  {
3012  assert( gdsTag );
3013  rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3014  valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
3015  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
3016  }
3017  else if( *type2 == 2 )
3018  {
3019  rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3020  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
3021  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
3022  }
3023  else if( *type2 == 3 )
3024  {
3025  rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
3026  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
3027  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
3028  }
3029  else
3030  {
3031  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3032  }
3033  // now fill the tuple list with info and markers
3034  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3035  TLcomp2.resize( uniq.size() );
3036  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3037  {
3038  // to proc, marker, element local index, index in el
3039  int marker = *sit;
3040  int to_proc = marker % numProcs;
3041  int n = TLcomp2.get_n();
3042  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
3043  TLcomp2.vi_wr[2 * n + 1] = marker;
3044  TLcomp2.inc_n();
3045  }
3046  }
3047  pc.crystal_router()->gs_transfer( 1, TLcomp2,
3048  0 ); // communication towards joint tasks, with markers
3049  // sort by value (key 1)
3050 #ifdef VERBOSE
3051  std::stringstream ff2;
3052  ff2 << "TLcomp2_" << localRank << ".txt";
3053  TLcomp2.print_to_file( ff2.str().c_str() );
3054 #endif
3055  sort_buffer.buffer_reserve( TLcomp2.get_n() );
3056  TLcomp2.sort( 1, &sort_buffer );
3057  sort_buffer.reset();
3058  // end copy
3059 #ifdef VERBOSE
3060  TLcomp2.print_to_file( ff2.str().c_str() );
3061 #endif
3062  // need to send back the info, from the rendezvous point, for each of the values
3063  /* so go over each value, on local process in joint communicator
3064 
3065  now have to send back the info needed for communication;
3066  loop in in sync over both TLComp1 and TLComp2, in local process;
3067  So, build new tuple lists, to send synchronous communication
3068  populate them at the same time, based on marker, that is indexed
3069  */
3070 
3071  TupleList TLBackToComp1;
3072  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
3073  TLBackToComp1.enableWriteAccess();
3074 
3075  TupleList TLBackToComp2;
3076  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
3077  TLBackToComp2.enableWriteAccess();
3078 
3079  int n1 = TLcomp1.get_n();
3080  int n2 = TLcomp2.get_n();
3081 
3082  int indexInTLComp1 = 0;
3083  int indexInTLComp2 = 0; // advance both, according to the marker
3084  if( n1 > 0 && n2 > 0 )
3085  {
3086 
3087  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
3088  {
3089  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3090  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3091  if( currentValue1 < currentValue2 )
3092  {
3093  // we have a big problem; basically, we are saying that
3094  // dof currentValue is on one model and not on the other
3095  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
3096  indexInTLComp1++;
3097  continue;
3098  }
3099  if( currentValue1 > currentValue2 )
3100  {
3101  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
3102  indexInTLComp2++;
3103  continue;
3104  }
3105  int size1 = 1;
3106  int size2 = 1;
3107  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3108  size1++;
3109  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3110  size2++;
3111  // must be found in both lists, find the start and end indices
3112  for( int i1 = 0; i1 < size1; i1++ )
3113  {
3114  for( int i2 = 0; i2 < size2; i2++ )
3115  {
3116  // send the info back to components
3117  int n = TLBackToComp1.get_n();
3118  TLBackToComp1.reserve();
3119  TLBackToComp1.vi_wr[3 * n] =
3120  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
3121  // came from, info from comp2
3122  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3123  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
3124  n = TLBackToComp2.get_n();
3125  TLBackToComp2.reserve();
3126  TLBackToComp2.vi_wr[3 * n] =
3127  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
3128  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3129  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
3130  // what if there are repeated markers in TLcomp2? increase just index2
3131  }
3132  }
3133  indexInTLComp1 += size1;
3134  indexInTLComp2 += size2;
3135  }
3136  }
3137  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
3138  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3139 
3140  if( *pid1 >= 0 )
3141  {
3142  // we are on original comp 1 tasks
3143  // before ordering
3144  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3145  // processors it communicates with
3146 #ifdef VERBOSE
3147  std::stringstream f1;
3148  f1 << "TLBack1_" << localRank << ".txt";
3149  TLBackToComp1.print_to_file( f1.str().c_str() );
3150 #endif
3151  sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
3152  TLBackToComp1.sort( 1, &sort_buffer );
3153  sort_buffer.reset();
3154 #ifdef VERBOSE
3155  TLBackToComp1.print_to_file( f1.str().c_str() );
3156 #endif
3157  // so we are now on pid1, we now know where each marker has to go
3158  // add a new method to ParCommGraph, to set up the involved_IDs_map
3159  cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
3160  }
3161  if( *pid2 >= 0 )
3162  {
3163  // we are on original comp 2 tasks
3164  // before ordering
3165  // now for each value in TLBackToComp2.vi_rd[3*i+1], on current proc, we know the
3166  // processors it communicates with
3167 #ifdef VERBOSE
3168  std::stringstream f2;
3169  f2 << "TLBack2_" << localRank << ".txt";
3170  TLBackToComp2.print_to_file( f2.str().c_str() );
3171 #endif
3172  sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
3173  TLBackToComp2.sort( 2, &sort_buffer );
3174  sort_buffer.reset();
3175 #ifdef VERBOSE
3176  TLBackToComp2.print_to_file( f2.str().c_str() );
3177 #endif
3178  cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
3179  //
3180  }
3181  return moab::MB_SUCCESS;
3182 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, moab::ProcConfig::crystal_router(), moab::TupleList::enableWriteAccess(), ErrorCode, appData::file_set, moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type(), moab::TupleList::get_n(), moab::Interface::globalId_tag(), moab::TupleList::inc_n(), moab::TupleList::initialize(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, MBQUAD, MBVERTEX, appData::pgraph, moab::TupleList::print_to_file(), moab::TupleList::reserve(), moab::TupleList::buffer::reset(), moab::TupleList::resize(), moab::ParCommGraph::settle_comm_by_ids(), moab::Range::size(), moab::TupleList::sort(), moab::Interface::tag_get_data(), moab::Interface::tag_get_handle(), moab::Interface::tag_get_length(), moab::TupleList::vi_rd, and moab::TupleList::vi_wr.
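
A minimal calling sketch, not taken from the MOAB sources: the component IDs, the one-task group layout, and the use of MPI_COMM_WORLD as the joint communicator are illustrative assumptions. Two components registered on disjoint groups of the joint communicator compute their communication graph by matching GLOBAL_ID values on cells (type 3, FV meshes).

    /* Hypothetical sketch: pid1/pid2 come from iMOAB_RegisterApplication;
       on tasks that do not hold an app, the corresponding *pid is -1. */
    MPI_Comm join = MPI_COMM_WORLD;           /* joint communicator */
    MPI_Group jgroup, grp1, grp2;
    MPI_Comm_group( join, &jgroup );
    int r1[1] = { 0 }, r2[1] = { 1 };         /* illustrative one-task groups */
    MPI_Group_incl( jgroup, 1, r1, &grp1 );
    MPI_Group_incl( jgroup, 1, r2, &grp2 );

    int type1 = 3, type2 = 3;                 /* FV cells: match on GLOBAL_ID */
    int comp1 = 10, comp2 = 20;               /* hypothetical component ids */
    ErrCode rc = iMOAB_ComputeCommGraph( pid1, pid2, &join, &grp1, &grp2,
                                         &type1, &type2, &comp1, &comp2 );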

◆ iMOAB_CreateElements()

ErrCode iMOAB_CreateElements ( iMOAB_AppID  pid,
int *  num_elem,
int *  type,
int *  num_nodes_per_element,
int *  connectivity,
int *  block_ID 
)

Create elements for an app; it assumes connectivity expressed with 1-based local vertex indices, in order.

Note
Operations: Not collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_elem(int*) Number of elements.
[in]type(int*) Type of element (moab type).
[in]num_nodes_per_element(int*) Number of nodes per element.
[in]connectivity(int *) Connectivity array, with respect to local vertices (1-based, assumed contiguous).
[in]block_ID(int *) The local block identifier, which will now contain the elements.
Returns
ErrCode The error code indicating success or failure.

add the new ents to the block set

Definition at line 2239 of file iMOAB.cpp.

2245 {
2246  // Create elements
2247  appData& data = context.appDatas[*pid];
2248 
2249  ReadUtilIface* read_iface;
2250  ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
2251 
2252  EntityType mbtype = (EntityType)( *type );
2253  EntityHandle actual_start_handle;
2254  EntityHandle* array = NULL;
2255  rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
2256 
2257  // fill up with actual connectivity from input; assume the vertices are in order, and start
2258  // vertex is the first in the current data vertex range
2259  EntityHandle firstVertex = data.local_verts[0];
2260 
2261  for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
2262  {
2263  array[j] = connectivity[j] + firstVertex - 1;
2264  } // assumes connectivity uses 1 based array (from fortran, mostly)
2265 
2266  Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
2267 
2268  rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
2269 
2270  data.primary_elems.merge( new_elems );
2271 
2272  // add to adjacency
2273  rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
2274  // organize all new elements in block, with the given block ID; if the block set is not
2275  // existing, create a new mesh set;
2276  Range sets;
2277  int set_no = *block_ID;
2278  const void* setno_ptr = &set_no;
2279  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), &setno_ptr, 1,
2280  sets );
2281  EntityHandle block_set;
2282 
2283  if( MB_FAILURE == rval || sets.empty() )
2284  {
2285  // create a new set, with this block ID
2286  rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
2287 
2288  rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
2289 
2290  // add the material set to file set
2291  rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
2292  }
2293  else
2294  {
2295  block_set = sets[0];
2296  } // first set is the one we want
2297 
2298  /// add the new ents to the block set
2299  rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
2300 
2301  return moab::MB_SUCCESS;
2302 }

References moab::Interface::add_entities(), GlobalContext::appDatas, context, moab::Interface::create_meshset(), moab::Range::empty(), ErrorCode, appData::file_set, moab::ReadUtilIface::get_element_connect(), moab::Interface::get_entities_by_type_and_tag(), appData::local_verts, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, moab::Range::merge(), MESHSET_SET, appData::primary_elems, moab::Interface::query_interface(), moab::Interface::tag_set_data(), and moab::ReadUtilIface::update_adjacencies().
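
A minimal sketch under stated assumptions: pid is a registered application that already holds six vertices (created in the order shown under iMOAB_CreateVertices below), and block ID 100 is arbitrary. The connectivity is 1-based, as the loop over the connectivity array in the definition expects.

    /* Hypothetical sketch: create two quads on vertices 1..6 (1-based). */
    int num_elem = 2;
    int elem_type = 3;                 /* MBQUAD in MOAB's EntityType enum */
    int nodes_per_elem = 4;
    int block_id = 100;                /* hypothetical block identifier */
    int conn[8] = { 1, 2, 5, 4,        /* first quad  */
                    2, 3, 6, 5 };      /* second quad */
    ErrCode rc = iMOAB_CreateElements( pid, &num_elem, &elem_type,
                                       &nodes_per_elem, conn, &block_id );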

◆ iMOAB_CreateVertices()

ErrCode iMOAB_CreateVertices ( iMOAB_AppID  pid,
int *  coords_len,
int *  dim,
double *  coordinates 
)

Create vertices for an app; it assumes the application has no prior vertices.

Note
Operations: Not collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]coords_len(int*) Size of the coordinates array (nverts * dim).
[in]dim(int*) Dimension of the vertex coordinates (usually 3).
[in]coordinates(double*) Coordinates of all vertices, interleaved.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2218 of file iMOAB.cpp.

2219 {
2220  ErrorCode rval;
2221  appData& data = context.appDatas[*pid];
2222 
2223  if( !data.local_verts.empty() ) // we should have no vertices in the app
2224  {
2225  return moab::MB_FAILURE;
2226  }
2227 
2228  int nverts = *coords_len / *dim;
2229 
2230  rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
2231 
2232  rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
2233 
2234  // also add the vertices to the all_verts range
2235  data.all_verts.merge( data.local_verts );
2236  return moab::MB_SUCCESS;
2237 }

References moab::Interface::add_entities(), appData::all_verts, GlobalContext::appDatas, context, moab::Interface::create_vertices(), dim, moab::Range::empty(), ErrorCode, appData::file_set, appData::local_verts, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, and moab::Range::merge().
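
A minimal sketch (pid is assumed to be a registered application with an empty vertex range, as the check on local_verts requires): six vertices of a 2 x 3 planar grid, with interleaved coordinates.

    /* Hypothetical sketch: create 6 vertices; coords_len = nverts * dim. */
    double coords[18] = { 0., 0., 0.,  1., 0., 0.,  2., 0., 0.,
                          0., 1., 0.,  1., 1., 0.,  2., 1., 0. };
    int coords_len = 18, dim = 3;
    ErrCode rc = iMOAB_CreateVertices( pid, &coords_len, &dim, coords );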

◆ iMOAB_DefineTagStorage()

ErrCode iMOAB_DefineTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  tag_type,
int *  components_per_entity,
int *  tag_index 
)

Define a MOAB Tag corresponding to the application depending on requested types.

Note
In MOAB, for most solution vectors, we only need to create a "Dense", "Double" Tag. A sparse tag can be created too. If the tag already exists in the file, it will not be created. If it is a new tag, memory will be allocated when setting the values. Default values are 0 for integer tags, 0.0 for double tags, 0 for entity handle tags.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag name to store/retrieve the data in MOAB.
[in]tag_type(int*) The type of MOAB tag (Dense/Sparse, Double/Int/EntityHandle), enum MOAB_TAG_TYPE.
[in]components_per_entity(int*) The total size of vector dimension per entity for the tag (e.g., number of doubles per entity).
[out]tag_index(int*) The tag index which can be used as identifier in synchronize methods.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1467 of file iMOAB.cpp.

1472 {
1473  // see if the tag is already existing, and if yes, check the type, length
1474  if( *tag_type < 0 || *tag_type > 5 )
1475  {
1476  return moab::MB_FAILURE;
1477  } // we have 6 types of tags supported so far
1478 
1479  DataType tagDataType;
1480  TagType tagType;
1481  void* defaultVal = NULL;
1482  int* defInt = new int[*components_per_entity];
1483  double* defDouble = new double[*components_per_entity];
1484  EntityHandle* defHandle = new EntityHandle[*components_per_entity];
1485 
1486  for( int i = 0; i < *components_per_entity; i++ )
1487  {
1488  defInt[i] = 0;
1489  defDouble[i] = -1e+10;
1490  defHandle[i] = (EntityHandle)0;
1491  }
1492 
1493  switch( *tag_type )
1494  {
1495  case 0:
1496  tagDataType = MB_TYPE_INTEGER;
1497  tagType = MB_TAG_DENSE;
1498  defaultVal = defInt;
1499  break;
1500 
1501  case 1:
1502  tagDataType = MB_TYPE_DOUBLE;
1503  tagType = MB_TAG_DENSE;
1504  defaultVal = defDouble;
1505  break;
1506 
1507  case 2:
1508  tagDataType = MB_TYPE_HANDLE;
1509  tagType = MB_TAG_DENSE;
1510  defaultVal = defHandle;
1511  break;
1512 
1513  case 3:
1514  tagDataType = MB_TYPE_INTEGER;
1515  tagType = MB_TAG_SPARSE;
1516  defaultVal = defInt;
1517  break;
1518 
1519  case 4:
1520  tagDataType = MB_TYPE_DOUBLE;
1521  tagType = MB_TAG_SPARSE;
1522  defaultVal = defDouble;
1523  break;
1524 
1525  case 5:
1526  tagDataType = MB_TYPE_HANDLE;
1527  tagType = MB_TAG_SPARSE;
1528  defaultVal = defHandle;
1529  break;
1530 
1531  default: {
1532  delete[] defInt;
1533  delete[] defDouble;
1534  delete[] defHandle;
1535  return moab::MB_FAILURE;
1536  } // error
1537  }
1538 
1539  Tag tagHandle;
1540  // split storage names if separated list
1541 
1542  std::string tag_name( tag_storage_name );
1543 
1544  // first separate the names of the tags
1545  // we assume that there are separators ":" between the tag names
1546  std::vector< std::string > tagNames;
1547  std::string separator( ":" );
1548  split_tag_names( tag_name, separator, tagNames );
1549 
1550  ErrorCode rval = moab::MB_SUCCESS; // assume success already :)
1551  appData& data = context.appDatas[*pid];
1552  int already_defined_tags = 0;
1553 
1554  for( size_t i = 0; i < tagNames.size(); i++ )
1555  {
1556  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
1557  tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal );
1558 
1559  if( MB_ALREADY_ALLOCATED == rval )
1560  {
1561  std::map< std::string, Tag >& mTags = data.tagMap;
1562  std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
1563 
1564  if( mit == mTags.end() )
1565  {
1566  // add it to the map
1567  mTags[tagNames[i]] = tagHandle;
1568  // push it to the list of tags, too
1569  *tag_index = (int)data.tagList.size();
1570  data.tagList.push_back( tagHandle );
1571  }
1572  rval = MB_SUCCESS;
1573  already_defined_tags++;
1574  }
1575  else if( MB_SUCCESS == rval )
1576  {
1577  data.tagMap[tagNames[i]] = tagHandle;
1578  *tag_index = (int)data.tagList.size();
1579  data.tagList.push_back( tagHandle );
1580  }
1581  else
1582  {
1583  rval = moab::MB_FAILURE; // some tags were not created
1584  }
1585  }
1586  // we don't need default values anymore, avoid leaks
1587  int rankHere = 0;
1588 #ifdef MOAB_HAVE_MPI
1589  ParallelComm* pco = context.pcomms[*pid];
1590  rankHere = pco->rank();
1591 #endif
1592  if( !rankHere && already_defined_tags )
1593  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
1594  << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
1595  << " tags \n";
1596  delete[] defInt;
1597  delete[] defDouble;
1598  delete[] defHandle;
1599  return rval;
1600 }

References GlobalContext::appDatas, context, ErrorCode, appData::global_id, MB_ALREADY_ALLOCATED, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_DENSE, MB_TAG_EXCL, MB_TAG_SPARSE, MB_TYPE_DOUBLE, MB_TYPE_HANDLE, MB_TYPE_INTEGER, GlobalContext::MBI, appData::name, GlobalContext::pcomms, moab::ParallelComm::rank(), split_tag_names(), moab::Interface::tag_get_handle(), appData::tagList, appData::tagMap, and TagType.

Referenced by iMOAB_ReceiveMesh().
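
A minimal sketch; the tag names are hypothetical and pid is assumed registered. tag_type 1 selects a dense double tag per the switch in the definition above, and several tags can be defined in one call by joining names with ":", which split_tag_names separates.

    /* Hypothetical sketch: define two dense double tags, one value each. */
    char names[] = "Temperature:Pressure";   /* ':'-separated tag names */
    int tag_type = 1;                        /* dense, double */
    int ncomp = 1;                           /* components per entity */
    int tag_index = -1;                      /* set to index of defined tag */
    ErrCode rc = iMOAB_DefineTagStorage( pid, names, &tag_type,
                                         &ncomp, &tag_index );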

◆ iMOAB_DeregisterApplication()

ErrCode iMOAB_DeregisterApplication ( iMOAB_AppID  pid)

De-Register application: delete mesh (set) associated with the application ID.

Note
The associated communicator will be released, and all associated mesh entities and sets will be deleted from the mesh data structure. Associated tag storage data will be freed too.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID
Returns
ErrCode The error code indicating success or failure.

Definition at line 331 of file iMOAB.cpp.

332 {
333  // the file set , parallel comm are all in vectors indexed by *pid
334  // assume we did not delete anything yet
335  // *pid will not be reused if we register another application
336  appData& data = context.appDatas[*pid];
337  int rankHere = 0;
338 #ifdef MOAB_HAVE_MPI
339  ParallelComm* pco = context.pcomms[*pid];
340  rankHere = pco->rank();
341 #endif
342  if( !rankHere )
343  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
344  << " is de-registered now \n";
345 
346  EntityHandle fileSet = data.file_set;
347  // get all entities part of the file set
348  Range fileents;
349  ErrorCode rval = context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true );MB_CHK_ERR( rval );
350 
351  fileents.insert( fileSet );
352 
353  rval = context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents );MB_CHK_ERR( rval ); // append all mesh sets
354 
355 #ifdef MOAB_HAVE_TEMPESTREMAP
356  if( data.tempestData.remapper ) delete data.tempestData.remapper;
357  if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
358 #endif
359 
360 #ifdef MOAB_HAVE_MPI
361 
362  // we could get the pco also with
363  // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
364 
365  std::map< int, ParCommGraph* >& pargs = data.pgraph;
366 
367  // free the parallel comm graphs associated with this app
368  for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
369  {
370  ParCommGraph* pgr = mt->second;
371  delete pgr;
372  pgr = NULL;
373  }
374  if( pco ) delete pco;
375 #endif
376 
377  // delete first all except vertices
378  Range vertices = fileents.subset_by_type( MBVERTEX );
379  Range noverts = subtract( fileents, vertices );
380 
381  rval = context.MBI->delete_entities( noverts );MB_CHK_ERR( rval );
382  // now retrieve connected elements that still exist (maybe in other sets, pids?)
383  Range adj_ents_left;
384  rval = context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
385  rval = context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
386  rval = context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
387 
388  if( !adj_ents_left.empty() )
389  {
390  Range conn_verts;
391  rval = context.MBI->get_connectivity( adj_ents_left, conn_verts );MB_CHK_ERR( rval );
392  vertices = subtract( vertices, conn_verts );
393  }
394 
395  rval = context.MBI->delete_entities( vertices );MB_CHK_ERR( rval );
396 
397  std::map< std::string, int >::iterator mit;
398 
399  for( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
400  {
401  int pidx = mit->second;
402 
403  if( *pid == pidx )
404  {
405  break;
406  }
407  }
408 
409  context.appIdMap.erase( mit );
410  std::map< int, int >::iterator mit1;
411 
412  for( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
413  {
414  int pidx = mit1->second;
415 
416  if( *pid == pidx )
417  {
418  break;
419  }
420  }
421 
422  context.appIdCompMap.erase( mit1 );
423 
424  context.unused_pid--; // we have to go backwards always TODO
425  context.appDatas.pop_back();
426 #ifdef MOAB_HAVE_MPI
427  context.pcomms.pop_back();
428 #endif
429  return moab::MB_SUCCESS;
430 }

References GlobalContext::appDatas, GlobalContext::appIdCompMap, GlobalContext::appIdMap, context, moab::Interface::delete_entities(), moab::Range::empty(), ErrorCode, appData::file_set, moab::Interface::get_adjacencies(), moab::Interface::get_connectivity(), moab::Interface::get_entities_by_handle(), moab::Interface::get_entities_by_type(), appData::global_id, moab::Range::insert(), MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, MBVERTEX, appData::name, GlobalContext::pcomms, appData::pgraph, moab::ParallelComm::rank(), moab::Range::subset_by_type(), moab::subtract(), moab::Interface::UNION, and GlobalContext::unused_pid.

Referenced by iMOAB_DeregisterApplicationFortran().

◆ iMOAB_DeregisterApplicationFortran()

ErrCode iMOAB_DeregisterApplicationFortran ( iMOAB_AppID  pid)

De-Register the Fortran based application: delete mesh (set) associated with the application ID.

Note
The associated communicator will be released, and all associated mesh entities and sets will be deleted from the mesh data structure. Associated tag storage data will be freed too.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID
Returns
ErrCode The error code indicating success or failure.

Definition at line 432 of file iMOAB.cpp.

433 {
434  // release any Fortran specific allocations here before we pass it on
435  context.appDatas[*pid].is_fortran = false;
436 
437  // release all datastructure allocations
438  return iMOAB_DeregisterApplication( pid );
439 }

References GlobalContext::appDatas, context, and iMOAB_DeregisterApplication().

◆ iMOAB_DetermineGhostEntities()

ErrCode iMOAB_DetermineGhostEntities ( iMOAB_AppID  pid,
int *  ghost_dim,
int *  num_ghost_layers,
int *  bridge_dim 
)

Create the requested number of ghost layers for the parallel mesh.

Note
The routine assumes that the shared entities were resolved successfully, and that the mesh is properly distributed on separate tasks.

Operations: Collective.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]ghost_dim(int*) Desired ghost dimension (2 or 3, most of the time).
[in]num_ghost_layers(int*) Number of ghost layers requested.
[in]bridge_dim(int*) Bridge dimension (0 for vertices, 1 for edges, 2 for faces).
Returns
ErrCode The error code indicating success or failure.

Definition at line 2381 of file iMOAB.cpp.

2382 {
2383  ErrorCode rval;
2384 
2385  // verify we have valid ghost layers input specified. If invalid, exit quick.
2386  if( num_ghost_layers && *num_ghost_layers <= 0 )
2387  {
2388  return moab::MB_SUCCESS;
2389  } // nothing to do
2390 
2391  appData& data = context.appDatas[*pid];
2392  ParallelComm* pco = context.pcomms[*pid];
2393 
2394  // maybe we should be passing this too; most of the time we do not need additional ents collective call
2395  constexpr int addl_ents = 0;
2396  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, 1, // get only one layer of ghost entities
2397  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2398  for( int i = 2; i <= *num_ghost_layers; i++ )
2399  {
2400  rval = pco->correct_thin_ghost_layers();MB_CHK_ERR( rval ); // correct for thin layers
2401  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, i, // iteratively get one extra layer
2402  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2403  }
2404 
2405  // Update ghost layer information
2406  data.num_ghost_layers = *num_ghost_layers;
2407 
2408  // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
2409  return iMOAB_UpdateMeshInfo( pid );
2410 }

References GlobalContext::appDatas, context, moab::ParallelComm::correct_thin_ghost_layers(), ErrorCode, moab::ParallelComm::exchange_ghost_cells(), appData::file_set, iMOAB_UpdateMeshInfo(), MB_CHK_ERR, MB_SUCCESS, appData::num_ghost_layers, and GlobalContext::pcomms.
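
A minimal sketch, assuming shared entities were already resolved on pid: request one layer of 2D ghost cells bridged through vertices.

    /* Hypothetical sketch: one layer of face ghosts via vertex bridges. */
    int ghost_dim = 2;                 /* ghost faces (2D cells)  */
    int num_layers = 1;                /* one ghost layer         */
    int bridge_dim = 0;                /* bridge through vertices */
    ErrCode rc = iMOAB_DetermineGhostEntities( pid, &ghost_dim,
                                               &num_layers, &bridge_dim );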

◆ iMOAB_Finalize()

ErrCode iMOAB_Finalize ( void  )

Finalize the iMOAB interface implementation.

It will delete the internally reference-counted MOAB instance, if the reference count reaches 0.

Operations: Collective

Returns
ErrCode The error code indicating success or failure.

Definition at line 187 of file iMOAB.cpp.

188 {
189  context.refCountMB--;
190 
191  if( 0 == context.refCountMB )
192  {
193  delete context.MBI;
194  }
195 
196  return MB_SUCCESS;
197 }

References context, MB_SUCCESS, GlobalContext::MBI, and GlobalContext::refCountMB.
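
A minimal end-to-end lifecycle sketch; the application name and component id are arbitrary, error checking is omitted, and it assumes the iMOAB_AppID typedef is a pointer to the int application index, as the pid usage above indicates.

    #include "moab/iMOAB.h"
    #include <mpi.h>

    int main( int argc, char** argv )
    {
        MPI_Init( &argc, &argv );
        iMOAB_Initialize( argc, argv );   /* ref-counts the MOAB instance */

        char appname[] = "DEMO_APP";      /* hypothetical application name */
        int compid = 8;                   /* hypothetical component id */
        int appID  = -1;
        iMOAB_AppID pid = &appID;
        MPI_Comm comm = MPI_COMM_WORLD;
        iMOAB_RegisterApplication( appname, &comm, &compid, pid );

        /* ... load or create a mesh, define tags, compute ... */

        iMOAB_DeregisterApplication( pid );
        iMOAB_Finalize();                 /* deletes MOAB when count hits 0 */
        MPI_Finalize();
        return 0;
    }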

◆ iMOAB_FreeSenderBuffers()

ErrCode iMOAB_FreeSenderBuffers ( iMOAB_AppID  pid,
int *  context_id 
)

Definition at line 2819 of file iMOAB.cpp.

2820 {
2821  // need first to find the pgraph that holds the information we need
2822  // this will be called on sender side only
2823  appData& data = context.appDatas[*pid];
2824  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2825  if( mt == data.pgraph.end() ) return moab::MB_FAILURE; // error
2826 
2827  mt->second->release_send_buffers();
2828  return moab::MB_SUCCESS;
2829 }

References GlobalContext::appDatas, context, MB_SUCCESS, and appData::pgraph.
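
A hypothetical sender-side sketch: the tag name and ids are assumptions, join is the joint communicator used when the graph was set up, and context_id is the key under which the ParCommGraph was stored, i.e., the other component's id per the pgraph maps above.

    /* Hypothetical sketch: after sending a tag, release the send buffers. */
    char tname[] = "Temperature";        /* hypothetical tag name */
    int context_id = 20;                 /* id of the receiving component */
    ErrCode rc = iMOAB_SendElementTag( pid, tname, &join, &context_id );
    /* ... the paired iMOAB_ReceiveElementTag runs on the receiver side ... */
    rc = iMOAB_FreeSenderBuffers( pid, &context_id );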

◆ iMOAB_GetBlockElementConnectivities()

ErrCode iMOAB_GetBlockElementConnectivities ( iMOAB_AppID  pid,
iMOAB_GlobalID *  global_block_ID,
int *  connectivity_length,
int *  element_connectivity 
)

Get the connectivities for elements contained within a certain block.

Note
Input is the global block ID. Should we change to the visible block's local index?

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID*) The global block ID of the set being queried.
[in]connectivity_length(int*) The allocated size of the array (typical size := vertices_per_element * num_elements_in_block).
[out]element_connectivity(int*) The connectivity array to store element ordering in MOAB canonical numbering scheme (array allocated by user); the array contains vertex indices in the local numbering order for vertices. Elements are in the same order as provided by GetElementOwnership and GetElementID.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1159 of file iMOAB.cpp.

1163 {
1164  assert( global_block_ID ); // ensure global block ID argument is not null
1165  assert( connectivity_length ); // ensure connectivity length argument is not null
1166 
1167  appData& data = context.appDatas[*pid];
1168  std::map< int, int >& matMap = data.matIndex;
1169  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1170 
1171  if( it == matMap.end() )
1172  {
1173  return moab::MB_FAILURE;
1174  } // error in finding block with id
1175 
1176  int blockIndex = matMap[*global_block_ID];
1177  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1178  std::vector< EntityHandle > elems;
1179 
1180  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1181 
1182  if( elems.empty() )
1183  {
1184  return moab::MB_FAILURE;
1185  }
1186 
1187  std::vector< EntityHandle > vconnect;
1188  rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
1189 
1190  if( *connectivity_length != (int)vconnect.size() )
1191  {
1192  return moab::MB_FAILURE;
1193  } // mismatched sizes
1194 
1195  for( int i = 0; i < *connectivity_length; i++ )
1196  {
1197  int inx = data.all_verts.index( vconnect[i] );
1198 
1199  if( -1 == inx )
1200  {
1201  return moab::MB_FAILURE;
1202  } // error, vertex not in local range
1203 
1204  element_connectivity[i] = inx;
1205  }
1206 
1207  return moab::MB_SUCCESS;
1208 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, moab::Interface::get_connectivity(), moab::Interface::get_entities_by_handle(), moab::Range::index(), appData::mat_sets, appData::matIndex, MB_CHK_ERR, MB_SUCCESS, and GlobalContext::MBI.
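
A minimal sketch; the block id and sizes are assumptions, and in practice vpe and nelem come from a prior iMOAB_GetBlockInfo call.

    /* Hypothetical sketch: fetch the connectivity of one block. */
    iMOAB_GlobalID block_id = 100;     /* hypothetical block id */
    int vpe = 4, nelem = 2;            /* from iMOAB_GetBlockInfo */
    int conn_len = vpe * nelem;
    int conn[8];                       /* sized as vpe * nelem */
    ErrCode rc = iMOAB_GetBlockElementConnectivities( pid, &block_id,
                                                      &conn_len, conn );
    /* conn[i] are 0-based indices into the local visible vertex range */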

◆ iMOAB_GetBlockID()

ErrCode iMOAB_GetBlockID ( iMOAB_AppID  pid,
int *  block_length,
iMOAB_GlobalID *  global_block_IDs 
)

Get the global block IDs for all locally visible (owned and shared/ghosted) blocks.

Note
Block IDs correspond to MATERIAL_SET tags for material sets. Usually the block ID is exported from Cubit as a unique integer value. Local blocks come first, followed by fully ghosted blocks: local blocks have at least one owned cell/element, while ghost blocks have only ghost cells. Internally, a block corresponds to a mesh set with a MATERIAL_SET tag value equal to the block ID.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]block_length(int*) The allocated size of array (typical size := num_visible_blocks).
[out]global_block_IDs(iMOAB_GlobalID*) The global IDs for all locally visible blocks (array allocated by user).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1036 of file iMOAB.cpp.

1037 {
1038  // local id blocks? they are counted from 0 to number of visible blocks ...
1039  // will actually return material set tag value for global
1040  Range& matSets = context.appDatas[*pid].mat_sets;
1041 
1042  if( *block_length != (int)matSets.size() )
1043  {
1044  return moab::MB_FAILURE;
1045  }
1046 
1047  // return material set tag values; gtags[0] is the material set tag
1048  ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
1049 
1050  // populate map with index
1051  std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
1052  for( unsigned i = 0; i < matSets.size(); i++ )
1053  {
1054  matIdx[global_block_IDs[i]] = i;
1055  }
1056 
1057  return moab::MB_SUCCESS;
1058 }

References GlobalContext::appDatas, context, ErrorCode, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::Range::size(), and moab::Interface::tag_get_data().
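
A minimal sketch; the block count is assumed known (e.g., from iMOAB_GetMeshInfo).

    /* Hypothetical sketch: global IDs of all visible blocks. */
    int nblocks = 2;                   /* e.g., from iMOAB_GetMeshInfo */
    iMOAB_GlobalID block_ids[2];
    ErrCode rc = iMOAB_GetBlockID( pid, &nblocks, block_ids );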

◆ iMOAB_GetBlockInfo()

ErrCode iMOAB_GetBlockInfo ( iMOAB_AppID  pid,
iMOAB_GlobalID *  global_block_ID,
int *  vertices_per_element,
int *  num_elements_in_block 
)

Get the global block information and number of visible elements belonging to a block (MATERIAL SET).

Note
A block has to be homogeneous; it can contain elements of a single type only.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID*) The global block ID of the set to be queried.
[out]vertices_per_element(int*) The number of vertices per element.
[out]num_elements_in_block(int*) The number of elements in block.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1060 of file iMOAB.cpp.

1064 {
1065  assert( global_block_ID );
1066 
1067  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1068  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1069 
1070  if( it == matMap.end() )
1071  {
1072  return moab::MB_FAILURE;
1073  } // error in finding block with id
1074 
1075  int blockIndex = matMap[*global_block_ID];
1076  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1077  Range blo_elems;
1078  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
1079 
1080  if( MB_SUCCESS != rval || blo_elems.empty() )
1081  {
1082  return moab::MB_FAILURE;
1083  }
1084 
1085  EntityType type = context.MBI->type_from_handle( blo_elems[0] );
1086 
1087  if( !blo_elems.all_of_type( type ) )
1088  {
1089  return moab::MB_FAILURE;
1090  } // not all of same type
1091 
1092  const EntityHandle* conn = NULL;
1093  int num_verts = 0;
1094  rval = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
1095 
1096  *vertices_per_element = num_verts;
1097  *num_elements_in_block = (int)blo_elems.size();
1098 
1099  return moab::MB_SUCCESS;
1100 }

References moab::Range::all_of_type(), GlobalContext::appDatas, context, moab::Range::empty(), ErrorCode, moab::Interface::get_connectivity(), moab::Interface::get_entities_by_handle(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::Range::size(), and moab::Interface::type_from_handle().
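
A minimal sketch; the block id is an assumption.

    /* Hypothetical sketch: element shape and count for one block. */
    iMOAB_GlobalID block_id = 100;     /* hypothetical block id */
    int vpe = 0, nelem = 0;
    ErrCode rc = iMOAB_GetBlockInfo( pid, &block_id, &vpe, &nelem );
    /* vpe = vertices per element, nelem = visible elements in the block */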

◆ iMOAB_GetDoubleTagStorage()

ErrCode iMOAB_GetDoubleTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_names,
int *  num_tag_storage_length,
int *  ent_type,
double *  tag_storage_data 
)

Retrieve the specified values in a MOAB double Tag.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_names(iMOAB_String) The tag names to retrieve the data in MOAB, separated by ":".
[in]num_tag_storage_length(int*) The size of total tag storage data (e.g., num_visible_vertices*components_per_entity*num_tags or num_visible_elements*components_per_entity*num_tags).
[in]ent_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[out]tag_storage_data(double*) The array data of type double to replace the internal tag memory; the data is assumed to be contiguous over the local set of visible entities (either vertices or elements), unrolled by tag.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2035 of file iMOAB.cpp.

2040 {
2041  ErrorCode rval;
2042  // exactly the same code, except tag type check
2043  std::string tag_names( tag_storage_names );
2044  // exactly the same code as for int tag :) maybe should check the type of tag too
2045  std::vector< std::string > tagNames;
2046  std::vector< Tag > tagHandles;
2047  std::string separator( ":" );
2048  split_tag_names( tag_names, separator, tagNames );
2049 
2050  appData& data = context.appDatas[*pid];
2051 
2052  // set it on a subset of entities, based on type and length
2053  Range* ents_to_get = NULL;
2054 
2055  if( *ent_type == 0 ) // vertices
2056  {
2057  ents_to_get = &data.all_verts;
2058  }
2059  else if( *ent_type == 1 )
2060  {
2061  ents_to_get = &data.primary_elems;
2062  }
2063  int nents_to_get = (int)ents_to_get->size();
2064  int position = 0;
2065  for( size_t i = 0; i < tagNames.size(); i++ )
2066  {
2067  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
2068  {
2069  return moab::MB_FAILURE;
2070  } // tag not defined
2071 
2072  Tag tag = data.tagMap[tagNames[i]];
2073 
2074  int tagLength = 0;
2075  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
2076 
2077  DataType dtype;
2078  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
2079 
2080  if( dtype != MB_TYPE_DOUBLE )
2081  {
2082  return moab::MB_FAILURE;
2083  }
2084 
2085  if( position + nents_to_get * tagLength > *num_tag_storage_length )
2086  return moab::MB_FAILURE; // too many entity values to get
2087 
2088  rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
2089  position = position + nents_to_get * tagLength;
2090  }
2091 
2092  return moab::MB_SUCCESS; // no error
2093 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, MB_TYPE_DOUBLE, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), split_tag_names(), moab::Interface::tag_get_data(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), and appData::tagMap.
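
A minimal sketch; the tag names and sizes are assumptions, and the names must already be defined via iMOAB_DefineTagStorage.

    /* Hypothetical sketch: read two double tags on vertices in one call. */
    char names[] = "Temperature:Pressure";
    int ent_type = 0;                  /* 0 = vertices */
    int total = 6 * 1 * 2;             /* nverts * ncomp * ntags (assumed) */
    double vals[12];
    ErrCode rc = iMOAB_GetDoubleTagStorage( pid, names, &total,
                                            &ent_type, vals );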

◆ iMOAB_GetElementConnectivity()

ErrCode iMOAB_GetElementConnectivity ( iMOAB_AppID  pid,
iMOAB_LocalID *  elem_index,
int *  connectivity_length,
int *  element_connectivity 
)

Get the connectivity for one element only.

Note
This is a convenience method and in general should not be used unless necessary. You should use coalesced calls for multiple elements for efficiency.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]elem_index(iMOAB_LocalID *) Local element index.
[in,out]connectivity_length(int *) On input, maximum length of connectivity. On output, actual length.
[out]element_connectivity(int*) The connectivity array to store connectivity in MOAB canonical numbering scheme. Array contains vertex indices in the local numbering order for vertices.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1210 of file iMOAB.cpp.

1214 {
1215  assert( elem_index ); // ensure element index argument is not null
1216  assert( connectivity_length ); // ensure connectivity length argument is not null
1217 
1218  appData& data = context.appDatas[*pid];
1219  assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
1220 
1221  int num_nodes;
1222  const EntityHandle* conn;
1223 
1224  EntityHandle eh = data.primary_elems[*elem_index];
1225 
1226  ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
1227 
1228  if( *connectivity_length < num_nodes )
1229  {
1230  return moab::MB_FAILURE;
1231  } // wrong number of vertices
1232 
1233  for( int i = 0; i < num_nodes; i++ )
1234  {
1235  int index = data.all_verts.index( conn[i] );
1236 
1237  if( -1 == index )
1238  {
1239  return moab::MB_FAILURE;
1240  }
1241 
1242  element_connectivity[i] = index;
1243  }
1244 
1245  *connectivity_length = num_nodes;
1246 
1247  return moab::MB_SUCCESS;
1248 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, moab::Interface::get_connectivity(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, and moab::Range::size().
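
A minimal sketch; the element index and buffer capacity are assumptions.

    /* Hypothetical sketch: connectivity of the first visible element. */
    iMOAB_LocalID elem = 0;
    int conn_len = 27;                 /* capacity; actual length on return */
    int conn[27];
    ErrCode rc = iMOAB_GetElementConnectivity( pid, &elem, &conn_len, conn );
    /* conn_len now holds the element's number of nodes */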

◆ iMOAB_GetElementID()

ErrCode iMOAB_GetElementID ( iMOAB_AppID  pid,
iMOAB_GlobalID *  global_block_ID,
int *  num_elements_in_block,
iMOAB_GlobalID *  global_element_ID,
iMOAB_LocalID *  local_element_ID 
)

Get the global IDs for all locally visible elements belonging to a particular block.

Note
The method will also return the local index of each element, within the local range that contains all visible elements.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID*) The global block ID of the set being queried.
[in]num_elements_in_block(int*) The allocated size of global element ID array, same as num_elements_in_block returned from GetBlockInfo().
[out]global_element_ID(iMOAB_GlobalID*) The global IDs for all locally visible elements (array allocated by user).
[out]local_element_ID(iMOAB_LocalID*) (Optional) The local IDs for all locally visible elements (index in the range of all primary elements in the rank)
Returns
ErrCode The error code indicating success or failure.

Definition at line 1300 of file iMOAB.cpp.

1305 {
1306  assert( global_block_ID ); // ensure global block ID argument is not null
1307  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1308 
1309  appData& data = context.appDatas[*pid];
1310  std::map< int, int >& matMap = data.matIndex;
1311 
1312  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1313 
1314  if( it == matMap.end() )
1315  {
1316  return moab::MB_FAILURE;
1317  } // error in finding block with id
1318 
1319  int blockIndex = matMap[*global_block_ID];
1320  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1321  Range elems;
1322  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1323 
1324  if( elems.empty() )
1325  {
1326  return moab::MB_FAILURE;
1327  }
1328 
1329  if( *num_elements_in_block != (int)elems.size() )
1330  {
1331  return moab::MB_FAILURE;
1332  } // bad memory allocation
1333 
1334  rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
1335 
1336  // check that elems are among primary_elems in data
1337  for( int i = 0; i < *num_elements_in_block; i++ )
1338  {
1339  local_element_ID[i] = data.primary_elems.index( elems[i] );
1340 
1341  if( -1 == local_element_ID[i] )
1342  {
1343  return moab::MB_FAILURE;
1344  } // error, not in local primary elements
1345  }
1346 
1347  return moab::MB_SUCCESS;
1348 }

References GlobalContext::appDatas, context, moab::Range::empty(), ErrorCode, moab::Interface::get_entities_by_handle(), GlobalContext::globalID_tag, moab::Range::index(), appData::mat_sets, appData::matIndex, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), and moab::Interface::tag_get_data().
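
A hedged sketch of the block-query workflow: block_id is assumed to come from iMOAB_GetBlockID and nelems from iMOAB_GetBlockInfo; error handling is reduced to a single success check.

    #include <stdlib.h>
    #include "moab/iMOAB.h"

    /* global and local IDs for the elements of one block (sketch) */
    void fetch_block_element_ids( iMOAB_AppID pid, iMOAB_GlobalID block_id, int nelems )
    {
        iMOAB_GlobalID* gids = (iMOAB_GlobalID*)malloc( nelems * sizeof( iMOAB_GlobalID ) );
        iMOAB_LocalID* lids = (iMOAB_LocalID*)malloc( nelems * sizeof( iMOAB_LocalID ) );
        if( 0 == iMOAB_GetElementID( pid, &block_id, &nelems, gids, lids ) )
        {
            /* gids[i]: global ID of element i in the block;
               lids[i]: its index in the local range of primary elements */
        }
        free( gids );
        free( lids );
    }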

◆ iMOAB_GetElementOwnership()

ErrCode iMOAB_GetElementOwnership ( iMOAB_AppID  pid,
iMOAB_GlobalID global_block_ID,
int *  num_elements_in_block,
int *  element_ownership 
)

Get the element ownership within a certain block, i.e., the processor ID of the element owner.

Note
Should we use the local block index for input, instead of the global block ID?

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]global_block_ID(iMOAB_GlobalID*) The global block ID of the set being queried.
[in]num_elements_in_block(int*) The allocated size of ownership array, same as num_elements_in_block returned from GetBlockInfo().
[out]element_ownership(int*) The ownership array to store processor ID for all elements (array allocated by user).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1250 of file iMOAB.cpp.

1254 {
1255  assert( global_block_ID ); // ensure global block ID argument is not null
1256  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1257 
1258  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1259 
1260  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1261 
1262  if( it == matMap.end() )
1263  {
1264  return moab::MB_FAILURE;
1265  } // error in finding block with id
1266 
1267  int blockIndex = matMap[*global_block_ID];
1268  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1269  Range elems;
1270 
1271  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1272 
1273  if( elems.empty() )
1274  {
1275  return moab::MB_FAILURE;
1276  }
1277 
1278  if( *num_elements_in_block != (int)elems.size() )
1279  {
1280  return moab::MB_FAILURE;
1281  } // bad memory allocation
1282 
1283  int i = 0;
1284 #ifdef MOAB_HAVE_MPI
1285  ParallelComm* pco = context.pcomms[*pid];
1286 #endif
1287 
1288  for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
1289  {
1290 #ifdef MOAB_HAVE_MPI
1291  rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
1292 #else
1293  element_ownership[i] = 0; /* owned by 0 */
1294 #endif
1295  }
1296 
1297  return moab::MB_SUCCESS;
1298 }

References GlobalContext::appDatas, moab::Range::begin(), context, moab::Range::empty(), moab::Range::end(), ErrorCode, moab::Interface::get_entities_by_handle(), moab::ParallelComm::get_owner(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, GlobalContext::pcomms, and moab::Range::size().

◆ iMOAB_GetGlobalInfo()

ErrCode iMOAB_GetGlobalInfo ( iMOAB_AppID  pid,
int *  num_global_verts,
int *  num_global_elems 
)

Get global information about number of vertices and number of elements.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[out]num_global_verts(int*) The total number of vertices in the mesh.
[out]num_global_elems(int*) The total number of elements in the mesh.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2312 of file iMOAB.cpp.

2313 {
2314  appData& data = context.appDatas[*pid];
2315  if( NULL != num_global_verts )
2316  {
2317  *num_global_verts = data.num_global_vertices;
2318  }
2319  if( NULL != num_global_elems )
2320  {
2321  *num_global_elems = data.num_global_elements;
2322  }
2323 
2324  return moab::MB_SUCCESS;
2325 }

References GlobalContext::appDatas, context, MB_SUCCESS, appData::num_global_elements, and appData::num_global_vertices.
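
Because each output pointer is checked against NULL in the implementation, either value can be requested independently. A minimal sketch, assuming pid comes from a prior registration:

    #include <stdio.h>
    #include "moab/iMOAB.h"

    void report_global_sizes( iMOAB_AppID pid )
    {
        int ngv = 0, nge = 0;
        if( 0 == iMOAB_GetGlobalInfo( pid, &ngv, &nge ) )
            printf( "global mesh: %d vertices, %d elements\n", ngv, nge );
    }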

◆ iMOAB_GetIntTagStorage()

ErrCode iMOAB_GetIntTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  num_tag_storage_length,
int *  entity_type,
int *  tag_storage_data 
)

Get the specified values in a MOAB integer Tag.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag name to store/retrieve the data in MOAB.
[in]num_tag_storage_length(int*) The size of tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity).
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[out]tag_storage_data(int*) The array data of type int to be copied from the internal tag memory; the data is assumed to be contiguous over the local set of visible entities (either vertices or elements).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1659 of file iMOAB.cpp.

1664 {
1665  ErrorCode rval;
1666  std::string tag_name( tag_storage_name );
1667 
1668  appData& data = context.appDatas[*pid];
1669 
1670  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1671  {
1672  return moab::MB_FAILURE;
1673  } // tag not defined
1674 
1675  Tag tag = data.tagMap[tag_name];
1676 
1677  int tagLength = 0;
1678  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1679 
1680  DataType dtype;
1681  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1682 
1683  if( dtype != MB_TYPE_INTEGER )
1684  {
1685  return moab::MB_FAILURE;
1686  }
1687 
1688  // set it on a subset of entities, based on type and length
1689  Range* ents_to_get;
1690 
1691  if( *ent_type == 0 ) // vertices
1692  {
1693  ents_to_get = &data.all_verts;
1694  }
1695  else // if (*ent_type == 1)
1696  {
1697  ents_to_get = &data.primary_elems;
1698  }
1699 
1700  int nents_to_get = *num_tag_storage_length / tagLength;
1701 
1702  if( nents_to_get > (int)ents_to_get->size() )
1703  {
1704  return moab::MB_FAILURE;
1705  } // too many entities to get, or too few
1706 
1707  // restrict the range; everything is contiguous; or not?
1708  // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
1709  // ) );
1710 
1711  rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
1712 
1713  return moab::MB_SUCCESS; // no error
1714 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, MB_TYPE_INTEGER, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), moab::Interface::tag_get_data(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), and appData::tagMap.
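
A hedged sketch of reading an integer tag back on vertices; the tag is assumed to have been defined earlier through iMOAB_DefineTagStorage, with one component per entity, and nverts taken from iMOAB_GetMeshInfo.

    #include <stdlib.h>
    #include "moab/iMOAB.h"

    void fetch_int_tag_on_vertices( iMOAB_AppID pid, int nverts )
    {
        int ent_type = 0;    /* 0 = vertices, 1 = primary elements */
        int length = nverts; /* one component per entity assumed here */
        int* values = (int*)malloc( length * sizeof( int ) );
        /* fails if the tag is undefined, not MB_TYPE_INTEGER, or length too large */
        ErrCode rc = iMOAB_GetIntTagStorage( pid, "GLOBAL_ID", &length, &ent_type, values );
        (void)rc;
        free( values );
    }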

◆ iMOAB_GetMeshInfo()

ErrCode iMOAB_GetMeshInfo ( iMOAB_AppID  pid,
int *  num_visible_vertices,
int *  num_visible_elements,
int *  num_visible_blocks,
int *  num_visible_surfaceBC,
int *  num_visible_vertexBC 
)

Retrieve all the important mesh information related to vertices, elements, vertex and surface boundary conditions.

Note
The numbers of visible vertices and cells include ghost entities. All returned arrays have size 3. Local entities come first, then ghost entities. Shared vertices can be owned, in the MOAB sense, by different tasks. Ghost vertices and cells are always owned by other tasks.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[out]num_visible_vertices(int*) The number of vertices in the current partition/process arranged as: owned/shared only, ghosted, total_visible (array allocated by user, size := 3).
[out]num_visible_elements(int*) The number of elements in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
[out]num_visible_blocks(int*) The number of material sets in local mesh in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
[out]num_visible_surfaceBC(int*) The number of mesh surfaces that have a NEUMANN_SET BC defined in local mesh in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
[out]num_visible_vertexBC(int*) The number of vertices that have a DIRICHLET_SET BC defined in local mesh in current partition/process arranged as: owned only, ghosted/shared, total_visible (array allocated by user, size := 3).
Returns
ErrCode The error code indicating success or failure.

Definition at line 878 of file iMOAB.cpp.

884 {
885  ErrorCode rval;
886  appData& data = context.appDatas[*pid];
887  EntityHandle fileSet = data.file_set;
888 
889  // this will include ghost elements
890  // first clear all data ranges; this can be called after ghosting
891  if( num_visible_elements )
892  {
893  num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
894  // separate ghost and local/owned primary elements
895  num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
896  num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
897  }
898  if( num_visible_vertices )
899  {
900  num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
901  num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
902  // local are those that are not ghosts; they may include shared, not owned vertices
903  num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
904  }
905 
906  if( num_visible_blocks )
907  {
908  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
909  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
910 
911  num_visible_blocks[2] = data.mat_sets.size();
912  num_visible_blocks[0] = num_visible_blocks[2];
913  num_visible_blocks[1] = 0;
914  }
915 
916  if( num_visible_surfaceBC )
917  {
918  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
919  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
920 
921  num_visible_surfaceBC[2] = 0;
922  // count how many faces are in each neu set, and how many regions are
923  // adjacent to them;
924  int numNeuSets = (int)data.neu_sets.size();
925 
926  for( int i = 0; i < numNeuSets; i++ )
927  {
928  Range subents;
929  EntityHandle nset = data.neu_sets[i];
930  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
931 
932  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
933  {
934  EntityHandle subent = *it;
935  Range adjPrimaryEnts;
936  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
937 
938  num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
939  }
940  }
941 
942  num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
943  num_visible_surfaceBC[1] = 0;
944  }
945 
946  if( num_visible_vertexBC )
947  {
948  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
949  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
950 
951  num_visible_vertexBC[2] = 0;
952  int numDiriSets = (int)data.diri_sets.size();
953 
954  for( int i = 0; i < numDiriSets; i++ )
955  {
956  Range verts;
957  EntityHandle diset = data.diri_sets[i];
958  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
959 
960  num_visible_vertexBC[2] += (int)verts.size();
961  }
962 
963  num_visible_vertexBC[0] = num_visible_vertexBC[2];
964  num_visible_vertexBC[1] = 0;
965  }
966 
967  return moab::MB_SUCCESS;
968 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::begin(), context, appData::dimension, appData::diri_sets, GlobalContext::dirichlet_tag, moab::Range::end(), ErrorCode, appData::file_set, moab::Interface::get_adjacencies(), moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type_and_tag(), appData::ghost_elems, appData::ghost_vertices, appData::mat_sets, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, appData::neu_sets, GlobalContext::neumann_tag, appData::owned_elems, appData::primary_elems, moab::Range::size(), and moab::Interface::UNION.
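
A minimal sketch showing the three-entry layout of each count array (owned, ghosted/shared, total visible):

    #include <stdio.h>
    #include "moab/iMOAB.h"

    void summarize_mesh( iMOAB_AppID pid )
    {
        int nverts[3], nelems[3], nblocks[3], nsbc[3], ndbc[3];
        if( 0 == iMOAB_GetMeshInfo( pid, nverts, nelems, nblocks, nsbc, ndbc ) )
            printf( "verts: %d owned/shared + %d ghost = %d visible; visible elems: %d\n",
                    nverts[0], nverts[1], nverts[2], nelems[2] );
    }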

◆ iMOAB_GetNeighborElements()

ErrCode iMOAB_GetNeighborElements ( iMOAB_AppID  pid,
iMOAB_LocalID local_index,
int *  num_adjacent_elements,
iMOAB_LocalID adjacent_element_IDs 
)

Retrieve the adjacencies for the element entities.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]local_index(iMOAB_LocalID*) The local element ID for which adjacency information is needed.
[out]num_adjacent_elements(int*) The total number of adjacent elements.
[out]adjacent_element_IDs(iMOAB_LocalID*) The local element IDs of all adjacent elements to the current one (typically, num_total_sides for internal elements or num_total_sides-num_sides_on_boundary for boundary elements).
Returns
ErrCode The error code indicating success or failure.

Definition at line 2180 of file iMOAB.cpp.

2184 {
2185  ErrorCode rval;
2186 
2187  // one neighbor for each subentity of dimension-1
2188  MeshTopoUtil mtu( context.MBI );
2189  appData& data = context.appDatas[*pid];
2190  EntityHandle eh = data.primary_elems[*local_index];
2191  Range adjs;
2192  rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
2193 
2194  if( *num_adjacent_elements < (int)adjs.size() )
2195  {
2196  return moab::MB_FAILURE;
2197  } // not dimensioned correctly
2198 
2199  *num_adjacent_elements = (int)adjs.size();
2200 
2201  for( int i = 0; i < *num_adjacent_elements; i++ )
2202  {
2203  adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
2204  }
2205 
2206  return moab::MB_SUCCESS;
2207 }

References GlobalContext::appDatas, context, appData::dimension, ErrorCode, moab::MeshTopoUtil::get_bridge_adjacencies(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::primary_elems, and moab::Range::size().
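
A sketch of querying the face-neighbors of one element; the bound of 6 sides suits hexahedral meshes and is an assumption of this example.

    #include <stdio.h>
    #include "moab/iMOAB.h"

    void print_neighbors( iMOAB_AppID pid, iMOAB_LocalID elem )
    {
        iMOAB_LocalID adj[6]; /* enough for a HEX; adjust for other cell types */
        int nadj = 6;         /* in: capacity; out: actual neighbor count */
        if( 0 == iMOAB_GetNeighborElements( pid, &elem, &nadj, adj ) )
            for( int i = 0; i < nadj; i++ )
                printf( "element %d borders element %d\n", (int)elem, (int)adj[i] );
    }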

◆ iMOAB_GetPointerToSurfaceBC()

ErrCode iMOAB_GetPointerToSurfaceBC ( iMOAB_AppID  pid,
int *  surface_BC_length,
iMOAB_LocalID local_element_ID,
int *  reference_surface_ID,
int *  boundary_condition_value 
)

Get the surface boundary condition information.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]surface_BC_length(int*) The allocated size of surface boundary condition array, same as num_visible_surfaceBC returned by GetMeshInfo().
[out]local_element_ID(iMOAB_LocalID*) The local element IDs that contain the side with the surface BC.
[out]reference_surface_ID(int*) The surface number with the BC in the canonical reference element (e.g., 1 to 6 for HEX, 1-4 for TET).
[out]boundary_condition_value(int*) The boundary condition type as obtained from the mesh description (value of the NeumannSet defined on the element).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1350 of file iMOAB.cpp.

1355 {
1356  // we have to fill bc data for neumann sets
1357  ErrorCode rval;
1358 
1359  // it was counted above, in GetMeshInfo
1360  appData& data = context.appDatas[*pid];
1361  int numNeuSets = (int)data.neu_sets.size();
1362 
1363  int index = 0; // index [0, surface_BC_length) for the arrays returned
1364 
1365  for( int i = 0; i < numNeuSets; i++ )
1366  {
1367  Range subents;
1368  EntityHandle nset = data.neu_sets[i];
1369  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
1370 
1371  int neuVal;
1372  rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
1373 
1374  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
1375  {
1376  EntityHandle subent = *it;
1377  Range adjPrimaryEnts;
1378  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
1379 
1380  // get global id of the primary ents, and side number of the quad/subentity
1381  // this is moab ordering
1382  for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
1383  {
1384  EntityHandle primaryEnt = *pit;
1385  // get global id
1386  /*int globalID;
1387  rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
1388  if (MB_SUCCESS!=rval)
1389  return moab::MB_FAILURE;
1390  global_element_ID[index] = globalID;*/
1391 
1392  // get local element id
1393  local_element_ID[index] = data.primary_elems.index( primaryEnt );
1394 
1395  if( -1 == local_element_ID[index] )
1396  {
1397  return moab::MB_FAILURE;
1398  } // did not find the element locally
1399 
1400  int side_number, sense, offset;
1401  rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
1402 
1403  reference_surface_ID[index] = side_number + 1; // moab is from 0 to 5, it needs 1 to 6
1404  boundary_condition_value[index] = neuVal;
1405  index++;
1406  }
1407  }
1408  }
1409 
1410  if( index != *surface_BC_length )
1411  {
1412  return moab::MB_FAILURE;
1413  } // error in array allocations
1414 
1415  return moab::MB_SUCCESS;
1416 }

References GlobalContext::appDatas, moab::Range::begin(), context, appData::dimension, moab::Range::end(), ErrorCode, moab::Interface::get_adjacencies(), moab::Interface::get_entities_by_dimension(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, appData::neu_sets, GlobalContext::neumann_tag, appData::primary_elems, moab::Interface::side_number(), moab::side_number(), moab::Range::size(), and moab::Interface::tag_get_data().
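
A hedged sketch of retrieving the surface BC triples (element, reference side, Neumann value); nsbc is assumed to be num_visible_surfaceBC[2] from iMOAB_GetMeshInfo.

    #include <stdlib.h>
    #include "moab/iMOAB.h"

    void fetch_surface_bcs( iMOAB_AppID pid, int nsbc )
    {
        iMOAB_LocalID* elems = (iMOAB_LocalID*)malloc( nsbc * sizeof( iMOAB_LocalID ) );
        int* sides = (int*)malloc( nsbc * sizeof( int ) );
        int* values = (int*)malloc( nsbc * sizeof( int ) );
        if( 0 == iMOAB_GetPointerToSurfaceBC( pid, &nsbc, elems, sides, values ) )
        {
            /* sides[] holds 1-based reference face numbers,
               values[] the NEUMANN_SET ids attached to them */
        }
        free( elems ); free( sides ); free( values );
    }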

◆ iMOAB_GetPointerToVertexBC()

ErrCode iMOAB_GetPointerToVertexBC ( iMOAB_AppID  pid,
int *  vertex_BC_length,
iMOAB_LocalID local_vertex_ID,
int *  boundary_condition_value 
)

Get the vertex boundary condition information.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]vertex_BC_length(int*) The allocated size of vertex boundary condition array, same as num_visible_vertexBC returned by GetMeshInfo().
[out]local_vertex_ID(iMOAB_LocalID*) The local vertex IDs that have a Dirichlet BC defined.
[out]boundary_condition_value(int*) The boundary condition type as obtained from the mesh description (value of the Dirichlet_Set tag defined on the vertex).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1418 of file iMOAB.cpp.

1422 {
1423  ErrorCode rval;
1424 
1425  // it was counted above, in GetMeshInfo
1426  appData& data = context.appDatas[*pid];
1427  int numDiriSets = (int)data.diri_sets.size();
1428  int index = 0; // index [0, *vertex_BC_length) for the arrays returned
1429 
1430  for( int i = 0; i < numDiriSets; i++ )
1431  {
1432  Range verts;
1433  EntityHandle diset = data.diri_sets[i];
1434  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
1435 
1436  int diriVal;
1437  rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
1438 
1439  for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
1440  {
1441  EntityHandle vt = *vit;
1442  /*int vgid;
1443  rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
1444  if (MB_SUCCESS!=rval)
1445  return moab::MB_FAILURE;
1446  global_vertext_ID[index] = vgid;*/
1447  local_vertex_ID[index] = data.all_verts.index( vt );
1448 
1449  if( -1 == local_vertex_ID[index] )
1450  {
1451  return moab::MB_FAILURE;
1452  } // vertex was not found
1453 
1454  boundary_condition_value[index] = diriVal;
1455  index++;
1456  }
1457  }
1458 
1459  if( *vertex_BC_length != index )
1460  {
1461  return moab::MB_FAILURE;
1462  } // array allocation issue
1463 
1464  return moab::MB_SUCCESS;
1465 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::begin(), context, appData::diri_sets, GlobalContext::dirichlet_tag, moab::Range::end(), ErrorCode, moab::Interface::get_entities_by_dimension(), moab::Range::index(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::Range::size(), and moab::Interface::tag_get_data().

◆ iMOAB_GetVertexID()

ErrCode iMOAB_GetVertexID ( iMOAB_AppID  pid,
int *  vertices_length,
iMOAB_GlobalID global_vertex_ID 
)

Get the global vertex IDs for all locally visible (owned and shared/ghosted) vertices.

Note
The array should be allocated by the user, sized with the total number of visible vertices returned by the iMOAB_GetMeshInfo method.

Operations: Not collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]vertices_length(int*) The allocated size of array (typical size := num_visible_vertices).
[out]global_vertex_ID(iMOAB_GlobalID*) The global IDs for all locally visible vertices (array allocated by user).
Returns
ErrCode The error code indicating success or failure.

Definition at line 970 of file iMOAB.cpp.

971 {
972  IMOAB_CHECKPOINTER( vertices_length, 2 );
973  IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
974 
975  const Range& verts = context.appDatas[*pid].all_verts;
976  // check for problems with array length
977  IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
978 
979  // global id tag is context.globalID_tag
980  return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
981 }

References GlobalContext::appDatas, context, GlobalContext::globalID_tag, IMOAB_ASSERT, IMOAB_CHECKPOINTER, GlobalContext::MBI, moab::Range::size(), and moab::Interface::tag_get_data().
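
A sketch combining this call with iMOAB_GetVertexOwnership (documented next); nverts is assumed to be the total visible vertex count from iMOAB_GetMeshInfo.

    #include <stdlib.h>
    #include "moab/iMOAB.h"

    void fetch_vertex_ids_and_owners( iMOAB_AppID pid, int nverts )
    {
        iMOAB_GlobalID* gids = (iMOAB_GlobalID*)malloc( nverts * sizeof( iMOAB_GlobalID ) );
        int* owners = (int*)malloc( nverts * sizeof( int ) );
        ErrCode rc = iMOAB_GetVertexID( pid, &nverts, gids );
        if( 0 == rc ) rc = iMOAB_GetVertexOwnership( pid, &nverts, owners );
        /* gids[i] and owners[i] describe the same i-th visible vertex */
        free( gids );
        free( owners );
    }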

◆ iMOAB_GetVertexOwnership()

ErrCode iMOAB_GetVertexOwnership ( iMOAB_AppID  pid,
int *  vertices_length,
int *  visible_global_rank_ID 
)

Get vertex ownership information.

For each vertex, identified by its local ID, return the rank of the process that owns it (local, shared, or ghost).

Note
Shared vertices could be owned by different tasks. Local and shared vertices come first, ghost vertices next in the array. Ghost vertices are always owned by a different process ID. The array is allocated by the user, with total size equal to the number of visible vertices.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]vertices_length(int*) The allocated size of array (typically size := num_visible_vertices).
[out]visible_global_rank_ID(int*) The processor rank owning each of the local vertices.
Returns
ErrCode The error code indicating success or failure.

Definition at line 983 of file iMOAB.cpp.

984 {
985  assert( vertices_length && *vertices_length );
986  assert( visible_global_rank_ID );
987 
988  Range& verts = context.appDatas[*pid].all_verts;
989  int i = 0;
990 #ifdef MOAB_HAVE_MPI
991  ParallelComm* pco = context.pcomms[*pid];
992 
993  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
994  {
995  ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
996  }
997 
998  if( i != *vertices_length )
999  {
1000  return moab::MB_FAILURE;
1001  } // warning array allocation problem
1002 
1003 #else
1004 
1005  /* everything owned by proc 0 */
1006  if( (int)verts.size() != *vertices_length )
1007  {
1008  return moab::MB_FAILURE;
1009  } // warning array allocation problem
1010 
1011  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
1012  {
1013  visible_global_rank_ID[i] = 0;
1014  } // all vertices are owned by processor 0, as this is serial run
1015 
1016 #endif
1017 
1018  return moab::MB_SUCCESS;
1019 }

References GlobalContext::appDatas, moab::Range::begin(), context, moab::Range::end(), ErrorCode, moab::ParallelComm::get_owner(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::pcomms, and moab::Range::size().

◆ iMOAB_GetVisibleElementsInfo()

ErrCode iMOAB_GetVisibleElementsInfo ( iMOAB_AppID  pid,
int *  num_visible_elements,
iMOAB_GlobalID element_global_IDs,
int *  ranks,
iMOAB_GlobalID block_IDs 
)

Get the visible elements information.

For all visible elements, return the global IDs, the owning ranks, and the block IDs they belong to.

Note
Operations: Not Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_visible_elements(int*) The number of visible elements (returned by GetMeshInfo).
[out]element_global_IDs(iMOAB_GlobalID*) The global IDs of all visible elements (array allocated by user).
[out]ranks(int*) The owning ranks of elements.
[out]block_IDs(iMOAB_GlobalID*) The block ids containing the elements.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1102 of file iMOAB.cpp.

1107 {
1108  appData& data = context.appDatas[*pid];
1109 #ifdef MOAB_HAVE_MPI
1110  ParallelComm* pco = context.pcomms[*pid];
1111 #endif
1112 
1113  ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
1114 
1115  int i = 0;
1116 
1117  for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
1118  {
1119 #ifdef MOAB_HAVE_MPI
1120  rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
1121 
1122 #else
1123  /* everything owned by task 0 */
1124  ranks[i] = 0;
1125 #endif
1126  }
1127 
1128  for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
1129  {
1130  EntityHandle matMeshSet = *mit;
1131  Range elems;
1132  rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1133 
1134  int valMatTag;
1135  rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
1136 
1137  for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
1138  {
1139  EntityHandle eh = *eit;
1140  int index = data.primary_elems.index( eh );
1141 
1142  if( -1 == index )
1143  {
1144  return moab::MB_FAILURE;
1145  }
1146 
1147  if( -1 >= *num_visible_elements )
1148  {
1149  return moab::MB_FAILURE;
1150  }
1151 
1152  block_IDs[index] = valMatTag;
1153  }
1154  }
1155 
1156  return moab::MB_SUCCESS;
1157 }

References GlobalContext::appDatas, moab::Range::begin(), context, moab::Range::end(), ErrorCode, moab::Interface::get_entities_by_handle(), moab::ParallelComm::get_owner(), GlobalContext::globalID_tag, moab::Range::index(), appData::mat_sets, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, GlobalContext::pcomms, appData::primary_elems, and moab::Interface::tag_get_data().
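
A minimal sketch; nelems is assumed to be the total number of visible elements reported by iMOAB_GetMeshInfo.

    #include <stdlib.h>
    #include "moab/iMOAB.h"

    void fetch_element_overview( iMOAB_AppID pid, int nelems )
    {
        iMOAB_GlobalID* gids = (iMOAB_GlobalID*)malloc( nelems * sizeof( iMOAB_GlobalID ) );
        int* ranks = (int*)malloc( nelems * sizeof( int ) );
        iMOAB_GlobalID* blocks = (iMOAB_GlobalID*)malloc( nelems * sizeof( iMOAB_GlobalID ) );
        ErrCode rc = iMOAB_GetVisibleElementsInfo( pid, &nelems, gids, ranks, blocks );
        (void)rc;
        free( gids ); free( ranks ); free( blocks );
    }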

◆ iMOAB_GetVisibleVerticesCoordinates()

ErrCode iMOAB_GetVisibleVerticesCoordinates ( iMOAB_AppID  pid,
int *  coords_length,
double *  coordinates 
)

Get vertex coordinates for all local (owned and ghosted) vertices.

Note
Coordinates are returned interleaved, in an array allocated by the user. (Do we need an option for blocked coordinates?) The size of the array is the dimension times the number of visible vertices. The local ordering is implicit: owned/shared vertices come first, then ghosts.

Operations: Not Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]coords_length(int*) The size of the allocated coordinate array (array allocated by user, size := 3*num_visible_vertices).
[out]coordinates(double*) The pointer to user allocated memory that will be filled with interleaved coordinates.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1021 of file iMOAB.cpp.

1022 {
1023  Range& verts = context.appDatas[*pid].all_verts;
1024 
1025  // interleaved coordinates, so that means deep copy anyway
1026  if( *coords_length != 3 * (int)verts.size() )
1027  {
1028  return moab::MB_FAILURE;
1029  }
1030 
1031  ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
1032 
1033  return moab::MB_SUCCESS;
1034 }

References GlobalContext::appDatas, context, ErrorCode, moab::Interface::get_coords(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, and moab::Range::size().
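
A minimal sketch; the length must be exactly three times the number of visible vertices.

    #include <stdlib.h>
    #include "moab/iMOAB.h"

    void fetch_coordinates( iMOAB_AppID pid, int nverts )
    {
        int len = 3 * nverts;
        double* xyz = (double*)malloc( len * sizeof( double ) );
        if( 0 == iMOAB_GetVisibleVerticesCoordinates( pid, &len, xyz ) )
        {
            /* vertex i sits at ( xyz[3*i], xyz[3*i+1], xyz[3*i+2] ) */
        }
        free( xyz );
    }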

◆ iMOAB_Initialize()

ErrCode iMOAB_Initialize ( int  argc,
iMOAB_String argv 
)

Initialize the iMOAB interface implementation.

Note
Will create the MOAB instance, if not created already (reference counted).

Operations: Collective

Parameters
[in]argc(int) Number of command line arguments
[in]argv(iMOAB_String*) Command line arguments
Returns
ErrCode

Definition at line 136 of file iMOAB.cpp.

137 {
138  if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
139 
140  context.iArgc = argc;
141  context.iArgv = argv; // shallow copy
142 
143  if( 0 == context.refCountMB )
144  {
145  context.MBI = new( std::nothrow ) moab::Core;
146  // retrieve the default tags
147  const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
148  DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
149  // blocks, visible surfaceBC(neumann), vertexBC (Dirichlet), global id, parallel partition
150  Tag gtags[4];
151 
152  for( int i = 0; i < 4; i++ )
153  {
154 
155  ErrorCode rval =
156  context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY );MB_CHK_ERR( rval );
157  }
158 
159  context.material_tag = gtags[0];
160  context.neumann_tag = gtags[1];
161  context.dirichlet_tag = gtags[2];
162  context.globalID_tag = gtags[3];
163  }
164 
165  context.MPI_initialized = false;
166 #ifdef MOAB_HAVE_MPI
167  int flagInit;
168  MPI_Initialized( &flagInit );
169 
170  if( flagInit && !context.MPI_initialized )
171  {
172  MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
173  MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
174  context.MPI_initialized = true;
175  }
176 #endif
177 
179  return moab::MB_SUCCESS;
180 }

References context, DIRICHLET_SET_TAG_NAME, GlobalContext::dirichlet_tag, ErrorCode, GLOBAL_ID_TAG_NAME, GlobalContext::globalID_tag, GlobalContext::globalrank, GlobalContext::iArgc, GlobalContext::iArgv, IMOAB_CHECKPOINTER, MATERIAL_SET_TAG_NAME, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MB_TAG_ANY, MB_TYPE_INTEGER, GlobalContext::MBI, GlobalContext::MPI_initialized, NEUMANN_SET_TAG_NAME, GlobalContext::neumann_tag, GlobalContext::refCountMB, moab::Interface::tag_get_handle(), and GlobalContext::worldprocs.

Referenced by iMOAB_InitializeFortran().
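
A hedged end-to-end sketch of the initialization lifecycle; the application name "MyApp" and component id 1 are illustrative values only.

    #include <mpi.h>
    #include "moab/iMOAB.h"

    int main( int argc, char* argv[] )
    {
        MPI_Init( &argc, &argv );
        iMOAB_Initialize( argc, argv );

        MPI_Comm comm = MPI_COMM_WORLD;
        int compid = 1; /* must be positive, by convention */
        int pid;        /* filled by the registration call */
        iMOAB_RegisterApplication( "MyApp", &comm, &compid, &pid );

        /* ... load a mesh, query it, write results ... */

        iMOAB_DeregisterApplication( &pid );
        iMOAB_Finalize();
        MPI_Finalize();
        return 0;
    }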

◆ iMOAB_InitializeFortran()

ErrCode iMOAB_InitializeFortran ( void  )

Initialize the iMOAB interface implementation from Fortran driver.

It will create the MOAB instance, if not created already (reference counted).

Operations: Collective

Returns
ErrCode The error code indicating success or failure.

Definition at line 182 of file iMOAB.cpp.

183 {
184  return iMOAB_Initialize( 0, 0 );
185 }

References iMOAB_Initialize().

◆ iMOAB_LoadMesh()

ErrCode iMOAB_LoadMesh ( iMOAB_AppID  pid,
const iMOAB_String  filename,
const iMOAB_String  read_options,
int *  num_ghost_layers 
)

Load a MOAB mesh file in parallel and exchange ghost layers as requested.

Note
All communication is MPI-based, and read options include parallel loading information, resolving shared entities. Local MOAB instance is populated with mesh cells and vertices in the corresponding local partitions.

This will also exchange ghost cells and vertices, as requested. The default bridge dimension is 0 (vertices), and all additional lower dimensional sub-entities are exchanged (mesh edges and faces). The tags in the file are not exchanged by default. Default tag information for GLOBAL_ID, MATERIAL_SET, NEUMANN_SET and DIRICHLET_SET is exchanged. Global ID tag is exchanged for all cells and vertices. Material sets, Neumann sets and Dirichlet sets are all augmented with the ghost entities.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]filename(iMOAB_String) The MOAB mesh file (H5M) to load onto the internal application mesh set.
[in]read_options(iMOAB_String) Additional options for reading the MOAB mesh file in parallel.
[in]num_ghost_layers(int*) The total number of ghost layers to exchange during mesh loading.
Returns
ErrCode The error code indicating success or failure.

Definition at line 607 of file iMOAB.cpp.

611 {
612  IMOAB_CHECKPOINTER( filename, 2 );
613  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
614  IMOAB_CHECKPOINTER( num_ghost_layers, 4 );
615 
616  // make sure we use the file set and pcomm associated with the *pid
617  std::ostringstream newopts;
618  if( read_options ) newopts << read_options;
619 
620 #ifdef MOAB_HAVE_MPI
621 
622  if( context.MPI_initialized )
623  {
624  if( context.worldprocs > 1 )
625  {
626  std::string opts( ( read_options ? read_options : "" ) );
627  std::string pcid( "PARALLEL_COMM=" );
628  std::size_t found = opts.find( pcid );
629 
630  if( found != std::string::npos )
631  {
632  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
633  return moab::MB_FAILURE;
634  }
635 
636  // in serial, apply PARALLEL_COMM option only for h5m files; it does not work for .g
637  // files (used in test_remapping)
638  std::string filen( filename );
639  std::string::size_type idx = filen.rfind( '.' );
640 
641  if( idx != std::string::npos )
642  {
643  std::string extension = filen.substr( idx + 1 );
644  if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
645  newopts << ";;PARALLEL_COMM=" << *pid;
646  }
647 
648  if( *num_ghost_layers >= 1 )
649  {
650  // if we want ghosts, we will want additional entities, the last .1
651  // because the addl ents can be edges, faces that are part of the neumann sets
652  std::string pcid2( "PARALLEL_GHOSTS=" );
653  std::size_t found2 = opts.find( pcid2 );
654 
655  if( found2 != std::string::npos )
656  {
657  std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
658  "number of layers \n";
659  }
660  else
661  {
662  // dimension of primary entities is 3 here, but it could be 2 for climate
663  // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
664  // example: ";PARALLEL_GHOSTS=2.0.1"
665  newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
666  }
667  }
668  }
669  }
670 #else
671  IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
672 #endif
673 
674  // Now let us actually load the MOAB file with the appropriate read options
675  ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
676 
677 #ifdef VERBOSE
678  // some debugging stuff
679  std::ostringstream outfile;
680 #ifdef MOAB_HAVE_MPI
681  int rank = context.pcomms[*pid]->rank();
682  int nprocs = context.pcomms[*pid]->size();
683  outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
684 #else
685  outfile << "TaskMesh_n1.0.h5m";
686 #endif
687  // the mesh contains ghosts too, but they are not part of mat/neumann set
688  // write in serial the file, to see what tags are missing
689  rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval ); // everything on current task, written in serial
690 #endif
691 
692  // Update ghost layer information
693  context.appDatas[*pid].num_ghost_layers = *num_ghost_layers;
694 
695  // Update mesh information
696  return iMOAB_UpdateMeshInfo( pid );
697 }

References GlobalContext::appDatas, context, ErrorCode, IMOAB_ASSERT, IMOAB_CHECKPOINTER, iMOAB_UpdateMeshInfo(), moab::Interface::load_file(), MB_CHK_ERR, GlobalContext::MBI, GlobalContext::MPI_initialized, GlobalContext::pcomms, GlobalContext::worldprocs, and moab::Interface::write_file().
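
A sketch of a typical parallel load; the file name mesh.h5m is hypothetical, and the option string follows the PARALLEL=READ_PART convention of the MOAB readers (an assumption of this sketch). PARALLEL_COMM must not be passed, since it is appended internally as described above.

    #include "moab/iMOAB.h"

    void load_partitioned_mesh( iMOAB_AppID pid )
    {
        char opts[] = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;"
                      "PARALLEL_RESOLVE_SHARED_ENTS";
        int nghosts = 1; /* exchange one ghost layer after reading */
        ErrCode rc = iMOAB_LoadMesh( pid, "mesh.h5m", opts, &nghosts );
        (void)rc;
    }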

◆ iMOAB_MergeVertices()

ErrCode iMOAB_MergeVertices ( iMOAB_AppID  pid)

Definition at line 3186 of file iMOAB.cpp.

3187 {
3188  appData& data = context.appDatas[*pid];
3189  ParallelComm* pco = context.pcomms[*pid];
3190  // collapse vertices and transform cells into triangles/quads /polys
3191  // tags we care about: area, frac, global id
3192  std::vector< Tag > tagsList;
3193  Tag tag;
3194  ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
3195  if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE; // fatal error, abort
3196  tagsList.push_back( tag );
3197  rval = context.MBI->tag_get_handle( "area", tag );
3198  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3199  rval = context.MBI->tag_get_handle( "frac", tag );
3200  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3201  double tol = 1.0e-9;
3202  rval = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
3203 
3204  // clean material sets of cells that were deleted
3205  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
3206  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
3207 
3208  if( !data.mat_sets.empty() )
3209  {
3210  EntityHandle matSet = data.mat_sets[0];
3211  Range elems;
3212  rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
3213  rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
3214  // put back only cells from data.file_set
3215  elems.clear();
3216  rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
3217  rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
3218  }
3219  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
3220 
3221  ParallelMergeMesh pmm( pco, tol );
3222  rval = pmm.merge( data.file_set,
3223  /* do not do local merge*/ false,
3224  /* 2d cells*/ 2 );MB_CHK_ERR( rval );
3225 
3226  // assign global ids only for vertices, cells have them fine
3227  rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
3228 
3229  // set the partition tag on the file set
3230  Tag part_tag;
3231  int dum_id = -1;
3232  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
3233  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
3234 
3235  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
3236  {
3237  std::cout << " can't get par part tag.\n";
3238  return moab::MB_FAILURE;
3239  }
3240 
3241  int rank = pco->rank();
3242  rval = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
3243 
3244  return moab::MB_SUCCESS;
3245 }

References moab::Interface::add_entities(), GlobalContext::appDatas, moab::ParallelComm::assign_global_ids(), moab::Range::clear(), context, moab::Range::empty(), ErrorCode, appData::file_set, moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type_and_tag(), iMOAB_UpdateMeshInfo(), appData::mat_sets, GlobalContext::material_tag, MB_ALREADY_ALLOCATED, MB_CHK_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_SPARSE, MB_TYPE_INTEGER, MBENTITYSET, GlobalContext::MBI, moab::ParallelMergeMesh::merge(), GlobalContext::pcomms, moab::ParallelComm::rank(), moab::IntxUtils::remove_duplicate_vertices(), moab::Interface::remove_entities(), moab::Interface::tag_get_handle(), moab::Interface::tag_set_data(), and moab::Interface::UNION.

◆ iMOAB_ReadHeaderInfo()

ErrCode iMOAB_ReadHeaderInfo ( const iMOAB_String  filename,
int *  num_global_vertices,
int *  num_global_elements,
int *  num_dimension,
int *  num_parts 
)

It should be called on the master task only; the information obtained can be broadcast by the user. It is a fast lookup in the header of the file.

Note
Operations: Not collective
Parameters
[in]filename(iMOAB_String) The MOAB mesh file (H5M) to probe for header information.
[out]num_global_vertices(int*) The total number of vertices in the mesh file.
[out]num_global_elements(int*) The total number of elements (of highest dimension only).
[out]num_dimension(int*) The highest dimension of elements in the mesh (Edge=1, Tri/Quad=2, Tet/Hex/Prism/Pyramid=3).
[out]num_parts(int*) The total number of partitions available in the mesh file, typically partitioned with mbpart during pre-processing.
Returns
ErrCode The error code indicating success or failure.

Definition at line 463 of file iMOAB.cpp.

468 {
469  IMOAB_CHECKPOINTER( filename, 1 );
470  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
471 
472 #ifdef MOAB_HAVE_HDF5
473  std::string filen( filename );
474 
475  int edges = 0;
476  int faces = 0;
477  int regions = 0;
478  if( num_global_vertices ) *num_global_vertices = 0;
479  if( num_global_elements ) *num_global_elements = 0;
480  if( num_dimension ) *num_dimension = 0;
481  if( num_parts ) *num_parts = 0;
482 
483  mhdf_FileHandle file;
484  mhdf_Status status;
485  unsigned long max_id;
486  struct mhdf_FileDesc* data;
487 
488  file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
489 
490  if( mhdf_isError( &status ) )
491  {
492  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
493  return moab::MB_FAILURE;
494  }
495 
496  data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
497  1 ); // will use extra set info; will get parallel partition tag info too!
498 
499  if( mhdf_isError( &status ) )
500  {
501  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
502  return moab::MB_FAILURE;
503  }
504 
505  if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
506  if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
507 
508  for( int i = 0; i < data->num_elem_desc; i++ )
509  {
510  struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
511  struct mhdf_EntDesc* ent_d = &( el_desc->desc );
512 
513  if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
514  {
515  edges += ent_d->count;
516  }
517 
518  if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
519  {
520  faces += ent_d->count;
521  }
522 
523  if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
524  {
525  faces += ent_d->count;
526  }
527 
528  if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
529  {
530  faces += ent_d->count;
531  }
532 
533  if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
534  {
535  regions += ent_d->count;
536  }
537 
538  if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
539  {
540  regions += ent_d->count;
541  }
542 
543  if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
544  {
545  regions += ent_d->count;
546  }
547 
548  if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
549  {
550  regions += ent_d->count;
551  }
552 
553  if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
554  {
555  regions += ent_d->count;
556  }
557 
558  if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
559  {
560  regions += ent_d->count;
561  }
562 
563  if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
564  {
565  regions += ent_d->count;
566  }
567  }
568 
569  if( num_parts ) *num_parts = data->numEntSets[0];
570 
571  // is this required?
572  if( edges > 0 )
573  {
574  if( num_dimension ) *num_dimension = 1; // I don't think it will ever return 1
575  if( num_global_elements ) *num_global_elements = edges;
576  }
577 
578  if( faces > 0 )
579  {
580  if( num_dimension ) *num_dimension = 2;
581  if( num_global_elements ) *num_global_elements = faces;
582  }
583 
584  if( regions > 0 )
585  {
586  if( num_dimension ) *num_dimension = 3;
587  if( num_global_elements ) *num_global_elements = regions;
588  }
589 
590  mhdf_closeFile( file, &status );
591 
592  free( data );
593 
594 #else
595  std::cout << filename
596  << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
597  "formats other than a h5m file.\n";
598  if( num_global_vertices ) *num_global_vertices = 0;
599  if( num_global_elements ) *num_global_elements = 0;
600  if( num_dimension ) *num_dimension = 0;
601  if( num_parts ) *num_parts = 0;
602 #endif
603 
604  return moab::MB_SUCCESS;
605 }

References mhdf_EntDesc::count, mhdf_ElemDesc::desc, mhdf_FileDesc::elems, IMOAB_ASSERT, IMOAB_CHECKPOINTER, MB_SUCCESS, mdhf_HEX_TYPE_NAME, mdhf_KNIFE_TYPE_NAME, mhdf_closeFile(), mhdf_EDGE_TYPE_NAME, mhdf_getFileSummary(), mhdf_isError(), mhdf_message(), mhdf_openFile(), mhdf_POLYGON_TYPE_NAME, mhdf_POLYHEDRON_TYPE_NAME, mhdf_PRISM_TYPE_NAME, mhdf_PYRAMID_TYPE_NAME, mhdf_QUAD_TYPE_NAME, mhdf_SEPTAHEDRON_TYPE_NAME, mhdf_TET_TYPE_NAME, mhdf_TRI_TYPE_NAME, mhdf_FileDesc::nodes, mhdf_FileDesc::num_elem_desc, mhdf_FileDesc::numEntSets, mhdf_ElemDesc::type, and mhdf_EntDesc::vals_per_ent.
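
A hedged sketch matching the description: probe the header on the master task and broadcast the four values to the other ranks.

    #include <mpi.h>
    #include "moab/iMOAB.h"

    void probe_and_broadcast( char* filename, MPI_Comm comm )
    {
        int rank, info[4] = { 0, 0, 0, 0 }; /* verts, elems, dimension, parts */
        MPI_Comm_rank( comm, &rank );
        if( 0 == rank )
            iMOAB_ReadHeaderInfo( filename, &info[0], &info[1], &info[2], &info[3] );
        MPI_Bcast( info, 4, MPI_INT, 0, comm );
    }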

◆ iMOAB_ReceiveElementTag()

ErrCode iMOAB_ReceiveElementTag ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
MPI_Comm *  join,
int *  context_id 
)

Definition at line 2740 of file iMOAB.cpp.

2741 {
2742  appData& data = context.appDatas[*pid];
2743 
2744  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2745  if( mt == data.pgraph.end() )
2746  {
2747  return moab::MB_FAILURE;
2748  }
2749  ParCommGraph* cgraph = mt->second;
2750 
2751  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2752  ParallelComm* pco = context.pcomms[*pid];
2753  Range owned = data.owned_elems;
2754 
2755  // how do I know if this receiver already participated in an intersection driven by coupler?
2756  // also, what if this was the "source" mesh in intx?
2757  // in that case, the elements might have been instantiated in the coverage set locally, the
2758  // "owned" range can be different the elements are now in tempestRemap coverage_set
2759  EntityHandle cover_set = cgraph->get_cover_set();
2760  ErrorCode rval;
2761  if( 0 != cover_set )
2762  {
2763  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2764  }
2765  if( data.point_cloud )
2766  {
2767  owned = data.local_verts;
2768  }
2769  // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2770  // e.g., after atm ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the cover set
2771  // from the intx remapper
2772 #ifdef MOAB_HAVE_TEMPESTREMAP
2773  if( data.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2774  {
2775  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2776  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2777  // should still have only quads ?
2778  }
2779 #endif
2780  /*
2781  * data_intx.remapper exists though only on the intersection application
2782  * how do we get from here ( we know the pid that receives, and the commgraph used by migrate
2783  * mesh )
2784  */
2785 
2786  std::string tag_name( tag_storage_name );
2787 
2788  // we assume that there are separators ":" between the tag names
2789  std::vector< std::string > tagNames;
2790  std::vector< Tag > tagHandles;
2791  std::string separator( ":" );
2792  split_tag_names( tag_name, separator, tagNames );
2793  for( size_t i = 0; i < tagNames.size(); i++ )
2794  {
2795  Tag tagHandle;
2796  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2797  if( MB_SUCCESS != rval || NULL == tagHandle )
2798  {
2799  std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
2800  }
2801  tagHandles.push_back( tagHandle );
2802  }
2803 
2804 #ifdef VERBOSE
2805  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
2806  << " and file set = " << ( data.file_set ) << "\n";
2807 #endif
2808  // pco is needed to pack, and for moab instance, not for communication!
2809  // still use nonblocking communication
2810  rval = cgraph->receive_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
2811 
2812 #ifdef VERBOSE
2813  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
2814 #endif
2815 
2816  return moab::MB_SUCCESS;
2817 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, ErrorCode, appData::file_set, moab::ParCommGraph::get_cover_set(), moab::Interface::get_entities_by_dimension(), appData::is_fortran, appData::local_verts, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, GlobalContext::MBI, appData::owned_elems, GlobalContext::pcomms, appData::pgraph, appData::point_cloud, moab::ParallelComm::rank(), moab::ParCommGraph::receive_tag_values(), split_tag_names(), and moab::Interface::tag_get_handle().

◆ iMOAB_ReceiveMesh()

ErrCode iMOAB_ReceiveMesh ( iMOAB_AppID  pid,
MPI_Comm *  join,
MPI_Group *  sendingGroup,
int *  scompid 
)

Definition at line 2503 of file iMOAB.cpp.

2504 {
2505  assert( join != nullptr );
2506  assert( sendingGroup != nullptr );
2507  assert( scompid != nullptr );
2508 
2509  ErrorCode rval;
2510  appData& data = context.appDatas[*pid];
2511  ParallelComm* pco = context.pcomms[*pid];
2512  MPI_Comm receive = pco->comm();
2513  EntityHandle local_set = data.file_set;
2514 
2515  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2516  MPI_Group sendGroup =
2517  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
2518 
2519  // first see what are the processors in each group; get the sender group too, from the sender
2520  // communicator
2521  MPI_Group receiverGroup;
2522  int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
2523 
2524  // instantiate the par comm graph
2525  ParCommGraph* cgraph =
2526  new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
2527  // TODO we should search if we have another pcomm with the same comp ids in the list already
2528  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2529  context.appDatas[*pid].pgraph[*scompid] = cgraph;
2530 
2531  int receiver_rank = -1;
2532  MPI_Comm_rank( receive, &receiver_rank );
2533 
2534  // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
2535  // knows what data to expect
2536  std::vector< int > pack_array;
2537  rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
2538 
2539  // senders across for the current receiver
2540  int current_receiver = cgraph->receiver( receiver_rank );
2541 
2542  std::vector< int > senders_local;
2543  size_t n = 0;
2544 
2545  while( n < pack_array.size() )
2546  {
2547  if( current_receiver == pack_array[n] )
2548  {
2549  for( int j = 0; j < pack_array[n + 1]; j++ )
2550  {
2551  senders_local.push_back( pack_array[n + 2 + j] );
2552  }
2553 
2554  break;
2555  }
2556 
2557  n = n + 2 + pack_array[n + 1];
2558  }
2559 
2560 #ifdef VERBOSE
2561  std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
2562  << senders_local.size() << " tasks: ";
2563 
2564  for( int k = 0; k < (int)senders_local.size(); k++ )
2565  {
2566  std::cout << " " << senders_local[k];
2567  }
2568 
2569  std::cout << "\n";
2570 #endif
2571 
2572  if( senders_local.empty() )
2573  {
2574  std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
2575  }
2576  rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
2577 
2578  // after we are done, we could merge vertices that come from different senders, but
2579  // have the same global id
2580  Tag idtag;
2581  rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
2582 
2583  // data.point_cloud = false;
2584  Range local_ents;
2585  rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
2586 
2587  // do not do merge if point cloud
2588  if( !local_ents.all_of_type( MBVERTEX ) )
2589  {
2590  if( (int)senders_local.size() >= 2 ) // need to remove duplicate vertices
2591  // that might come from different senders
2592  {
2593 
2594  Range local_verts = local_ents.subset_by_type( MBVERTEX );
2595  Range local_elems = subtract( local_ents, local_verts );
2596 
2597  // remove from local set the vertices
2598  rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
2599 
2600 #ifdef VERBOSE
2601  std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
2602 #endif
2603  MergeMesh mm( context.MBI );
2604 
2605  rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
2606 
2607  Range new_verts; // local elems are local entities without vertices
2608  rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
2609 
2610 #ifdef VERBOSE
2611  std::cout << "after merging: new verts: " << new_verts.size() << "\n";
2612 #endif
2613  rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
2614  }
2615  }
2616  else
2617  data.point_cloud = true;
2618 
2619  if( !data.point_cloud )
2620  {
2621  // still need to resolve shared entities (in this case, vertices )
2622  rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
2623  }
2624  else
2625  {
2626  // if partition tag exists, set it to current rank; just to make it visible in VisIt
2627  Tag densePartTag;
2628  rval = context.MBI->tag_get_handle( "partition", densePartTag );
2629  if( NULL != densePartTag && MB_SUCCESS == rval )
2630  {
2631  Range local_verts;
2632  rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
2633  std::vector< int > vals;
2634  int rank = pco->rank();
2635  vals.resize( local_verts.size(), rank );
2636  rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
2637  }
2638  }
2639  // set the parallel partition tag
2640  Tag part_tag;
2641  int dum_id = -1;
2642  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2643  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2644 
2645  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2646  {
2647  std::cout << " can't get par part tag.\n";
2648  return moab::MB_FAILURE;
2649  }
2650 
2651  int rank = pco->rank();
2652  rval = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
2653 
2654  // make sure that the GLOBAL_ID is defined as a tag
2655  int tagtype = 0; // dense, integer
2656  int numco = 1; // size
2657  int tagindex; // not used
2658  rval = iMOAB_DefineTagStorage( pid, "GLOBAL_ID", &tagtype, &numco, &tagindex );MB_CHK_ERR( rval );
2659  // populate the mesh with current data info
2660  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
2661 
2662  // mark for deletion
2663  MPI_Group_free( &receiverGroup );
2664 
2665  return moab::MB_SUCCESS;
2666 }

References moab::Interface::add_entities(), moab::Range::all_of_type(), GlobalContext::appDatas, CHK_MPI_ERR, moab::ParallelComm::comm(), context, ErrorCode, appData::file_set, moab::Interface::get_connectivity(), moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_handle(), iMOAB_DefineTagStorage(), iMOAB_UpdateMeshInfo(), appData::is_fortran, MB_ALREADY_ALLOCATED, MB_CHK_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_SPARSE, MB_TYPE_INTEGER, GlobalContext::MBI, MBVERTEX, moab::MergeMesh::merge_using_integer_tag(), GlobalContext::pcomms, appData::point_cloud, moab::ParallelComm::rank(), moab::ParCommGraph::receive_comm_graph(), moab::ParCommGraph::receive_mesh(), moab::ParCommGraph::receiver(), moab::Interface::remove_entities(), moab::ParallelComm::resolve_shared_ents(), moab::Range::size(), moab::Range::subset_by_type(), moab::subtract(), moab::Interface::tag_get_handle(), and moab::Interface::tag_set_data().

◆ iMOAB_ReduceTagsMax()

ErrCode iMOAB_ReduceTagsMax ( iMOAB_AppID  pid,
int *  tag_index,
int *  ent_type 
)

Perform a global reduction among the processes in the current application's communicator. Specifically, this routine reduces the tag indicated by tag_index to its maximum value across processes.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_index(int*) The tag index of interest.
[in]ent_type(int*) The type of entity for tag reduction operation (vertices = 0, elements = 1)
Returns
ErrCode The error code indicating success or failure.

Definition at line 2140 of file iMOAB.cpp.

2141 {
2142 #ifdef MOAB_HAVE_MPI
2143  appData& data = context.appDatas[*pid];
2144  Range ent_exchange;
2145 
2146  if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
2147  {
2148  return moab::MB_FAILURE;
2149  } // error in tag index
2150 
2151  Tag tagh = data.tagList[*tag_index];
2152 
2153  if( *ent_type == 0 )
2154  {
2155  ent_exchange = data.all_verts;
2156  }
2157  else if( *ent_type == 1 )
2158  {
2159  ent_exchange = data.primary_elems;
2160  }
2161  else
2162  {
2163  return moab::MB_FAILURE;
2164  } // unexpected type
2165 
2166  ParallelComm* pco = context.pcomms[*pid];
2167  // we could do different MPI_Op; do not bother now, we will call from fortran
2168  ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
2169 
2170 #else
2171  /* do nothing if serial */
2172  // just silence the warning
2173  // do not call sync tags in serial!
2174  int k = *pid + *tag_index + *ent_type;
2175  k++; // just do junk, to avoid complaints
2176 #endif
2177  return moab::MB_SUCCESS;
2178 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, GlobalContext::pcomms, appData::primary_elems, moab::ParallelComm::reduce_tags(), and appData::tagList.
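
A minimal usage sketch (illustrative, not part of the MOAB sources). It assumes the tag was defined through iMOAB_DefineTagStorage and that tag type 1 denotes a dense double tag; error checks are abbreviated.

    #include "moab/iMOAB.h"

    // Reduce a field ("a2oTbot" is a placeholder name) to its global maximum
    // over the application communicator; elements carry the tag here.
    ErrCode reduce_field_max( iMOAB_AppID pid )
    {
        char tname[]  = "a2oTbot";
        int tag_type  = 1;  // assumed to denote a dense, double tag
        int num_comp  = 1;  // one component per entity
        int tag_index = -1; // filled by iMOAB_DefineTagStorage
        ErrCode rc = iMOAB_DefineTagStorage( pid, tname, &tag_type, &num_comp, &tag_index );
        if( rc ) return rc;
        int ent_type = 1;   // 1 = elements, 0 = vertices
        return iMOAB_ReduceTagsMax( pid, &tag_index, &ent_type );
    }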

◆ iMOAB_RegisterApplication()

ErrCode iMOAB_RegisterApplication ( const iMOAB_String  app_name,
MPI_Comm *  comm,
int *  compid,
iMOAB_AppID  pid 
)

Definition at line 199 of file iMOAB.cpp.

205 {
206  IMOAB_CHECKPOINTER( app_name, 1 );
207 #ifdef MOAB_HAVE_MPI
208  IMOAB_CHECKPOINTER( comm, 2 );
209  IMOAB_CHECKPOINTER( compid, 3 );
210 #else
211  IMOAB_CHECKPOINTER( compid, 2 );
212 #endif
213 
214  // will create a parallel comm for this application too, so there will be a
215  // mapping from *pid to file set and to parallel comm instances
216  std::string name( app_name );
217 
218  if( context.appIdMap.find( name ) != context.appIdMap.end() )
219  {
220  std::cout << " application " << name << " already registered \n";
221  return moab::MB_FAILURE;
222  }
223 
224  *pid = context.unused_pid++;
225  context.appIdMap[name] = *pid;
226  int rankHere = 0;
227 #ifdef MOAB_HAVE_MPI
228  MPI_Comm_rank( *comm, &rankHere );
229 #endif
230  if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
231  if( *compid <= 0 )
232  {
233  std::cout << " convention for external application is to have its id positive \n";
234  return moab::MB_FAILURE;
235  }
236 
237  if( context.appIdCompMap.find( *compid ) != context.appIdCompMap.end() )
238  {
239  std::cout << " external application with comp id " << *compid << " is already registered\n";
240  return moab::MB_FAILURE;
241  }
242 
243  context.appIdCompMap[*compid] = *pid;
244 
245  // now create ParallelComm and a file set for this application
246 #ifdef MOAB_HAVE_MPI
247  if( *comm )
248  {
249  ParallelComm* pco = new ParallelComm( context.MBI, *comm );
250 
251 #ifndef NDEBUG
252  int index = pco->get_id(); // it could be useful to get app id from pcomm instance ...
253  assert( index == *pid );
254  // here, we assert that the pid is the same as the id of the ParallelComm instance
255  // useful for writing in parallel
256 #endif
257  context.pcomms.push_back( pco );
258  }
259  else
260  {
261  context.pcomms.push_back( 0 );
262  }
263 #endif
264 
265  // create now the file set that will be used for loading the model in
266  EntityHandle file_set;
267  ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
268 
269  appData app_data;
270  app_data.file_set = file_set;
271  app_data.global_id = *compid; // will be used mostly for par comm graph
272  app_data.name = name; // save the name of application
273 
274 #ifdef MOAB_HAVE_TEMPESTREMAP
275  app_data.tempestData.remapper = NULL; // Only allocate as needed
276 #endif
277 
278  app_data.num_ghost_layers = 0;
279  app_data.point_cloud = false;
280  app_data.is_fortran = false;
281 
282  context.appDatas.push_back(
283  app_data ); // it will correspond to app_FileSets[*pid] will be the file set of interest
284  return moab::MB_SUCCESS;
285 }

References GlobalContext::appDatas, GlobalContext::appIdCompMap, GlobalContext::appIdMap, context, moab::Interface::create_meshset(), ErrorCode, appData::file_set, moab::ParallelComm::get_id(), appData::global_id, IMOAB_CHECKPOINTER, appData::is_fortran, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, MESHSET_SET, appData::name, appData::num_ghost_layers, GlobalContext::pcomms, appData::point_cloud, and GlobalContext::unused_pid.

Referenced by iMOAB_RegisterApplicationFortran().
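
A minimal driver sketch (illustrative; the application name and component id are placeholders):

    #include "moab/iMOAB.h"
    #include <mpi.h>

    int main( int argc, char** argv )
    {
        MPI_Init( &argc, &argv );
        iMOAB_Initialize( argc, (iMOAB_String*)argv );

        MPI_Comm comm  = MPI_COMM_WORLD;
        char appname[] = "ATM_APP"; // must be unique among registered apps
        int compid     = 10;        // external component id, must be positive
        int pid        = -1;        // application id, filled by the call

        ErrCode rc = iMOAB_RegisterApplication( appname, &comm, &compid, &pid );
        // ... load a mesh, define tags, etc., always passing &pid ...
        if( !rc ) rc = iMOAB_DeregisterApplication( &pid );

        iMOAB_Finalize();
        MPI_Finalize();
        return rc;
    }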

◆ iMOAB_RegisterApplicationFortran()

ErrCode iMOAB_RegisterApplicationFortran ( const iMOAB_String  app_name,
int *  comm,
int *  compid,
iMOAB_AppID  pid 
)

Definition at line 287 of file iMOAB.cpp.

293 {
294  IMOAB_CHECKPOINTER( app_name, 1 );
295 #ifdef MOAB_HAVE_MPI
296  IMOAB_CHECKPOINTER( comm, 2 );
297  IMOAB_CHECKPOINTER( compid, 3 );
298 #else
299  IMOAB_CHECKPOINTER( compid, 2 );
300 #endif
301 
302  ErrCode err;
303  assert( app_name != nullptr );
304  std::string name( app_name );
305 
306 #ifdef MOAB_HAVE_MPI
307  MPI_Comm ccomm;
308  if( comm )
309  {
310  // convert from Fortran communicator to a C communicator
311  // see transfer of handles
312  // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
313  ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
314  }
315 #endif
316 
317  // now call C style registration function:
318  err = iMOAB_RegisterApplication( app_name,
319 #ifdef MOAB_HAVE_MPI
320  &ccomm,
321 #endif
322  compid, pid );
323 
324  // Now that we have created the application context, store that
325  // the application being registered is from a Fortran context
326  context.appDatas[*pid].is_fortran = true;
327 
328  return err;
329 }

References GlobalContext::appDatas, context, ErrCode, IMOAB_CHECKPOINTER, iMOAB_RegisterApplication(), and MOAB_HAVE_MPI.
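
The Fortran entry point expects an integer (Fortran) communicator handle. A C++ sketch that emulates the conversion a Fortran driver performs implicitly (illustrative; assumes MPI was already initialized):

    #include "moab/iMOAB.h"
    #include <mpi.h>

    ErrCode register_via_fortran_path()
    {
        // convert the C communicator to a Fortran handle, stored as an int
        int fcomm = (int)MPI_Comm_c2f( MPI_COMM_WORLD );
        char appname[] = "OCN_APP"; // placeholder name
        int compid = 20, pid = -1;
        return iMOAB_RegisterApplicationFortran( appname, &fcomm, &compid, &pid );
    }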

◆ iMOAB_ResolveSharedEntities()

ErrCode iMOAB_ResolveSharedEntities ( iMOAB_AppID  pid,
int *  num_verts,
int *  marker 
)

Resolve shared entities using global markers on shared vertices.

Note
Global markers can be a global node id, for example, or a global DoF number (as for HOMME)

Operations: Collective .

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_verts(int*) Number of vertices.
[in]marker(int*) Resolving marker (global id marker).
Returns
ErrCode The error code indicating success or failure.

Definition at line 2330 of file iMOAB.cpp.

2331 {
2332  appData& data = context.appDatas[*pid];
2333  ParallelComm* pco = context.pcomms[*pid];
2334  EntityHandle cset = data.file_set;
2335  int dum_id = 0;
2336  ErrorCode rval;
2337  if( data.primary_elems.empty() )
2338  {
2339  // skip the actual resolve; assume vertices are already distributed,
2340  // no need to share them
2341  }
2342  else
2343  {
2344  // create an integer tag for resolving; maybe it can be a 64-bit tag in the future
2345  // (more than 2B vertices)
2346 
2347  Tag stag;
2348  rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
2349  &dum_id );MB_CHK_ERR( rval );
2350 
2351  if( *num_verts > (int)data.local_verts.size() )
2352  {
2353  return moab::MB_FAILURE;
2354  } // we are not setting the size
2355 
2356  rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval ); // assumes integer tag
2357 
2358  rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
2359 
2360  rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
2361  }
2362  // provide partition tag equal to rank
2363  Tag part_tag;
2364  dum_id = -1;
2365  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2366  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2367 
2368  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2369  {
2370  std::cout << " can't get par part tag.\n";
2371  return moab::MB_FAILURE;
2372  }
2373 
2374  int rank = pco->rank();
2375  rval = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
2376 
2377  return moab::MB_SUCCESS;
2378 }

References GlobalContext::appDatas, context, moab::Range::empty(), ErrorCode, appData::file_set, appData::local_verts, MB_ALREADY_ALLOCATED, MB_CHK_ERR, MB_SUCCESS, MB_TAG_CREAT, MB_TAG_DENSE, MB_TAG_SPARSE, MB_TYPE_INTEGER, GlobalContext::MBI, GlobalContext::pcomms, appData::primary_elems, moab::ParallelComm::rank(), moab::ParallelComm::resolve_shared_ents(), moab::Range::size(), moab::Interface::tag_delete(), moab::Interface::tag_get_handle(), and moab::Interface::tag_set_data().
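
A usage sketch (illustrative), under two assumptions: iMOAB_GetMeshInfo fills 3-entry arrays (owned/ghost/total), and iMOAB_GlobalID is int-compatible so that global vertex ids can serve directly as markers.

    #include "moab/iMOAB.h"
    #include <vector>

    // resolve shared vertices using their global ids as markers
    ErrCode resolve_with_gids( iMOAB_AppID pid )
    {
        int nv[3], ne[3], nb[3], nsbc[3], ndbc[3]; // owned/ghost/total (assumed layout)
        ErrCode rc = iMOAB_GetMeshInfo( pid, nv, ne, nb, nsbc, ndbc );
        if( rc ) return rc;
        int nverts = nv[2]; // all visible vertices
        std::vector< int > gids( nverts );
        rc = iMOAB_GetVertexID( pid, &nverts, gids.data() );
        if( rc ) return rc;
        return iMOAB_ResolveSharedEntities( pid, &nverts, gids.data() );
    }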

◆ iMOAB_SendElementTag()

ErrCode iMOAB_SendElementTag ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
MPI_Comm *  join,
int *  context_id 
)

Definition at line 2668 of file iMOAB.cpp.

2669 {
2670  appData& data = context.appDatas[*pid];
2671  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2672  if( mt == data.pgraph.end() )
2673  {
2674  return moab::MB_FAILURE;
2675  }
2676  ParCommGraph* cgraph = mt->second;
2677  ParallelComm* pco = context.pcomms[*pid];
2678  Range owned = data.owned_elems;
2679  ErrorCode rval;
2680  EntityHandle cover_set;
2681 
2682  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2683  if( data.point_cloud )
2684  {
2685  owned = data.local_verts;
2686  }
2687 
2688  // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2689  // e.g. after atm-ocn intersection, from phys grid (case from imoab_phatm_ocn_coupler.cpp);
2690  // then get the cover set from the intx remapper
2691 #ifdef MOAB_HAVE_TEMPESTREMAP
2692  if( data.tempestData.remapper != NULL ) // this is the case where this app is part of an intersection
2693  {
2694  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2695  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2696  // should still have only quads ?
2697  }
2698 #else
2699  // now, in case we are sending from intx between ocn and atm, we involve the coverage set
2700  // how do I know if this receiver already participated in an intersection driven by the coupler?
2701  // also, what if this was the "source" mesh in intx?
2702  // in that case, the elements might have been instantiated in the coverage set locally; the
2703  // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
2704  cover_set = cgraph->get_cover_set(); // this will be non null only for intx app ?
2705 
2706  if( 0 != cover_set )
2707  {
2708  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2709  }
2710 #endif
2711 
2712  std::string tag_name( tag_storage_name );
2713 
2714  // basically, we assume everything is defined already on the tag,
2715  // and we can get the tags just by its name
2716  // we assume that there are separators ":" between the tag names
2717  std::vector< std::string > tagNames;
2718  std::vector< Tag > tagHandles;
2719  std::string separator( ":" );
2720  split_tag_names( tag_name, separator, tagNames );
2721  for( size_t i = 0; i < tagNames.size(); i++ )
2722  {
2723  Tag tagHandle;
2724  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2725  if( MB_SUCCESS != rval || NULL == tagHandle )
2726  {
2727  std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
2728  }
2729  tagHandles.push_back( tagHandle );
2730  }
2731 
2732  // pco is needed to pack, and for moab instance, not for communication!
2733  // still use nonblocking communication, over the joint comm
2734  rval = cgraph->send_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
2735  // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
2736 
2737  return moab::MB_SUCCESS;
2738 }

References GlobalContext::appDatas, context, moab::Remapper::CoveringMesh, ErrorCode, moab::ParCommGraph::get_cover_set(), moab::Interface::get_entities_by_dimension(), appData::is_fortran, appData::local_verts, MB_CHK_ERR, MB_CHK_SET_ERR, MB_SUCCESS, GlobalContext::MBI, appData::owned_elems, GlobalContext::pcomms, appData::pgraph, appData::point_cloud, moab::ParCommGraph::send_tag_values(), split_tag_names(), and moab::Interface::tag_get_handle().
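
A sender-side sketch (illustrative): it assumes a communication graph to the peer component was established earlier (for instance through iMOAB_SendMesh/iMOAB_ReceiveMesh or iMOAB_ComputeCommGraph) and that the receiver posts the matching iMOAB_ReceiveElementTag; tag names are placeholders.

    #include "moab/iMOAB.h"
    #include <mpi.h>

    // send two tags in one call; ':' separates the tag names
    ErrCode send_fields( iMOAB_AppID pid, MPI_Comm* join, int peer_comp_id )
    {
        char tags[] = "a2oTbot:a2oUbot"; // placeholder tag names
        return iMOAB_SendElementTag( pid, tags, join, &peer_comp_id );
    }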

◆ iMOAB_SendMesh()

ErrCode iMOAB_SendMesh ( iMOAB_AppID  pid,
MPI_Comm *  join,
MPI_Group *  receivingGroup,
int *  rcompid,
int *  method 
)

Definition at line 2412 of file iMOAB.cpp.

2413 {
2414  assert( join != nullptr );
2415  assert( receivingGroup != nullptr );
2416  assert( rcompid != nullptr );
2417 
2418  ErrorCode rval;
2419  int ierr;
2420  appData& data = context.appDatas[*pid];
2421  ParallelComm* pco = context.pcomms[*pid];
2422 
2423  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2424  MPI_Group recvGroup =
2425  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
2426  MPI_Comm sender = pco->comm(); // the sender comm is obtained from parallel comm in moab; no need to pass it along
2427  // first see what are the processors in each group; get the sender group too, from the sender communicator
2428  MPI_Group senderGroup;
2429  ierr = MPI_Comm_group( sender, &senderGroup );
2430  if( ierr != 0 ) return moab::MB_FAILURE;
2431 
2432  // instantiate the par comm graph
2433  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2434  // int coid2)
2435  ParCommGraph* cgraph =
2436  new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
2437  // we should search if we have another pcomm with the same comp ids in the list already
2438  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2439  context.appDatas[*pid].pgraph[*rcompid] = cgraph;
2440 
2441  int sender_rank = -1;
2442  MPI_Comm_rank( sender, &sender_rank );
2443 
2444  // decide how to distribute elements to each processor
2445  // now, get the entities on local processor, and pack them into a buffer for various processors
2446  // we will do trivial partition: first get the total number of elements from "sender"
2447  std::vector< int > number_elems_per_part;
2448  // how to distribute local elements to receiving tasks?
2449  // trivial partition: compute first the total number of elements need to be sent
2450  Range owned = context.appDatas[*pid].owned_elems;
2451  if( owned.size() == 0 )
2452  {
2453  // then it must be vertices that we want to send
2454  owned = context.appDatas[*pid].local_verts;
2455  // we should have some vertices here
2456  }
2457 
2458  if( *method == 0 ) // trivial partitioning, old method
2459  {
2460  int local_owned_elem = (int)owned.size();
2461  int size = pco->size();
2462  int rank = pco->rank();
2463  number_elems_per_part.resize( size ); //
2464  number_elems_per_part[rank] = local_owned_elem;
2465 #if( MPI_VERSION >= 2 )
2466  // Use "in place" option
2467  ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
2468 #else
2469  {
2470  std::vector< int > all_tmp( size );
2471  ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
2472  number_elems_per_part = all_tmp;
2473  }
2474 #endif
2475 
2476  if( ierr != 0 )
2477  {
2478  return moab::MB_FAILURE;
2479  }
2480 
2481  // every sender computes the trivial partition, it is cheap, and we need to send it anyway
2482  // to each sender
2483  rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
2484 
2485  rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
2486  }
2487  else // *method != 0, so it is either graph or geometric, parallel
2488  {
2489  // owned are the primary elements on this app
2490  rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
2491 
2492  // basically, send the graph to the receiver side, with unblocking send
2493  rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
2494  }
2495  // pco is needed to pack, not for communication
2496  rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
2497 
2498  // mark for deletion
2499  MPI_Group_free( &senderGroup );
2500  return moab::MB_SUCCESS;
2501 }

References GlobalContext::appDatas, moab::ParallelComm::comm(), moab::ParCommGraph::compute_partition(), moab::ParCommGraph::compute_trivial_partition(), context, ErrorCode, appData::is_fortran, MB_CHK_ERR, MB_SUCCESS, GlobalContext::pcomms, moab::ParallelComm::rank(), moab::ParCommGraph::send_graph(), moab::ParCommGraph::send_graph_partition(), moab::ParCommGraph::send_mesh_parts(), size, moab::Range::size(), and moab::ParallelComm::size().
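
A sender-side sketch (illustrative): all tasks of the owning application call this collectively, while the receiver group posts iMOAB_ReceiveMesh on the same joint communicator.

    #include "moab/iMOAB.h"
    #include <mpi.h>

    ErrCode ship_mesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* recvGroup, int rcompid )
    {
        // method 0 requests the trivial partition; a nonzero value selects
        // the graph/geometric partition path shown above
        int method = 0;
        return iMOAB_SendMesh( pid, join, recvGroup, &rcompid, &method );
    }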

◆ iMOAB_SetDoubleTagStorage()

ErrCode iMOAB_SetDoubleTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  num_tag_storage_length,
int *  entity_type,
double *  tag_storage_data 
)

Store the specified values in a MOAB double Tag.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag names, separated by ':', used to store the data.
[in]num_tag_storage_length(int*) The size of total tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity*num_tags).
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[in]tag_storage_data(double*) The array data of type double to replace the internal tag memory; the data is assumed to be contiguous over the local set of visible entities (either vertices or elements), unrolled by tags.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1716 of file iMOAB.cpp.

1721 {
1722  ErrorCode rval;
1723  std::string tag_names( tag_storage_names );
1724  // exactly the same code as for int tag :) maybe should check the type of tag too
1725  std::vector< std::string > tagNames;
1726  std::vector< Tag > tagHandles;
1727  std::string separator( ":" );
1728  split_tag_names( tag_names, separator, tagNames );
1729 
1730  appData& data = context.appDatas[*pid];
1731  Range* ents_to_set = NULL;
1732 
1733  if( *ent_type == 0 ) // vertices
1734  {
1735  ents_to_set = &data.all_verts;
1736  }
1737  else if( *ent_type == 1 )
1738  {
1739  ents_to_set = &data.primary_elems;
1740  }
1741 
1742  int nents_to_be_set = (int)( *ents_to_set ).size();
1743  int position = 0;
1744 
1745  for( size_t i = 0; i < tagNames.size(); i++ )
1746  {
1747  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1748  {
1749  return moab::MB_FAILURE;
1750  } // some tag not defined yet in the app
1751 
1752  Tag tag = data.tagMap[tagNames[i]];
1753 
1754  int tagLength = 0;
1755  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1756 
1757  DataType dtype;
1758  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1759 
1760  if( dtype != MB_TYPE_DOUBLE )
1761  {
1762  return moab::MB_FAILURE;
1763  }
1764 
1765  // set it on the subset of entities, based on type and length
1766  if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
1767  return moab::MB_FAILURE; // too many entity values to be set
1768 
1769  rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
1770  // increment position to next tag
1771  position = position + tagLength * nents_to_be_set;
1772  }
1773  return moab::MB_SUCCESS; // no error
1774 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, MB_TYPE_DOUBLE, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), split_tag_names(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), moab::Interface::tag_set_data(), and appData::tagMap.
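
A sketch of the unrolled-by-tag layout (illustrative; "T" and "U" are placeholder tags that must have been defined already with iMOAB_DefineTagStorage):

    #include "moab/iMOAB.h"
    #include <vector>

    // set two single-component double tags on all visible elements in one
    // call; all values for "T" come first, then all values for "U"
    ErrCode set_two_fields( iMOAB_AppID pid, int num_visible_elems )
    {
        std::vector< double > vals( 2 * num_visible_elems, 0.0 );
        char tags[]  = "T:U";            // placeholder tag names
        int length   = (int)vals.size(); // nents * components * num_tags
        int ent_type = 1;                // 1 = elements
        return iMOAB_SetDoubleTagStorage( pid, tags, &length, &ent_type, vals.data() );
    }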

◆ iMOAB_SetDoubleTagStorageWithGid()

ErrCode iMOAB_SetDoubleTagStorageWithGid ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_names,
int *  num_tag_storage_length,
int *  entity_type,
double *  tag_storage_data,
int *  globalIds 
)

Store the specified values in a MOAB double Tag, for given ids.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_names(iMOAB_String) The tag names, separated by ':', used to store the data.
[in]num_tag_storage_length(int*) The size of total tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity*num_tags).
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[in]tag_storage_data(double*) The array data of type double to replace the internal tag memory; the data is unrolled by tags and, in parallel, may come in a different order or refer to different entities than those local to the task.
[in]globalIds(int*) The global ids of the cells to be set.
Returns
ErrCode The error code indicating success or failure.

Definition at line 1776 of file iMOAB.cpp.

1782 {
1783  ErrorCode rval;
1784  std::string tag_names( tag_storage_names );
1785  // exactly the same code as for int tag :) maybe should check the type of tag too
1786  std::vector< std::string > tagNames;
1787  std::vector< Tag > tagHandles;
1788  std::string separator( ":" );
1789  split_tag_names( tag_names, separator, tagNames );
1790 
1791  appData& data = context.appDatas[*pid];
1792  Range* ents_to_set = NULL;
1793 
1794  if( *ent_type == 0 ) // vertices
1795  {
1796  ents_to_set = &data.all_verts;
1797  }
1798  else if( *ent_type == 1 )
1799  {
1800  ents_to_set = &data.primary_elems;
1801  }
1802 
1803  int nents_to_be_set = (int)( *ents_to_set ).size();
1804 
1805  Tag gidTag = context.MBI->globalId_tag();
1806  std::vector< int > gids;
1807  gids.resize( nents_to_be_set );
1808  rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval );
1809 
1810  // so we will need to set the tags according to the global id passed;
1811  // so the order in tag_storage_data is the same as the order in globalIds, but the order
1812  // in local range is gids
1813  std::map< int, EntityHandle > eh_by_gid;
1814  int i = 0;
1815  for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i )
1816  {
1817  eh_by_gid[gids[i]] = *it;
1818  }
1819 
1820  std::vector< int > tagLengths( tagNames.size() );
1821  std::vector< Tag > tagList;
1822  size_t total_tag_len = 0;
1823  for( size_t i = 0; i < tagNames.size(); i++ )
1824  {
1825  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1826  {
1827  MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
1828  } // some tag not defined yet in the app
1829 
1830  Tag tag = data.tagMap[tagNames[i]];
1831  tagList.push_back( tag );
1832 
1833  int tagLength = 0;
1834  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1835 
1836  total_tag_len += tagLength;
1837  tagLengths[i] = tagLength;
1838  DataType dtype;
1839  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1840 
1841  if( dtype != MB_TYPE_DOUBLE )
1842  {
1843  MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
1844  }
1845  }
1846  bool serial = true;
1847 #ifdef MOAB_HAVE_MPI
1848  ParallelComm* pco = context.pcomms[*pid];
1849  unsigned num_procs = pco->size();
1850  if( num_procs > 1 ) serial = false;
1851 #endif
1852 
1853  if( serial )
1854  {
1855  // we do not assume anymore that the number of entities has to match
1856  // we will set only what matches, and skip entities that do not have corresponding global ids
1857  //assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 );
1858  // tags are unrolled, we loop over global ids first, then careful about tags
1859  for( int i = 0; i < nents_to_be_set; i++ )
1860  {
1861  int gid = globalIds[i];
1862  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
1863  if( mapIt == eh_by_gid.end() ) continue;
1864  EntityHandle eh = mapIt->second;
1865  // now loop over tags
1866  int indexInTagValues = 0; //
1867  for( size_t j = 0; j < tagList.size(); j++ )
1868  {
1869  indexInTagValues += i * tagLengths[j];
1870  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval );
1871  // advance the pointer/index
1872  indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j]; // at the end of tag data
1873  }
1874  }
1875  }
1876 #ifdef MOAB_HAVE_MPI
1877  else // it can be not serial only if pco->size() > 1, parallel
1878  {
1879  // in this case, we have to use 2 crystal routers, to send data to the processor that needs it
1880  // we will first create a tuple list to rendezvous points, then from there send to the processor that requested it
1881  // it is a 2-hop global gather scatter
1882  // TODO: allow for tags of different length; this is wrong
1883  int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() ); // assumes all tags have the same length?
1884  // we do not expect the sizes to match
1885  //assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 );
1886  TupleList TLsend;
1887  TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals ); // to proc, marker(gid), total_tag_len doubles
1888  TLsend.enableWriteAccess();
1889  // the processor id that processes global_id is global_id / num_ents_per_proc
1890 
1891  int indexInRealLocal = 0;
1892  for( int i = 0; i < nbLocalVals; i++ )
1893  {
1894  // to proc, marker, element local index, index in el
1895  int marker = globalIds[i];
1896  int to_proc = marker % num_procs;
1897  int n = TLsend.get_n();
1898  TLsend.vi_wr[2 * n] = to_proc; // send to processor
1899  TLsend.vi_wr[2 * n + 1] = marker;
1900  int indexInTagValues = 0;
1901  // tag data collect by number of tags
1902  for( size_t j = 0; j < tagList.size(); j++ )
1903  {
1904  indexInTagValues += i * tagLengths[j];
1905  for( int k = 0; k < tagLengths[j]; k++ )
1906  {
1907  TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
1908  }
1909  indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
1910  }
1911  TLsend.inc_n();
1912  }
1913 
1914  //assert( nbLocalVals * total_tag_len - indexInRealLocal == 0 );
1915  // send now requests, basically inform the rendez-vous point who needs a particular global id
1916  // send the data to the other processors:
1917  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 );
1918  TupleList TLreq;
1919  TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
1920  TLreq.enableWriteAccess();
1921  for( int i = 0; i < nents_to_be_set; i++ )
1922  {
1923  // to proc, marker
1924  int marker = gids[i];
1925  int to_proc = marker % num_procs;
1926  int n = TLreq.get_n();
1927  TLreq.vi_wr[2 * n] = to_proc; // send to processor
1928  TLreq.vi_wr[2 * n + 1] = marker;
1929  // tag data collect by number of tags
1930  TLreq.inc_n();
1931  }
1932  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
1933 
1934  // we know now that process TLreq.vi_wr[2 * n] needs tags for gid TLreq.vi_wr[2 * n + 1]
1935  // we should first order by global id, and then build the new TL with send to proc, global id and
1936  // tags for it
1937  // sort by global ids the tuple lists
1938  moab::TupleList::buffer sort_buffer;
1939  sort_buffer.buffer_init( TLreq.get_n() );
1940  TLreq.sort( 1, &sort_buffer );
1941  sort_buffer.reset();
1942  sort_buffer.buffer_init( TLsend.get_n() );
1943  TLsend.sort( 1, &sort_buffer );
1944  sort_buffer.reset();
1945  // now send the tag values to the proc that requested it
1946  // in theory, for a full partition, TLreq and TLsend should have the same size, and
1947  // each dof should have exactly one target proc. Is that true or not in general ?
1948  // how do we plan to use this? Is it better to store the comm graph for future
1949 
1950  // start copy from comm graph settle
1951  TupleList TLBack;
1952  TLBack.initialize( 3, 0, 0, total_tag_len, 0 ); // to proc, marker, tag from proc , tag values
1953  TLBack.enableWriteAccess();
1954 
1955  int n1 = TLreq.get_n();
1956  int n2 = TLsend.get_n();
1957 
1958  int indexInTLreq = 0;
1959  int indexInTLsend = 0; // advance both, according to the marker
1960  if( n1 > 0 && n2 > 0 )
1961  {
1962 
1963  while( indexInTLreq < n1 && indexInTLsend < n2 ) // if any is over, we are done
1964  {
1965  int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
1966  int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
1967  if( currentValue1 < currentValue2 )
1968  {
1969  // we have a big problem; basically, we are saying that
1970  // dof currentValue is on one model and not on the other
1971  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
1972  indexInTLreq++;
1973  continue;
1974  }
1975  if( currentValue1 > currentValue2 )
1976  {
1977  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
1978  indexInTLsend++;
1979  continue;
1980  }
1981  int size1 = 1;
1982  int size2 = 1;
1983  while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
1984  size1++;
1985  while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
1986  size2++;
1987  // must be found in both lists, find the start and end indices
1988  for( int i1 = 0; i1 < size1; i1++ )
1989  {
1990  for( int i2 = 0; i2 < size2; i2++ )
1991  {
1992  // send the info back to components
1993  int n = TLBack.get_n();
1994  TLBack.reserve();
1995  TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )]; // send back to the proc marker
1996  // came from, info from comp2
1997  TLBack.vi_wr[3 * n + 1] = currentValue1; // initial value (resend, just for verif ?)
1998  TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )]; // from proc on comp2
1999  // also fill tag values
2000  for( size_t k = 0; k < total_tag_len; k++ )
2001  {
2002  TLBack.vr_rd[total_tag_len * n + k] =
2003  TLsend.vr_rd[total_tag_len * indexInTLsend + k]; // deep copy of tag values
2004  }
2005  }
2006  }
2007  indexInTLreq += size1;
2008  indexInTLsend += size2;
2009  }
2010  }
2011  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
2012  // end copy from comm graph
2013  // after we are done sending, we need to set those tag values, in a reverse process compared to send
2014  n1 = TLBack.get_n();
2015  double* ptrVal = &TLBack.vr_rd[0]; //
2016  for( int i = 0; i < n1; i++ )
2017  {
2018  int gid = TLBack.vi_rd[3 * i + 1]; // marker
2019  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
2020  if( mapIt == eh_by_gid.end() ) continue;
2021  EntityHandle eh = mapIt->second;
2022  // now loop over tags
2023 
2024  for( size_t j = 0; j < tagList.size(); j++ )
2025  {
2026  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval );
2027  // advance the pointer/index
2028  ptrVal += tagLengths[j]; // at the end of tag data per call
2029  }
2030  }
2031  }
2032 #endif
2033  return MB_SUCCESS;
2034 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::begin(), context, moab::ProcConfig::crystal_router(), moab::TupleList::enableWriteAccess(), moab::Range::end(), ErrorCode, moab::TupleList::get_n(), moab::Interface::globalId_tag(), moab::TupleList::inc_n(), moab::TupleList::initialize(), MB_CHK_ERR, MB_SET_ERR, MB_SUCCESS, MB_TYPE_DOUBLE, GlobalContext::MBI, GlobalContext::pcomms, appData::primary_elems, moab::ParallelComm::proc_config(), moab::TupleList::reserve(), moab::TupleList::buffer::reset(), moab::Range::size(), moab::ParallelComm::size(), moab::TupleList::sort(), split_tag_names(), moab::Interface::tag_get_data(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), moab::Interface::tag_set_data(), appData::tagMap, moab::TupleList::vi_rd, moab::TupleList::vi_wr, moab::TupleList::vr_rd, and moab::TupleList::vr_wr.
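
A sketch (illustrative) for the case where the caller orders values by its own global-id list rather than by the local MOAB ordering; a single-component tag is assumed, with vals[i] pairing with gids[i].

    #include "moab/iMOAB.h"
    #include <vector>

    // entities whose ids are not local are routed to the right task by the
    // rendezvous exchange shown above
    ErrCode set_by_gid( iMOAB_AppID pid, std::vector< int >& gids, std::vector< double >& vals )
    {
        char tags[]  = "T"; // placeholder tag name
        int length   = (int)vals.size();
        int ent_type = 1;   // 1 = elements
        return iMOAB_SetDoubleTagStorageWithGid( pid, tags, &length, &ent_type,
                                                 vals.data(), gids.data() );
    }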

◆ iMOAB_SetGlobalInfo()

ErrCode iMOAB_SetGlobalInfo ( iMOAB_AppID  pid,
int *  num_global_verts,
int *  num_global_elems 
)

Set global information for the number of vertices and the number of elements; it is usually available from the h5m file, or it can be computed with an MPI_Reduce.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_global_verts(int*) The number of total vertices in the mesh.
[in]num_global_elems(int*) The number of total elements in the mesh.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2304 of file iMOAB.cpp.

2305 {
2306  appData& data = context.appDatas[*pid];
2307  data.num_global_vertices = *num_global_verts;
2308  data.num_global_elements = *num_global_elems;
2309  return moab::MB_SUCCESS;
2310 }

References GlobalContext::appDatas, context, MB_SUCCESS, appData::num_global_elements, and appData::num_global_vertices.

Referenced by iMOAB_UpdateMeshInfo().
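
A sketch (illustrative) of computing the totals with an MPI_Allreduce and storing them; the owned counts are assumed known to the caller.

    #include "moab/iMOAB.h"
    #include <mpi.h>

    ErrCode store_global_counts( iMOAB_AppID pid, MPI_Comm comm, int owned_verts, int owned_elems )
    {
        int local[2] = { owned_verts, owned_elems };
        int global[2];
        MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, comm );
        return iMOAB_SetGlobalInfo( pid, &global[0], &global[1] );
    }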

◆ iMOAB_SetIntTagStorage()

ErrCode iMOAB_SetIntTagStorage ( iMOAB_AppID  pid,
const iMOAB_String  tag_storage_name,
int *  num_tag_storage_length,
int *  entity_type,
int *  tag_storage_data 
)

Store the specified values in a MOAB integer Tag.

Values are set on vertices or elements, depending on entity_type

Note
we could change the api to accept as input the tag index, as returned by iMOAB_DefineTagStorage.

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]tag_storage_name(iMOAB_String) The tag name to store/retrieve the data in MOAB.
[in]num_tag_storage_length(int*) The size of tag storage data (e.g., num_visible_vertices*components_per_entity or num_visible_elements*components_per_entity).
[in]entity_type(int*) Type=0 for vertices, and Type=1 for primary elements.
[in]tag_storage_data(int*) The array data of type int to replace the internal tag memory; the data is assumed to be contiguous over the local set of visible entities (either vertices or elements).
Returns
ErrCode The error code indicating success or failure.

Definition at line 1602 of file iMOAB.cpp.

1607 {
1608  std::string tag_name( tag_storage_name );
1609 
1610  // Get the application data
1611  appData& data = context.appDatas[*pid];
1612 
1613  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1614  {
1615  return moab::MB_FAILURE;
1616  } // tag not defined
1617 
1618  Tag tag = data.tagMap[tag_name];
1619 
1620  int tagLength = 0;
1621  ErrorCode rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1622 
1623  DataType dtype;
1624  rval = context.MBI->tag_get_data_type( tag, dtype );
1625 
1626  if( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER )
1627  {
1628  return moab::MB_FAILURE;
1629  }
1630 
1631  // set it on a subset of entities, based on type and length
1632  Range* ents_to_set;
1633 
1634  if( *ent_type == 0 ) // vertices
1635  {
1636  ents_to_set = &data.all_verts;
1637  }
1638  else // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
1639  {
1640  ents_to_set = &data.primary_elems;
1641  }
1642 
1643  int nents_to_be_set = *num_tag_storage_length / tagLength;
1644 
1645  if( nents_to_be_set > (int)ents_to_set->size() )
1646  {
1647  return moab::MB_FAILURE;
1648  } // too many entities to be set
1649 
1650  // restrict the range; everything is contiguous; or not?
1651 
1652  // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
1653  // 1 ) );
1654  rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );MB_CHK_ERR( rval );
1655 
1656  return moab::MB_SUCCESS; // no error
1657 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, MB_CHK_ERR, MB_SUCCESS, MB_TYPE_INTEGER, GlobalContext::MBI, appData::primary_elems, moab::Range::size(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_length(), moab::Interface::tag_set_data(), and appData::tagMap.
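
A sketch (illustrative): store one integer per visible vertex; "EXT_INDEX" is a placeholder tag that must have been defined first with iMOAB_DefineTagStorage.

    #include "moab/iMOAB.h"
    #include <vector>

    ErrCode set_vertex_indices( iMOAB_AppID pid, int num_visible_verts )
    {
        std::vector< int > idx( num_visible_verts );
        for( int i = 0; i < num_visible_verts; i++ )
            idx[i] = i; // placeholder values
        char tname[] = "EXT_INDEX";
        int length   = num_visible_verts; // nents * components (1 here)
        int ent_type = 0;                 // 0 = vertices
        return iMOAB_SetIntTagStorage( pid, tname, &length, &ent_type, idx.data() );
    }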

◆ iMOAB_SynchronizeTags()

ErrCode iMOAB_SynchronizeTags ( iMOAB_AppID  pid,
int *  num_tag,
int *  tag_indices,
int *  ent_type 
)

Exchange tag values for the given tags across process boundaries.

Note
Operations: Collective
Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]num_tag(int*) Number of tags to exchange.
[in]tag_indices(int*) Array with tag indices of interest (size = *num_tag).
[in]ent_type(int*) The type of entity for tag exchange.
Returns
ErrCode The error code indicating success or failure.

Definition at line 2095 of file iMOAB.cpp.

2096 {
2097 #ifdef MOAB_HAVE_MPI
2098  appData& data = context.appDatas[*pid];
2099  Range ent_exchange;
2100  std::vector< Tag > tags;
2101 
2102  for( int i = 0; i < *num_tag; i++ )
2103  {
2104  if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
2105  {
2106  return moab::MB_FAILURE;
2107  } // error in tag index
2108 
2109  tags.push_back( data.tagList[tag_indices[i]] );
2110  }
2111 
2112  if( *ent_type == 0 )
2113  {
2114  ent_exchange = data.all_verts;
2115  }
2116  else if( *ent_type == 1 )
2117  {
2118  ent_exchange = data.primary_elems;
2119  }
2120  else
2121  {
2122  return moab::MB_FAILURE;
2123  } // unexpected type
2124 
2125  ParallelComm* pco = context.pcomms[*pid];
2126 
2127  ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
2128 
2129 #else
2130  /* do nothing if serial */
2131  // just silence the warning
2132  // do not call sync tags in serial!
2133  int k = *pid + *num_tag + *tag_indices + *ent_type;
2134  k++;
2135 #endif
2136 
2137  return moab::MB_SUCCESS;
2138 }

References appData::all_verts, GlobalContext::appDatas, context, ErrorCode, moab::ParallelComm::exchange_tags(), MB_CHK_ERR, MB_SUCCESS, GlobalContext::pcomms, appData::primary_elems, and appData::tagList.
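
A sketch (illustrative): exchange two tags, identified by the indices returned from iMOAB_DefineTagStorage, so that ghost copies match their owners.

    #include "moab/iMOAB.h"

    ErrCode sync_two_tags( iMOAB_AppID pid, int tag_index_T, int tag_index_U )
    {
        int num_tag = 2;
        int tag_indices[2] = { tag_index_T, tag_index_U };
        int ent_type = 0; // 0 = vertices, 1 = elements
        return iMOAB_SynchronizeTags( pid, &num_tag, tag_indices, &ent_type );
    }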

◆ iMOAB_UpdateMeshInfo()

ErrCode iMOAB_UpdateMeshInfo ( iMOAB_AppID  pid)

Update local mesh data structure, from file information.

Note
The method should be called after mesh modifications, for example reading a file or creating mesh in memory

Operations: Collective

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
Returns
ErrCode The error code indicating success or failure.

Definition at line 778 of file iMOAB.cpp.

779 {
780  // this will include ghost elements info
781  appData& data = context.appDatas[*pid];
782  EntityHandle fileSet = data.file_set;
783 
784  // first clear all data ranges; this can be called after ghosting
785  data.all_verts.clear();
786  data.primary_elems.clear();
787  data.local_verts.clear();
788  data.owned_verts.clear();
789  data.ghost_vertices.clear();
790  data.owned_elems.clear();
791  data.ghost_elems.clear();
792  data.mat_sets.clear();
793  data.neu_sets.clear();
794  data.diri_sets.clear();
795 
796  // Let us get all the vertex entities
797  ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval ); // recursive
798 
799  // Let us check first entities of dimension = 3
800  data.dimension = 3;
801  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
802 
803  if( data.primary_elems.empty() )
804  {
805  // Now 3-D elements. Let us check entities of dimension = 2
806  data.dimension = 2;
807  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
808 
809  if( data.primary_elems.empty() )
810  {
811  // Now 3-D/2-D elements. Let us check entities of dimension = 1
812  data.dimension = 1;
813  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
814 
815  if( data.primary_elems.empty() )
816  {
817  // no elements of dimension 1 or 2 or 3; it could happen for point clouds
818  data.dimension = 0;
819  }
820  }
821  }
822 
823  // check if the current mesh is just a point cloud
824  data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
825 
826 #ifdef MOAB_HAVE_MPI
827 
828  if( context.MPI_initialized )
829  {
830  ParallelComm* pco = context.pcomms[*pid];
831 
832  // filter ghost vertices, from local
833  rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
834 
835  // Store handles for all ghosted entities
836  data.ghost_vertices = subtract( data.all_verts, data.local_verts );
837 
838  // filter ghost elements, from local
839  rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
840 
841  data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
842  // now update global number of primary cells and global number of vertices
843  // determine first number of owned vertices
844  // Get local owned vertices
845  rval = pco->filter_pstatus( data.all_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &data.owned_verts );MB_CHK_ERR( rval );
846  int local[2], global[2];
847  local[0] = data.owned_verts.size();
848  local[1] = data.owned_elems.size();
849  MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
850  rval = iMOAB_SetGlobalInfo( pid, &( global[0] ), &( global[1] ) );MB_CHK_ERR( rval );
851  }
852  else
853  {
854  data.local_verts = data.all_verts;
855  data.owned_elems = data.primary_elems;
856  }
857 
858 #else
859 
860  data.local_verts = data.all_verts;
861  data.owned_elems = data.primary_elems;
862 
863 #endif
864 
865  // Get the references for some standard internal tags such as material blocks, BCs, etc
866  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
867  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
868 
869  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
870  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
871 
872  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
873  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
874 
875  return moab::MB_SUCCESS;
876 }

References appData::all_verts, GlobalContext::appDatas, moab::Range::clear(), moab::ParallelComm::comm(), context, appData::dimension, appData::diri_sets, GlobalContext::dirichlet_tag, moab::Range::empty(), ErrorCode, appData::file_set, moab::ParallelComm::filter_pstatus(), moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type(), moab::Interface::get_entities_by_type_and_tag(), appData::ghost_elems, appData::ghost_vertices, iMOAB_SetGlobalInfo(), appData::local_verts, appData::mat_sets, GlobalContext::material_tag, MB_CHK_ERR, MB_SUCCESS, MBENTITYSET, GlobalContext::MBI, MBVERTEX, GlobalContext::MPI_initialized, appData::neu_sets, GlobalContext::neumann_tag, appData::owned_elems, appData::owned_verts, GlobalContext::pcomms, appData::point_cloud, appData::primary_elems, PSTATUS_GHOST, PSTATUS_NOT, PSTATUS_NOT_OWNED, moab::Range::size(), moab::subtract(), and moab::Interface::UNION.

Referenced by iMOAB_DetermineGhostEntities(), iMOAB_LoadMesh(), iMOAB_MergeVertices(), and iMOAB_ReceiveMesh().
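
A sketch (illustrative) of refreshing and then querying the cached counts after a mesh modification; the 3-entry owned/ghost/total layout of iMOAB_GetMeshInfo is an assumption here.

    #include "moab/iMOAB.h"

    ErrCode refresh_and_query( iMOAB_AppID pid )
    {
        ErrCode rc = iMOAB_UpdateMeshInfo( pid ); // rebuild cached ranges and counts
        if( rc ) return rc;
        int nv[3], ne[3], nb[3], nsbc[3], ndbc[3]; // owned/ghost/total (assumed)
        return iMOAB_GetMeshInfo( pid, nv, ne, nb, nsbc, ndbc );
    }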

◆ iMOAB_WriteLocalMesh()

ErrCode iMOAB_WriteLocalMesh ( iMOAB_AppID  pid,
iMOAB_String  prefix 
)

Write a local MOAB mesh copy.

Note
The interface will write one file per task. Only the local mesh set and the solution data associated with the application will be written to the file. This is a very convenient method for debugging.

Operations: Not Collective (independent operation).

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]prefix(iMOAB_String) The MOAB file prefix that will be used with task ID in parallel.
Returns
ErrCode The error code indicating success or failure.

Definition at line 760 of file iMOAB.cpp.

761 {
762  IMOAB_CHECKPOINTER( prefix, 2 );
763  IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
764 
765  std::ostringstream file_name;
766  int rank = 0, size = 1;
767 #ifdef MOAB_HAVE_MPI
768  rank = context.pcomms[*pid]->rank();
769  size = context.pcomms[*pid]->size();
770 #endif
771  file_name << prefix << "_" << size << "_" << rank << ".h5m";
772  // Now let us actually write the file to disk with appropriate options
773  ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
774 
775  return moab::MB_SUCCESS;
776 }

References GlobalContext::appDatas, context, ErrorCode, IMOAB_ASSERT, IMOAB_CHECKPOINTER, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, GlobalContext::pcomms, size, and moab::Interface::write_file().
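
A sketch (illustrative): with the naming scheme above, on 4 tasks the prefix "debug" produces debug_4_0.h5m through debug_4_3.h5m.

    #include "moab/iMOAB.h"

    ErrCode dump_local_mesh( iMOAB_AppID pid )
    {
        char prefix[] = "debug"; // placeholder prefix
        return iMOAB_WriteLocalMesh( pid, prefix );
    }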

◆ iMOAB_WriteMesh()

ErrCode iMOAB_WriteMesh ( iMOAB_AppID  pid,
const iMOAB_String  filename,
const iMOAB_String  write_options 
)

Write a MOAB mesh along with the solution tags to a file.

Note
The interface will write a single file for H5M; for serial formats (VTK/Exodus), it will write one file per task. Write options can include parallel write options, if needed. Only the mesh set and the solution data associated with the application will be written to the file.

Operations: Collective for parallel write, non-collective for serial write.

Parameters
[in]pid(iMOAB_AppID) The unique pointer to the application ID.
[in]filename(iMOAB_String) The MOAB mesh file (H5M) to write all the entities contained in the internal application mesh set.
[in]write_options(iMOAB_String) Additional options for writing the MOAB mesh in parallel.
Returns
ErrCode The error code indicating success or failure.

Definition at line 699 of file iMOAB.cpp.

700 {
701  IMOAB_CHECKPOINTER( filename, 2 );
702  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
703 
704  appData& data = context.appDatas[*pid];
705  EntityHandle fileSet = data.file_set;
706 
707  std::ostringstream newopts;
708 #ifdef MOAB_HAVE_MPI
709  std::string write_opts( ( write_options ? write_options : "" ) );
710  std::string pcid( "PARALLEL_COMM=" );
711  std::size_t found = write_opts.find( pcid );
712 
713  if( found != std::string::npos )
714  {
715  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
716  return moab::MB_FAILURE;
717  }
718 
719  // if write in parallel, add pc option, to be sure about which ParallelComm instance is used
720  std::string pw( "PARALLEL=WRITE_PART" );
721  found = write_opts.find( pw );
722 
723  if( found != std::string::npos )
724  {
725  newopts << "PARALLEL_COMM=" << *pid << ";";
726  }
727 
728 #endif
729 
730  if( write_options ) newopts << write_options;
731 
732  std::vector< Tag > copyTagList = data.tagList;
733  // append Global ID and Parallel Partition
734  std::string gid_name_tag( "GLOBAL_ID" );
735 
736  // export global id tag, we need it always
737  if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
738  {
739  Tag gid = context.MBI->globalId_tag();
740  copyTagList.push_back( gid );
741  }
742  // also Parallel_Partition PARALLEL_PARTITION
743  std::string pp_name_tag( "PARALLEL_PARTITION" );
744 
745  // write parallel part tag too, if it exists
746  if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
747  {
748  Tag ptag;
749  context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
750  if( ptag ) copyTagList.push_back( ptag );
751  }
752 
753  // Now let us actually write the file to disk with appropriate options
754  ErrorCode rval = context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, &copyTagList[0],
755  (int)copyTagList.size() );MB_CHK_ERR( rval );
756 
757  return moab::MB_SUCCESS;
758 }

References GlobalContext::appDatas, context, ErrorCode, appData::file_set, moab::Interface::globalId_tag(), IMOAB_ASSERT, IMOAB_CHECKPOINTER, MB_CHK_ERR, MB_SUCCESS, GlobalContext::MBI, moab::Interface::tag_get_handle(), appData::tagList, appData::tagMap, and moab::Interface::write_file().
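
A parallel-write sketch (illustrative): per the check above, PARALLEL_COMM must not be passed explicitly; it is added internally from the application's ParallelComm.

    #include "moab/iMOAB.h"

    ErrCode write_parallel( iMOAB_AppID pid )
    {
        char fname[] = "whole_mesh.h5m";      // placeholder file name
        char opts[]  = "PARALLEL=WRITE_PART"; // triggers the implicit PARALLEL_COMM
        return iMOAB_WriteMesh( pid, fname, opts );
    }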

◆ split_tag_names()

static void split_tag_names ( std::string  input_names,
std::string &  separator,
std::vector< std::string > &  list_tag_names 
)
static

Definition at line 442 of file iMOAB.cpp.

445 {
446  size_t pos = 0;
447  std::string token;
448  while( ( pos = input_names.find( separator ) ) != std::string::npos )
449  {
450  token = input_names.substr( 0, pos );
451  if( !token.empty() ) list_tag_names.push_back( token );
452  // std::cout << token << std::endl;
453  input_names.erase( 0, pos + separator.length() );
454  }
455  if( !input_names.empty() )
456  {
457  // if leftover something, or if not ended with delimiter
458  list_tag_names.push_back( input_names );
459  }
460  return;
461 }

Referenced by iMOAB_DefineTagStorage(), iMOAB_GetDoubleTagStorage(), iMOAB_ReceiveElementTag(), iMOAB_SendElementTag(), iMOAB_SetDoubleTagStorage(), and iMOAB_SetDoubleTagStorageWithGid().
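
A call sketch, as it would appear inside iMOAB.cpp (the function is file-static, so it is not callable from outside this translation unit):

    std::vector< std::string > names;
    std::string sep( ":" );
    split_tag_names( "a2oTbot:a2oUbot:", sep, names );
    // names == { "a2oTbot", "a2oUbot" }: empty tokens are skipped,
    // so the trailing ':' does not produce an empty entry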

Variable Documentation

◆ context