Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
moab::WriteHDF5 Class Reference

Write mesh database to MOAB's native HDF5-based file format. More...

#include <WriteHDF5.hpp>

+ Inheritance diagram for moab::WriteHDF5:
+ Collaboration diagram for moab::WriteHDF5:

Classes

struct  ExportSet
 Range of entities, grouped by type, to export. More...
 
struct  ExportType
 
struct  HDF5ErrorHandler
 Store old HDF5 error handling function. More...
 
struct  SpecialSetData
 Struct describing a set for which the contained and linked entity lists are something other than the local values. Used to store data for shared sets owned by this process when writing in parallel. More...
 
struct  SpecSetLess
 
struct  TagDesc
 Tag to write to file. More...
 

Public Types

typedef EntityHandle wid_t
 

Public Member Functions

 WriteHDF5 (Interface *iface)
 
virtual ~WriteHDF5 ()
 
ErrorCode write_file (const char *filename, const bool overwrite, const FileOptions &opts, const EntityHandle *export_sets, const int export_set_count, const std::vector< std::string > &qa_records, const Tag *tag_list=NULL, int num_tags=0, int user_dimension=3)
 
mhdf_FileHandle file_ptr ()
 
WriteUtilIface* write_util ()
 
ErrorCode create_elem_table (const ExportSet &block, long num_ents, long &first_id_out)
 
ErrorCode create_set_meta (long num_sets, long &first_id_out)
 
- Public Member Functions inherited from moab::WriterIface
virtual ~WriterIface ()
 

Static Public Member Functions

static WriterIface* factory (Interface *)
 

Static Public Attributes

static const hid_t id_type = get_id_type()
 

Protected Types

enum  TimingValues {
  TOTAL_TIME = 0 , GATHER_TIME , CREATE_TIME , CREATE_NODE_TIME ,
  NEGOTIATE_TYPES_TIME , CREATE_ELEM_TIME , FILEID_EXCHANGE_TIME , CREATE_ADJ_TIME ,
  CREATE_SET_TIME , SHARED_SET_IDS , SHARED_SET_CONTENTS , SET_OFFSET_TIME ,
  CREATE_TAG_TIME , COORD_TIME , CONN_TIME , SET_TIME ,
  SET_META , SET_CONTENT , SET_PARENT , SET_CHILD ,
  ADJ_TIME , TAG_TIME , DENSE_TAG_TIME , SPARSE_TAG_TIME ,
  VARLEN_TAG_TIME , NUM_TIMES
}
 

Protected Member Functions

virtual ErrorCode parallel_create_file (const char *filename, bool overwrite, const std::vector< std::string > &qa_records, const FileOptions &opts, const Tag *tag_list, int num_tags, int dimension=3, double *times=0)
 
virtual ErrorCode write_finished ()
 
virtual void debug_barrier_line (int lineno)
 
ErrorCode gather_tags (const Tag *user_tag_list, int user_tag_list_length)
 Gather tags. More...
 
bool check_dense_format_tag (const ExportSet &ents, const Range &all_tagged, bool prefer_dense)
 
ErrorCode count_adjacencies (const Range &elements, wid_t &result)
 
ErrorCode count_set_size (const Range &sets, long &contents_length_out, long &children_length_out, long &parents_length_out)
 
ErrorCode get_set_info (EntityHandle set, long &num_entities, long &num_children, long &num_parents, unsigned long &flags)
 Get information about a meshset. More...
 
ErrorCode create_set_tables (long contents_length, long children_length, long parents_length)
 
ErrorCode write_qa (const std::vector< std::string > &list)
 Write exodus-type QA info. More...
 
ErrorCode get_num_sparse_tagged_entities (const TagDesc &tag, size_t &count)
 Get tagged entities for which to write tag values. More...
 
ErrorCode get_sparse_tagged_entities (const TagDesc &tag, Range &range)
 Get tagged entities for which to write tag values. More...
 
void get_write_entities (Range &range)
 Get entities that will be written to file. More...
 
const ExportSet* find (const ExportType &type) const
 
const SpecialSetData* find_set_data (EntityHandle h) const
 
SpecialSetData* find_set_data (EntityHandle h)
 
void print_id_map () const
 
void print_id_map (std::ostream &str, const char *prefix="") const
 
ErrorCode create_tag (const TagDesc &tag_data, unsigned long num_entities, unsigned long var_len_total)
 
ErrorCode assign_ids (const Range &entities, wid_t first_id)
 add entities to idMap More...
 
ErrorCode range_to_blocked_list (const Range &input_range, std::vector< wid_t > &output_id_list, bool &ranged_list)
 
ErrorCode range_to_blocked_list (const EntityHandle *input_ranges, size_t num_input_ranges, std::vector< wid_t > &output_id_list, bool &ranged_list)
 
ErrorCode range_to_id_list (const Range &input_range, wid_t *array)
 
ErrorCode vector_to_id_list (const std::vector< EntityHandle > &input, std::vector< wid_t > &output, bool remove_non_written=false)
 Get IDs for entities. More...
 
ErrorCode vector_to_id_list (const EntityHandle *input, wid_t *output, size_t num_entities)
 Get IDs for entities. More...
 
ErrorCode vector_to_id_list (const EntityHandle *input, size_t input_len, wid_t *output, size_t &output_len, bool remove_non_written)
 Get IDs for entities. More...
 
bool convert_handle_tag (EntityHandle *data, size_t count) const
 
bool convert_handle_tag (const EntityHandle *source, EntityHandle *dest, size_t count) const
 
ErrorCode get_adjacencies (EntityHandle entity, std::vector< wid_t > &adj)
 
ErrorCode get_tag_data_length (const TagDesc &tag_info, const Range &range, unsigned long &result)
 get sum of lengths of tag values (as number of type) for variable length tag data. More...
 
virtual void print_times (const double times[NUM_TIMES]) const
 

Protected Attributes

HDF5ErrorHandler errorHandler
 Store old HDF5 error handling function. More...
 
size_t bufferSize
 The size of the data buffer (dataBuffer). More...
 
char * dataBuffer
 A memory buffer to use for all I/O operations. More...
 
Interface* iFace
 Interface pointer passed to constructor. More...
 
WriteUtilIface* writeUtil
 Cached pointer to writeUtil interface. More...
 
mhdf_FileHandle filePtr
 The file handle from the mhdf library. More...
 
RangeMap< EntityHandle, wid_t > idMap
 Map from entity handles to file IDs. More...
 
std::list< ExportSet > exportList
 The list of elements to export. More...
 
ExportSet nodeSet
 The list of nodes to export. More...
 
ExportSet setSet
 The list of sets to export. More...
 
unsigned long setContentsOffset
 Offset into set contents table (zero except for parallel) More...
 
unsigned long setChildrenOffset
 Offset into set children table (zero except for parallel) More...
 
unsigned long setParentsOffset
 
long maxNumSetContents
 The largest number of values to write for any processor (needed to do collective IO). More...
 
long maxNumSetChildren
 
long maxNumSetParents
 
bool writeSets
 Flags indicating if set data should be written. For the normal (non-parallel) case, these values will depend only on whether or not there is any data to be written. For parallel-meshes, opening the data table is collective so the values must depend on whether or not any processor has meshsets to be written. More...
 
bool writeSetContents
 
bool writeSetChildren
 
bool writeSetParents
 
std::vector< SpecialSetData > specialSets
 Array of special/shared sets, in order of handle value. More...
 
std::list< TagDesc > tagList
 The list of tags to export. More...
 
bool parallelWrite
 True if doing parallel write. More...
 
bool collectiveIO
 True if using collective IO calls for parallel write. More...
 
bool writeTagDense
 True if writing dense-formatted tag data. More...
 
hid_t writeProp
 Property set to pass to H5Dwrite calls. For serial, should be H5P_DEFAULT. For parallel, may request collective IO. More...
 
DebugOutput dbgOut
 Utility to log debug output. More...
 
bool debugTrack
 Look for overlapping and/or missing writes. More...
 

Static Protected Attributes

static MPEState topState
 
static MPEState subState
 

Private Member Functions

ErrorCode write_file_impl (const char *filename, const bool overwrite, const FileOptions &opts, const EntityHandle *export_sets, const int export_set_count, const std::vector< std::string > &qa_records, const Tag *tag_list, int num_tags, int user_dimension=3)
 Do the actual work of write_file. Separated from write_file for easier resource cleanup. More...
 
ErrorCode init ()
 
ErrorCode serial_create_file (const char *filename, bool overwrite, const std::vector< std::string > &qa_records, const Tag *tag_list, int num_tags, int dimension=3)
 
ErrorCode gather_mesh_info (const std::vector< EntityHandle > &export_sets)
 
ErrorCode gather_all_mesh ()
 Same as gather_mesh_info, except for entire mesh. More...
 
ErrorCode initialize_mesh (const Range entities_by_dim[5])
 Initialize internal data structures from gathered mesh. More...
 
ErrorCode write_nodes ()
 
ErrorCode write_elems (ExportSet &elemset)
 
ErrorCode write_sets (double *times)
 
ErrorCode write_set_data (const WriteUtilIface::EntityListType which_data, const hid_t handle, IODebugTrack &track, Range *ranged=0, Range *null_stripped=0, std::vector< long > *set_sizes=0)
 
ErrorCode write_adjacencies (const ExportSet &export_set)
 
ErrorCode write_tag (const TagDesc &tag_data, double *times)
 Write tag for all entities. More...
 
ErrorCode get_connectivity (Range::const_iterator begin, Range::const_iterator end, int nodes_per_element, wid_t *id_data_out)
 Get element connectivity. More...
 
ErrorCode get_tag_size (Tag tag, DataType &moab_type, int &num_bytes, int &elem_size, int &file_size, mhdf_TagDataType &file_type, hid_t &hdf_type)
 Get size data for tag. More...
 
ErrorCode write_sparse_ids (const TagDesc &tag_data, const Range &range, hid_t table_handle, size_t table_size, const char *name=0)
 Write ID table for sparse tag. More...
 
ErrorCode write_sparse_tag (const TagDesc &tag_data, const std::string &tag_name, DataType tag_data_type, hid_t hdf5_data_type, int hdf5_type_size)
 Write fixed-length tag data in sparse format. More...
 
ErrorCode write_var_len_indices (const TagDesc &tag_data, const Range &range, hid_t idx_table, size_t table_size, int type_size, const char *name=0)
 Write end index data_set for a variable-length tag. More...
 
ErrorCode write_var_len_data (const TagDesc &tag_data, const Range &range, hid_t table, size_t table_size, bool handle_tag, hid_t hdf_type, int type_size, const char *name=0)
 Write tag value data_set for a variable-length tag. More...
 
ErrorCode write_var_len_tag (const TagDesc &tag_info, const std::string &tag_name, DataType tag_data_type, hid_t hdf5_type, int hdf5_type_size)
 Write variable-length tag data. More...
 
ErrorCode write_dense_tag (const TagDesc &tag_data, const ExportSet &elem_data, const std::string &tag_name, DataType tag_data_type, hid_t hdf5_data_type, int hdf5_type_size)
 Write dense-formatted tag data. More...
 
ErrorCode write_tag_values (Tag tag_id, hid_t data_table, unsigned long data_offset, const Range &range, DataType tag_data_type, hid_t hdf5_data_type, int hdf5_type_size, unsigned long max_num_ents, IODebugTrack &debug_track)
 Write data for fixed-size tag. More...
 

Detailed Description

Write mesh database to MOAB's native HDF5-based file format.

Author
Jason Kraftcheck
Date
01 April 2004

Definition at line 47 of file WriteHDF5.hpp.

Member Typedef Documentation

◆ wid_t

The type to use for entity IDs w/in the file.

NOTE: If this is changed, the value of id_type MUST be changed accordingly.

Definition at line 77 of file WriteHDF5.hpp.

Member Enumeration Documentation

◆ TimingValues

Enumerator
TOTAL_TIME 
GATHER_TIME 
CREATE_TIME 
CREATE_NODE_TIME 
NEGOTIATE_TYPES_TIME 
CREATE_ELEM_TIME 
FILEID_EXCHANGE_TIME 
CREATE_ADJ_TIME 
CREATE_SET_TIME 
SHARED_SET_IDS 
SHARED_SET_CONTENTS 
SET_OFFSET_TIME 
CREATE_TAG_TIME 
COORD_TIME 
CONN_TIME 
SET_TIME 
SET_META 
SET_CONTENT 
SET_PARENT 
SET_CHILD 
ADJ_TIME 
TAG_TIME 
DENSE_TAG_TIME 
SPARSE_TAG_TIME 
VARLEN_TAG_TIME 
NUM_TIMES 

Definition at line 690 of file WriteHDF5.hpp.

691  {
692  TOTAL_TIME = 0,
693  GATHER_TIME,
694  CREATE_TIME,
705  COORD_TIME,
706  CONN_TIME,
707  SET_TIME,
708  SET_META,
709  SET_CONTENT,
710  SET_PARENT,
711  SET_CHILD,
712  ADJ_TIME,
713  TAG_TIME,
717  NUM_TIMES
718  };

Constructor & Destructor Documentation

◆ WriteHDF5()

moab::WriteHDF5::WriteHDF5 ( Interface * iface)

Definition at line 396 of file WriteHDF5.cpp.

399  maxNumSetChildren( 0 ), maxNumSetParents( 0 ), writeSets( false ), writeSetContents( false ),
400  writeSetChildren( false ), writeSetParents( false ), parallelWrite( false ), collectiveIO( false ),
401  writeTagDense( false ), writeProp( H5P_DEFAULT ), dbgOut( "H5M", stderr ), debugTrack( false )
402 {
403 }

Referenced by factory().

◆ ~WriteHDF5()

moab::WriteHDF5::~WriteHDF5 ( )
virtual

Definition at line 484 of file WriteHDF5.cpp.

485 {
486  if( !writeUtil ) // init() failed.
487  return;
488 
490 }

References iFace, moab::Interface::release_interface(), and writeUtil.

Member Function Documentation

◆ assign_ids()

ErrorCode moab::WriteHDF5::assign_ids ( const Range entities,
wid_t  first_id 
)
protected

add entities to idMap

Definition at line 351 of file WriteHDF5.cpp.

352 {
353  Range::const_pair_iterator pi;
354  for( pi = entities.const_pair_begin(); pi != entities.const_pair_end(); ++pi )
355  {
356  const EntityHandle n = pi->second - pi->first + 1;
357  dbgOut.printf( 3, "Assigning %s %lu to %lu to file IDs [%lu,%lu]\n",
358  CN::EntityTypeName( TYPE_FROM_HANDLE( pi->first ) ),
359  (unsigned long)( ID_FROM_HANDLE( pi->first ) ),
360  (unsigned long)( ID_FROM_HANDLE( pi->first ) + n - 1 ), (unsigned long)id,
361  (unsigned long)( id + n - 1 ) );
362  if( TYPE_FROM_HANDLE( pi->first ) == MBPOLYGON || TYPE_FROM_HANDLE( pi->first ) == MBPOLYHEDRON )
363  {
364  int num_vertices = 0;
365  const EntityHandle* conn = 0;
366  iFace->get_connectivity( pi->first, conn, num_vertices );
367  dbgOut.printf( 3, " poly with %d verts/faces \n", num_vertices );
368  }
369  if( !idMap.insert( pi->first, id, n ).second ) return error( MB_FAILURE );
370  id += n;
371  }
372 
373  return MB_SUCCESS;
374 }

References dbgOut, entities, moab::CN::EntityTypeName(), moab::error(), moab::Interface::get_connectivity(), moab::ID_FROM_HANDLE(), idMap, iFace, moab::RangeMap< KeyType, ValType, NullVal >::insert(), MB_SUCCESS, MBPOLYGON, MBPOLYHEDRON, moab::DebugOutput::printf(), and moab::TYPE_FROM_HANDLE().

Referenced by moab::WriteHDF5Parallel::create_element_tables(), moab::WriteHDF5Parallel::create_meshset_tables(), moab::WriteHDF5Parallel::create_node_table(), and serial_create_file().

◆ check_dense_format_tag()

bool moab::WriteHDF5::check_dense_format_tag ( const ExportSet ents,
const Range all_tagged,
bool  prefer_dense 
)
protected

Check if tag values for a given ExportSet should be written in dense format

Parameters
entsExportSet to consider
all_taggedRange containing all the entities in ents.range for which an explicit tag value is stored. Range may also contain entities not in ents.range, but may not contain entities in ents.range for which no tag value is stored.
prefer_denseIf true, will return true if at least 2/3 of the entities are tagged. This should not be passed as true if the tag does not have a default value, as tag values must be stored for all entities in the ExportSet for dense-formatted data.

Definition at line 2753 of file WriteHDF5.cpp.

2754 {
2755  // If there are no tagged entities, then don't write anything
2756  if( ents.range.empty() ) return false;
2757 
2758  // If all of the entities are tagged, then write in dense format
2759  if( all_tagged.contains( ents.range ) ) return true;
2760 
2761  // Unless asked for more lenient choice of dense format, return false
2762  if( !prefer_dense ) return false;
2763 
2764  // If we're being lenient about choosing dense format, then
2765  // return true if at least 2/3 of the entities are tagged.
2766  Range xsect = intersect( setSet.range, all_tagged );
2767  if( 3 * xsect.size() >= 2 * setSet.range.size() ) return true;
2768 
2769  return false;
2770 }

References moab::Range::contains(), moab::Range::empty(), moab::intersect(), moab::WriteHDF5::ExportSet::range, setSet, and moab::Range::size().

Referenced by moab::WriteHDF5Parallel::create_tag_tables(), and serial_create_file().

◆ convert_handle_tag() [1/2]

bool moab::WriteHDF5::convert_handle_tag ( const EntityHandle source,
EntityHandle dest,
size_t  count 
) const
protected

Definition at line 328 of file WriteHDF5.cpp.

329 {
330  bool some_valid = false;
331  for( size_t i = 0; i < count; ++i )
332  {
333  if( !source[i] )
334  dest[i] = 0;
335  else
336  {
337  dest[i] = idMap.find( source[i] );
338  if( dest[i] ) some_valid = true;
339  }
340  }
341 
342  return some_valid;
343 }

References moab::RangeMap< KeyType, ValType, NullVal >::find(), and idMap.

◆ convert_handle_tag() [2/2]

bool moab::WriteHDF5::convert_handle_tag ( EntityHandle data,
size_t  count 
) const
protected

When writing tags containing EntityHandles to file, need to convert tag data from EntityHandles to file IDs. This function does that.

If the handle is not valid or does not correspond to an entity that will be written to the file, the file ID is set to zero.

Parameters
dataThe data buffer. As input, an array of EntityHandles. As output an array of file IDS, where the size of each integral file ID is the same as the size of EntityHandle.
countThe number of handles in the buffer.
Returns
true if at least one of the handles is valid and will be written to the file or at least one of the handles is NULL (zero). false otherwise

Definition at line 345 of file WriteHDF5.cpp.

346 {
347  assert( sizeof( EntityHandle ) == sizeof( wid_t ) );
348  return convert_handle_tag( data, data, count );
349 }

Referenced by create_tag(), write_tag_values(), and write_var_len_data().

◆ count_adjacencies()

ErrorCode moab::WriteHDF5::count_adjacencies ( const Range elements,
wid_t result 
)
protected

Helper function for create-file

Calculate the sum of the number of non-set adjacencies of all entities in the passed range.

Definition at line 2772 of file WriteHDF5.cpp.

2773 {
2774  ErrorCode rval;
2775  std::vector< wid_t > adj_list;
2776  Range::const_iterator iter = set.begin();
2777  const Range::const_iterator end = set.end();
2778  result = 0;
2779  for( ; iter != end; ++iter )
2780  {
2781  adj_list.clear();
2782  rval = get_adjacencies( *iter, adj_list );
2783  CHK_MB_ERR_0( rval );
2784 
2785  if( adj_list.size() > 0 ) result += 2 + adj_list.size();
2786  }
2787 
2788  return MB_SUCCESS;
2789 }

References moab::Range::begin(), CHK_MB_ERR_0, moab::Range::end(), ErrorCode, get_adjacencies(), and MB_SUCCESS.

Referenced by moab::WriteHDF5Parallel::create_adjacency_tables(), and serial_create_file().

◆ count_set_size()

ErrorCode moab::WriteHDF5::count_set_size ( const Range sets,
long &  contents_length_out,
long &  children_length_out,
long &  parents_length_out 
)
protected

Helper function for create-file

Calculate total length of set contents and child tables.

Definition at line 2809 of file WriteHDF5.cpp.

2813 {
2814  ErrorCode rval;
2815  Range set_contents;
2816  long contents_length_set, children_length_set, parents_length_set;
2817  unsigned long flags;
2818  std::vector< wid_t > set_contents_ids;
2819  std::vector< SpecialSetData >::const_iterator si = specialSets.begin();
2820 
2821  contents_length_out = 0;
2822  children_length_out = 0;
2823  parents_length_out = 0;
2824 
2825  for( Range::const_iterator iter = sets.begin(); iter != sets.end(); ++iter )
2826  {
2827  while( si != specialSets.end() && si->setHandle < *iter )
2828  ++si;
2829 
2830  if( si != specialSets.end() && si->setHandle == *iter )
2831  {
2832  contents_length_out += si->contentIds.size();
2833  children_length_out += si->childIds.size();
2834  parents_length_out += si->parentIds.size();
2835  ++si;
2836  continue;
2837  }
2838 
2839  rval = get_set_info( *iter, contents_length_set, children_length_set, parents_length_set, flags );
2840  CHK_MB_ERR_0( rval );
2841 
2842  // Check if can and should compress as ranges
2843  if( !( flags & MESHSET_ORDERED ) && contents_length_set )
2844  {
2845  set_contents.clear();
2846  rval = iFace->get_entities_by_handle( *iter, set_contents, false );
2847  CHK_MB_ERR_0( rval );
2848 
2849  bool blocked_list;
2850  rval = range_to_blocked_list( set_contents, set_contents_ids, blocked_list );
2851  CHK_MB_ERR_0( rval );
2852 
2853  if( blocked_list )
2854  {
2855  assert( set_contents_ids.size() % 2 == 0 );
2856  contents_length_set = set_contents_ids.size();
2857  }
2858  }
2859 
2860  contents_length_out += contents_length_set;
2861  children_length_out += children_length_set;
2862  parents_length_out += parents_length_set;
2863  }
2864 
2865  return MB_SUCCESS;
2866 }

References moab::Range::begin(), CHK_MB_ERR_0, moab::Range::clear(), moab::Range::end(), ErrorCode, moab::Interface::get_entities_by_handle(), get_set_info(), iFace, MB_SUCCESS, range_to_blocked_list(), and specialSets.

Referenced by moab::WriteHDF5Parallel::create_meshset_tables(), and serial_create_file().

◆ create_elem_table()

ErrorCode moab::WriteHDF5::create_elem_table ( const ExportSet block,
long  num_ents,
long &  first_id_out 
)

Helper function for create-file

Create zero-ed tables where element connectivity and adjacency data will be stored.

Definition at line 2791 of file WriteHDF5.cpp.

2792 {
2793  mhdf_Status status;
2794  hid_t handle;
2795 
2797 
2798  mhdf_addElement( filePtr, block.name(), block.type, &status );
2799  CHK_MHDF_ERR_0( status );
2800 
2801  handle = mhdf_createConnectivity( filePtr, block.name(), block.num_nodes, num_entities, &first_id_out, &status );
2802  CHK_MHDF_ERR_0( status );
2803  mhdf_closeData( filePtr, handle, &status );
2804  CHK_MHDF_ERR_0( status );
2805 
2806  return MB_SUCCESS;
2807 }

References CHECK_OPEN_HANDLES, CHK_MHDF_ERR_0, filePtr, MB_SUCCESS, mhdf_addElement(), mhdf_closeData(), mhdf_createConnectivity(), moab::WriteHDF5::ExportSet::name(), moab::WriteHDF5::ExportType::num_nodes, and moab::WriteHDF5::ExportType::type.

Referenced by moab::WriteHDF5Parallel::create_element_tables(), and serial_create_file().

◆ create_set_meta()

ErrorCode moab::WriteHDF5::create_set_meta ( long  num_sets,
long &  first_id_out 
)

Helper function for create-file

Create zero-ed table where set descriptions will be written

Definition at line 2868 of file WriteHDF5.cpp.

2869 {
2870  hid_t handle;
2871  mhdf_Status status;
2872 
2874 
2875  handle = mhdf_createSetMeta( filePtr, num_sets, &first_id_out, &status );
2876  CHK_MHDF_ERR_0( status );
2877  mhdf_closeData( filePtr, handle, &status );
2878 
2879  return MB_SUCCESS;
2880 }

References CHECK_OPEN_HANDLES, CHK_MHDF_ERR_0, filePtr, MB_SUCCESS, mhdf_closeData(), and mhdf_createSetMeta().

Referenced by moab::WriteHDF5Parallel::create_meshset_tables(), and serial_create_file().

◆ create_set_tables()

ErrorCode moab::WriteHDF5::create_set_tables ( long  contents_length,
long  children_length,
long  parents_length 
)
protected

Helper function for create-file

Create zero-ed tables where set data will be written.

Definition at line 2891 of file WriteHDF5.cpp.

2892 {
2893  hid_t handle;
2894  mhdf_Status status;
2895 
2897 
2898  if( num_set_contents > 0 )
2899  {
2900  handle = mhdf_createSetData( filePtr, num_set_contents, &status );
2901  CHK_MHDF_ERR_0( status );
2902  mhdf_closeData( filePtr, handle, &status );
2903  }
2904 
2905  if( num_set_children > 0 )
2906  {
2907  handle = mhdf_createSetChildren( filePtr, num_set_children, &status );
2908  CHK_MHDF_ERR_0( status );
2909  mhdf_closeData( filePtr, handle, &status );
2910  }
2911 
2912  if( num_set_parents > 0 )
2913  {
2914  handle = mhdf_createSetParents( filePtr, num_set_parents, &status );
2915  CHK_MHDF_ERR_0( status );
2916  mhdf_closeData( filePtr, handle, &status );
2917  }
2918 
2919  return MB_SUCCESS;
2920 }

References CHECK_OPEN_HANDLES, CHK_MHDF_ERR_0, filePtr, MB_SUCCESS, mhdf_closeData(), mhdf_createSetChildren(), mhdf_createSetData(), and mhdf_createSetParents().

Referenced by moab::WriteHDF5Parallel::create_meshset_tables(), and serial_create_file().

◆ create_tag()

ErrorCode moab::WriteHDF5::create_tag ( const TagDesc tag_data,
unsigned long  num_entities,
unsigned long  var_len_total 
)
protected

Helper function for create-file

Write tag meta-info and create zero-ed table where tag values will be written.

Parameters
num_entitiesNumber of entities for which to write tag data.
var_len_totalFor variable-length tags, the total number of values in the data table.

Definition at line 3096 of file WriteHDF5.cpp.

3099 {
3100  TagType mb_storage;
3101  DataType mb_type;
3102  mhdf_TagDataType mhdf_type;
3103  int tag_bytes, type_size, num_vals, storage;
3104  hid_t hdf_type = (hid_t)0;
3105  hid_t handles[3];
3106  std::string tag_name;
3107  ErrorCode rval;
3108  mhdf_Status status;
3109 
3111 
3112  // Get tag properties
3113  rval = iFace->tag_get_type( tag_data.tag_id, mb_storage );
3114  CHK_MB_ERR_0( rval );
3115  switch( mb_storage )
3116  {
3117  case MB_TAG_DENSE:
3118  storage = mhdf_DENSE_TYPE;
3119  break;
3120  case MB_TAG_SPARSE:
3121  storage = mhdf_SPARSE_TYPE;
3122  break;
3123  case MB_TAG_BIT:
3124  storage = mhdf_BIT_TYPE;
3125  break;
3126  case MB_TAG_MESH:
3127  storage = mhdf_MESH_TYPE;
3128  break;
3129  default:
3130  return error( MB_FAILURE );
3131  }
3132  rval = iFace->tag_get_name( tag_data.tag_id, tag_name );
3133  CHK_MB_ERR_0( rval );
3134  rval = get_tag_size( tag_data.tag_id, mb_type, tag_bytes, type_size, num_vals, mhdf_type, hdf_type );
3135  CHK_MB_ERR_0( rval );
3136 
3137  // Get default value
3138  const void *def_value, *mesh_value;
3139  int def_val_len, mesh_val_len;
3140  rval = iFace->tag_get_default_value( tag_data.tag_id, def_value, def_val_len );
3141  if( MB_ENTITY_NOT_FOUND == rval )
3142  {
3143  def_value = 0;
3144  def_val_len = 0;
3145  }
3146  else if( MB_SUCCESS != rval )
3147  {
3148  H5Tclose( hdf_type );
3149  return error( rval );
3150  }
3151 
3152  // Get mesh value
3153  unsigned char byte;
3154  const EntityHandle root = 0;
3155  if( mb_storage == MB_TAG_BIT )
3156  {
3157  rval = iFace->tag_get_data( tag_data.tag_id, &root, 1, &byte );
3158  mesh_value = &byte;
3159  mesh_val_len = 1;
3160  }
3161  else
3162  {
3163  rval = iFace->tag_get_by_ptr( tag_data.tag_id, &root, 1, &mesh_value, &mesh_val_len );
3164  }
3165  if( MB_TAG_NOT_FOUND == rval )
3166  {
3167  mesh_value = 0;
3168  mesh_val_len = 0;
3169  }
3170  else if( MB_SUCCESS != rval )
3171  {
3172  H5Tclose( hdf_type );
3173  return error( rval );
3174  }
3175 
3176  // For handle-type tags, need to convert from handles to file ids
3177  if( MB_TYPE_HANDLE == mb_type )
3178  {
3179  // Make sure there's room in the buffer for both
3180  assert( ( def_val_len + mesh_val_len ) * sizeof( long ) < (size_t)bufferSize );
3181 
3182  // Convert default value
3183  if( def_value )
3184  {
3185  memcpy( dataBuffer, def_value, def_val_len * sizeof( EntityHandle ) );
3186  convert_handle_tag( reinterpret_cast< EntityHandle* >( dataBuffer ), def_val_len );
3187  def_value = dataBuffer;
3188  }
3189 
3190  // Convert mesh value
3191  if( mesh_value )
3192  {
3193  EntityHandle* ptr = reinterpret_cast< EntityHandle* >( dataBuffer ) + def_val_len;
3194  memcpy( ptr, mesh_value, mesh_val_len * sizeof( EntityHandle ) );
3195  if( convert_handle_tag( ptr, mesh_val_len ) )
3196  mesh_value = ptr;
3197  else
3198  mesh_value = 0;
3199  }
3200  }
3201 
3202  if( MB_VARIABLE_LENGTH != tag_bytes )
3203  {
3204  // Write the tag description to the file
3205  mhdf_createTag( filePtr, tag_name.c_str(), mhdf_type, num_vals, storage, def_value, mesh_value, hdf_type,
3206  mb_type == MB_TYPE_HANDLE ? id_type : 0, &status );
3207  CHK_MHDF_ERR_0( status );
3208  H5Tclose( hdf_type );
3209 
3210  // Create empty table for tag data
3211  if( num_sparse_entities )
3212  {
3213  mhdf_createSparseTagData( filePtr, tag_name.c_str(), num_sparse_entities, handles, &status );
3214  CHK_MHDF_ERR_0( status );
3215  mhdf_closeData( filePtr, handles[0], &status );
3216  mhdf_closeData( filePtr, handles[1], &status );
3217  }
3218 
3219  for( size_t i = 0; i < tag_data.dense_list.size(); ++i )
3220  {
3221  const ExportSet* ex = find( tag_data.dense_list[i] );
3222  assert( 0 != ex );
3223  handles[0] = mhdf_createDenseTagData( filePtr, tag_name.c_str(), ex->name(), ex->total_num_ents, &status );
3224  CHK_MHDF_ERR_0( status );
3225  mhdf_closeData( filePtr, handles[0], &status );
3226  }
3227  }
3228  else
3229  {
3230  mhdf_createVarLenTag( filePtr, tag_name.c_str(), mhdf_type, storage, def_value, def_val_len, mesh_value,
3231  mesh_val_len, hdf_type, mb_type == MB_TYPE_HANDLE ? id_type : 0, &status );
3232  CHK_MHDF_ERR_0( status );
3233  H5Tclose( hdf_type );
3234 
3235  // Create empty table for tag data
3236  if( num_sparse_entities )
3237  {
3238  mhdf_createVarLenTagData( filePtr, tag_name.c_str(), num_sparse_entities, data_table_size, handles,
3239  &status );
3240  CHK_MHDF_ERR_0( status );
3241  mhdf_closeData( filePtr, handles[0], &status );
3242  mhdf_closeData( filePtr, handles[1], &status );
3243  mhdf_closeData( filePtr, handles[2], &status );
3244  }
3245  }
3246 
3247  return MB_SUCCESS;
3248 }

References bufferSize, CHECK_OPEN_HANDLES, CHK_MB_ERR_0, CHK_MHDF_ERR_0, convert_handle_tag(), dataBuffer, moab::WriteHDF5::TagDesc::dense_list, moab::error(), ErrorCode, filePtr, find(), get_tag_size(), id_type, iFace, MB_ENTITY_NOT_FOUND, MB_SUCCESS, MB_TAG_BIT, MB_TAG_DENSE, MB_TAG_MESH, MB_TAG_NOT_FOUND, MB_TAG_SPARSE, MB_TYPE_HANDLE, MB_VARIABLE_LENGTH, mhdf_BIT_TYPE, mhdf_closeData(), mhdf_createDenseTagData(), mhdf_createSparseTagData(), mhdf_createTag(), mhdf_createVarLenTag(), mhdf_createVarLenTagData(), mhdf_DENSE_TYPE, mhdf_MESH_TYPE, mhdf_SPARSE_TYPE, moab::WriteHDF5::ExportSet::name(), moab::Interface::tag_get_by_ptr(), moab::Interface::tag_get_data(), moab::Interface::tag_get_default_value(), moab::Interface::tag_get_name(), moab::Interface::tag_get_type(), moab::WriteHDF5::TagDesc::tag_id, TagType, and moab::WriteHDF5::ExportSet::total_num_ents.

Referenced by moab::WriteHDF5Parallel::create_tag_tables(), and serial_create_file().

◆ debug_barrier_line()

void moab::WriteHDF5::debug_barrier_line ( int  lineno)
protectedvirtual

Reimplemented in moab::WriteHDF5Parallel.

Definition at line 293 of file WriteHDF5.cpp.

293 {}

◆ factory()

WriterIface * moab::WriteHDF5::factory ( Interface * iface)
static

Definition at line 391 of file WriteHDF5.cpp.

392 {
393  return new WriteHDF5( iface );
394 }

References iface, and WriteHDF5().

Referenced by moab::ReaderWriterSet::ReaderWriterSet().

◆ file_ptr()

mhdf_FileHandle moab::WriteHDF5::file_ptr ( )
inline

Definition at line 199 of file WriteHDF5.hpp.

200  {
201  return filePtr;
202  }

References filePtr.

Referenced by moab::WriteHDF5Parallel::create_adjacency_tables(), and moab::WriteHDF5Parallel::create_node_table().

◆ find()

const ExportSet* moab::WriteHDF5::find ( const ExportType type) const
inlineprotected

Definition at line 323 of file WriteHDF5.hpp.

324  {
325  if( type.type == MBVERTEX )
326  return &nodeSet;
327  else if( type.type == MBENTITYSET )
328  return &setSet;
329  else
330  {
331  std::list< ExportSet >::const_iterator it;
332  it = std::find( exportList.begin(), exportList.end(), type );
333  return it == exportList.end() ? 0 : &*it;
334  }
335  }

References exportList, MBENTITYSET, MBVERTEX, nodeSet, setSet, and moab::WriteHDF5::ExportType::type.

Referenced by create_tag(), gather_mesh_info(), and write_tag().

◆ find_set_data() [1/2]

WriteHDF5::SpecialSetData * moab::WriteHDF5::find_set_data ( EntityHandle  h)
protected

Definition at line 2882 of file WriteHDF5.cpp.

2883 {
2884  SpecialSetData tmp;
2885  tmp.setHandle = h;
2886  std::vector< SpecialSetData >::iterator i;
2887  i = std::lower_bound( specialSets.begin(), specialSets.end(), tmp, SpecSetLess() );
2888  return ( i == specialSets.end() || i->setHandle != h ) ? 0 : &*i;
2889 }

References moab::WriteHDF5::SpecialSetData::setHandle, and specialSets.

◆ find_set_data() [2/2]

const SpecialSetData* moab::WriteHDF5::find_set_data ( EntityHandle  h) const
inlineprotected

Definition at line 374 of file WriteHDF5.hpp.

375  {
376  return const_cast< WriteHDF5* >( this )->find_set_data( h );
377  }

Referenced by moab::WriteHDF5Parallel::unpack_set().

◆ gather_all_mesh()

ErrorCode moab::WriteHDF5::gather_all_mesh ( )
private

Same as gather_mesh_info, except for entire mesh.

Definition at line 881 of file WriteHDF5.cpp.

882 {
883  ErrorCode rval;
884  Range ranges[5];
885 
886  rval = iFace->get_entities_by_type( 0, MBVERTEX, ranges[0] );
887  if( MB_SUCCESS != rval ) return error( rval );
888 
889  rval = iFace->get_entities_by_dimension( 0, 1, ranges[1] );
890  if( MB_SUCCESS != rval ) return error( rval );
891 
892  rval = iFace->get_entities_by_dimension( 0, 2, ranges[2] );
893  if( MB_SUCCESS != rval ) return error( rval );
894 
895  rval = iFace->get_entities_by_dimension( 0, 3, ranges[3] );
896  if( MB_SUCCESS != rval ) return error( rval );
897 
898  rval = iFace->get_entities_by_type( 0, MBENTITYSET, ranges[4] );
899  if( MB_SUCCESS != rval ) return error( rval );
900 
901  return initialize_mesh( ranges );
902 }

References moab::error(), ErrorCode, moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type(), iFace, initialize_mesh(), MB_SUCCESS, MBENTITYSET, and MBVERTEX.

Referenced by write_file_impl().

◆ gather_mesh_info()

ErrorCode moab::WriteHDF5::gather_mesh_info ( const std::vector< EntityHandle > &  export_sets)
private

Get all mesh to export from given list of sets.

Populate exportSets, nodeSet and setSet with lists of entities to write.

Parameters
export_setsThe list of meshsets to export

Definition at line 809 of file WriteHDF5.cpp.

810 {
811  ErrorCode rval;
812 
813  int dim;
814  Range range; // Temporary storage
815  Range ranges[5]; // Lists of entities to export, grouped by dimension
816 
817  // Gather list of all related sets
818  std::vector< EntityHandle > stack( export_sets );
819  std::copy( export_sets.begin(), export_sets.end(), stack.begin() );
820  std::vector< EntityHandle > set_children;
821  while( !stack.empty() )
822  {
823  EntityHandle meshset = stack.back();
824  stack.pop_back();
825  ranges[4].insert( meshset );
826 
827  // Get contained sets
828  range.clear();
829  rval = iFace->get_entities_by_type( meshset, MBENTITYSET, range );
830  CHK_MB_ERR_0( rval );
831  for( Range::iterator ritor = range.begin(); ritor != range.end(); ++ritor )
832  {
833  if( ranges[4].find( *ritor ) == ranges[4].end() ) stack.push_back( *ritor );
834  }
835 
836  // Get child sets
837  set_children.clear();
838  rval = iFace->get_child_meshsets( meshset, set_children, 1 );
839  CHK_MB_ERR_0( rval );
840  for( std::vector< EntityHandle >::iterator vitor = set_children.begin(); vitor != set_children.end(); ++vitor )
841  {
842  if( ranges[4].find( *vitor ) == ranges[4].end() ) stack.push_back( *vitor );
843  }
844  }
845 
846  // Gather list of all mesh entities from list of sets,
847  // grouped by dimension.
848  for( Range::iterator setitor = ranges[4].begin(); setitor != ranges[4].end(); ++setitor )
849  {
850  for( dim = 0; dim < 4; ++dim )
851  {
852  range.clear();
853  rval = iFace->get_entities_by_dimension( *setitor, dim, range, false );
854  CHK_MB_ERR_0( rval );
855 
856  ranges[dim].merge( range );
857  }
858  }
859 
860  // For each list of elements, append adjacent children and
861  // nodes to lists.
862  for( dim = 3; dim > 0; --dim )
863  {
864  for( int cdim = 1; cdim < dim; ++cdim )
865  {
866  range.clear();
867  rval = iFace->get_adjacencies( ranges[dim], cdim, false, range );
868  CHK_MB_ERR_0( rval );
869  ranges[cdim].merge( range );
870  }
871  range.clear();
872  rval = writeUtil->gather_nodes_from_elements( ranges[dim], 0, range );
873  CHK_MB_ERR_0( rval );
874  ranges[0].merge( range );
875  }
876 
877  return initialize_mesh( ranges );
878 }

References moab::Range::begin(), CHK_MB_ERR_0, moab::Range::clear(), dim, moab::Range::end(), ErrorCode, find(), moab::WriteUtilIface::gather_nodes_from_elements(), moab::Interface::get_adjacencies(), moab::Interface::get_child_meshsets(), moab::Interface::get_entities_by_dimension(), moab::Interface::get_entities_by_type(), iFace, initialize_mesh(), moab::Range::insert(), MBENTITYSET, moab::Range::merge(), and writeUtil.

Referenced by write_file_impl().

◆ gather_tags()

ErrorCode moab::WriteHDF5::gather_tags ( const Tag user_tag_list,
int  user_tag_list_length 
)
protected

Gather tags.

Definition at line 2504 of file WriteHDF5.cpp.

2505 {
2506  ErrorCode result;
2507  std::vector< Tag > tag_list;
2508  std::vector< Tag >::iterator t_itor;
2509  Range range;
2510 
2511  // Get list of Tags to write
2512  result = writeUtil->get_tag_list( tag_list, user_tag_list, num_tags );
2513  CHK_MB_ERR_0( result );
2514 
2515  // Get list of tags
2516  for( t_itor = tag_list.begin(); t_itor != tag_list.end(); ++t_itor )
2517  {
2518  // Add tag to export list
2519  TagDesc tag_data;
2520  tag_data.write_sparse = false;
2521  tag_data.tag_id = *t_itor;
2522  tag_data.sparse_offset = 0;
2523  tag_data.var_data_offset = 0;
2524  tag_data.max_num_ents = 0;
2525  tag_data.max_num_vals = 0;
2526  tagList.push_back( tag_data );
2527  }
2528 
2529  return MB_SUCCESS;
2530 }

References CHK_MB_ERR_0, ErrorCode, moab::WriteUtilIface::get_tag_list(), moab::WriteHDF5::TagDesc::max_num_ents, moab::WriteHDF5::TagDesc::max_num_vals, MB_SUCCESS, moab::WriteHDF5::TagDesc::sparse_offset, moab::WriteHDF5::TagDesc::tag_id, tagList, moab::WriteHDF5::TagDesc::var_data_offset, moab::WriteHDF5::TagDesc::write_sparse, and writeUtil.

Referenced by moab::WriteHDF5Parallel::parallel_create_file(), and serial_create_file().

◆ get_adjacencies()

ErrorCode moab::WriteHDF5::get_adjacencies ( EntityHandle  entity,
std::vector< wid_t > &  adj 
)
inlineprotected

Get IDs of adjacent entities.

For all entities adjacent to the passed entity, if the adjacent entity is to be exported (ID is not zero), append the ID to the passed list.

Definition at line 1774 of file WriteHDF5.cpp.

1775 {
1776  const EntityHandle* adj_array;
1777  int num_adj;
1778  ErrorCode rval = writeUtil->get_adjacencies( entity, adj_array, num_adj );
1779  if( MB_SUCCESS != rval ) return error( rval );
1780 
1781  size_t j = 0;
1782  adj.resize( num_adj );
1783  for( int i = 0; i < num_adj; ++i )
1784  if( wid_t id = idMap.find( adj_array[i] ) ) adj[j++] = id;
1785  adj.resize( j );
1786 
1787  return MB_SUCCESS;
1788 }

References moab::error(), ErrorCode, moab::RangeMap< KeyType, ValType, NullVal >::find(), moab::WriteUtilIface::get_adjacencies(), idMap, MB_SUCCESS, and writeUtil.

Referenced by count_adjacencies(), and write_adjacencies().

◆ get_connectivity()

ErrorCode moab::WriteHDF5::get_connectivity ( Range::const_iterator  begin,
Range::const_iterator  end,
int  nodes_per_element,
wid_t id_data_out 
)
private

Get element connectivity.

◆ get_num_sparse_tagged_entities()

ErrorCode moab::WriteHDF5::get_num_sparse_tagged_entities ( const TagDesc tag,
size_t &  count 
)
protected

Get tagged entities for which to write tag values.

Definition at line 3250 of file WriteHDF5.cpp.

3251 {
3252  Range tmp;
3253  ErrorCode rval = get_sparse_tagged_entities( tag, tmp );
3254  count = tmp.size();
3255  return rval;
3256 }

References ErrorCode, get_sparse_tagged_entities(), and moab::Range::size().

Referenced by moab::WriteHDF5Parallel::create_tag_tables().

◆ get_set_info()

ErrorCode moab::WriteHDF5::get_set_info ( EntityHandle  set,
long &  num_entities,
long &  num_children,
long &  num_parents,
unsigned long &  flags 
)
protected

Get information about a meshset.

Definition at line 1102 of file WriteHDF5.cpp.

1107 {
1108  ErrorCode rval;
1109  int i;
1110  unsigned int u;
1111 
1112  rval = iFace->get_number_entities_by_handle( set, i, false );
1113  CHK_MB_ERR_0( rval );
1114  num_entities = i;
1115 
1116  rval = iFace->num_child_meshsets( set, &i );
1117  CHK_MB_ERR_0( rval );
1118  num_children = i;
1119 
1120  rval = iFace->num_parent_meshsets( set, &i );
1121  CHK_MB_ERR_0( rval );
1122  num_parents = i;
1123 
1124  rval = iFace->get_meshset_options( set, u );
1125  CHK_MB_ERR_0( rval );
1126  flags = u;
1127 
1128  return MB_SUCCESS;
1129 }

References CHK_MB_ERR_0, ErrorCode, moab::Interface::get_meshset_options(), moab::Interface::get_number_entities_by_handle(), iFace, MB_SUCCESS, moab::Interface::num_child_meshsets(), and moab::Interface::num_parent_meshsets().

Referenced by count_set_size(), and write_sets().

◆ get_sparse_tagged_entities()

ErrorCode moab::WriteHDF5::get_sparse_tagged_entities ( const TagDesc tag,
Range range 
)
protected

Get tagged entities for which to write tag values.

Definition at line 3258 of file WriteHDF5.cpp.

3259 {
3260  results.clear();
3261  if( !tag.have_dense( setSet ) ) results.merge( setSet.range );
3262  std::list< ExportSet >::reverse_iterator e;
3263  for( e = exportList.rbegin(); e != exportList.rend(); ++e )
3264  {
3265  if( !tag.have_dense( *e ) ) results.merge( e->range );
3266  }
3267  if( !tag.have_dense( nodeSet ) ) results.merge( nodeSet.range );
3268  if( results.empty() ) return MB_SUCCESS;
3269 
3270  return iFace->get_entities_by_type_and_tag( 0, MBMAXTYPE, &tag.tag_id, 0, 1, results, Interface::INTERSECT );
3271 }

References moab::Range::clear(), moab::Range::empty(), exportList, moab::Interface::get_entities_by_type_and_tag(), moab::WriteHDF5::TagDesc::have_dense(), iFace, moab::Interface::INTERSECT, MB_SUCCESS, MBMAXTYPE, moab::Range::merge(), nodeSet, moab::WriteHDF5::ExportSet::range, setSet, and moab::WriteHDF5::TagDesc::tag_id.

Referenced by moab::WriteHDF5Parallel::create_tag_tables(), get_num_sparse_tagged_entities(), serial_create_file(), write_sparse_tag(), and write_var_len_tag().

◆ get_tag_data_length()

ErrorCode moab::WriteHDF5::get_tag_data_length ( const TagDesc tag_info,
const Range range,
unsigned long &  result 
)
protected

Get the sum of the lengths of the tag values (as a count of values of the tag's base type) for variable-length tag data.

Definition at line 3057 of file WriteHDF5.cpp.

3058 {
3059  ErrorCode rval;
3060  result = 0;
3061 
3062  // Split buffer into two pieces, one for pointers and one for sizes
3063  size_t step, remaining;
3064  step = bufferSize / ( sizeof( int ) + sizeof( void* ) );
3065  const void** ptr_buffer = reinterpret_cast< const void** >( dataBuffer );
3066  int* size_buffer = reinterpret_cast< int* >( ptr_buffer + step );
3067  Range subrange;
3068  Range::const_iterator iter = range.begin();
3069  for( remaining = range.size(); remaining >= step; remaining -= step )
3070  {
3071  // Get subset of range containing 'count' entities
3072  Range::const_iterator end = iter;
3073  end += step;
3074  subrange.clear();
3075  subrange.merge( iter, end );
3076  iter = end;
3077  // Get tag sizes for entities
3078  rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
3079  if( MB_SUCCESS != rval ) return error( rval );
3080  // Sum lengths
3081  for( size_t i = 0; i < step; ++i )
3082  result += size_buffer[i];
3083  }
3084  // Process remaining
3085  subrange.clear();
3086  subrange.merge( iter, range.end() );
3087  assert( subrange.size() == remaining );
3088  rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
3089  if( MB_SUCCESS != rval ) return error( rval );
3090  for( size_t i = 0; i < remaining; ++i )
3091  result += size_buffer[i];
3092 
3093  return MB_SUCCESS;
3094 }

References moab::Range::begin(), bufferSize, moab::Range::clear(), dataBuffer, moab::Range::end(), moab::error(), ErrorCode, iFace, MB_SUCCESS, moab::Range::merge(), moab::Range::size(), moab::Interface::tag_get_by_ptr(), and moab::WriteHDF5::TagDesc::tag_id.

Referenced by moab::WriteHDF5Parallel::create_tag_tables(), and serial_create_file().

◆ get_tag_size()

ErrorCode moab::WriteHDF5::get_tag_size ( Tag  tag,
DataType moab_type,
int &  num_bytes,
int &  elem_size,
int &  file_size,
mhdf_TagDataType file_type,
hid_t &  hdf_type 
)
private

Get size data for tag.

Parameters
tagMOAB tag ID
moab_typeOutput: DataType for tag
num_bytesOutput: MOAB tag size (bits for bit tags). MB_VARIABLE_LENGTH for variable-length tags.
elem_sizeOutput: Size of the base data type of the tag data (e.g. sizeof(double) if moab_type == MB_TYPE_DOUBLE). One for bit and opaque tags.
array_sizeOutput: The number of values of size elem_size for each tag. Always 1 for opaque data. Number of bits for bit tags.
file_typeOutput: mhdf type enumeration
hdf_typeOutput: Handle to HDF5 type object. Caller is responsible for releasing this object (calling H5Tclose).

Definition at line 2922 of file WriteHDF5.cpp.

2929 {
2930  ErrorCode rval;
2931  Tag type_handle;
2932  std::string tag_name, tag_type_name;
2933 
2935 
2936  // We return NULL for hdf_type if it can be determined from
2937  // the file_type. The only case where it is non-zero is
2938  // if the user specified a specific type via a mesh tag.
2939  hdf_type = (hid_t)0;
2940  bool close_hdf_type = false;
2941 
2942  rval = iFace->tag_get_data_type( tag, moab_type );
2943  CHK_MB_ERR_0( rval );
2944  rval = iFace->tag_get_length( tag, array_length );
2945  if( MB_VARIABLE_DATA_LENGTH == rval )
2946  {
2947  array_length = MB_VARIABLE_LENGTH;
2948  }
2949  else if( MB_SUCCESS != rval )
2950  return error( rval );
2951  rval = iFace->tag_get_bytes( tag, num_bytes );
2952  if( MB_VARIABLE_DATA_LENGTH == rval )
2953  num_bytes = MB_VARIABLE_LENGTH;
2954  else if( MB_SUCCESS != rval )
2955  return error( rval );
2956 
2957  switch( moab_type )
2958  {
2959  case MB_TYPE_INTEGER:
2960  type_size = sizeof( int );
2961  file_type = mhdf_INTEGER;
2962  hdf_type = H5T_NATIVE_INT;
2963  close_hdf_type = false;
2964  break;
2965  case MB_TYPE_DOUBLE:
2966  type_size = sizeof( double );
2967  file_type = mhdf_FLOAT;
2968  hdf_type = H5T_NATIVE_DOUBLE;
2969  close_hdf_type = false;
2970  break;
2971  case MB_TYPE_BIT:
2972  type_size = sizeof( bool );
2973  file_type = mhdf_BITFIELD;
2974  assert( array_length <= 8 );
2975  hdf_type = H5Tcopy( H5T_NATIVE_B8 );
2976  H5Tset_precision( hdf_type, array_length );
2977  close_hdf_type = true;
2978  break;
2979  case MB_TYPE_HANDLE:
2980  type_size = sizeof( EntityHandle );
2981  file_type = mhdf_ENTITY_ID;
2982  hdf_type = id_type;
2983  close_hdf_type = false;
2984  break;
2985  case MB_TYPE_OPAQUE:
2986  file_type = mhdf_OPAQUE;
2987  rval = iFace->tag_get_name( tag, tag_name );
2988  CHK_MB_ERR_0( rval );
2989  tag_type_name = "__hdf5_tag_type_";
2990  tag_type_name += tag_name;
2991  rval = iFace->tag_get_handle( tag_type_name.c_str(), 0, MB_TYPE_OPAQUE, type_handle, MB_TAG_ANY );
2992  if( MB_TAG_NOT_FOUND == rval )
2993  {
2994  if( num_bytes == MB_VARIABLE_LENGTH )
2995  type_size = 1;
2996  else
2997  type_size = num_bytes;
2998  hdf_type = H5Tcreate( H5T_OPAQUE, type_size );
2999  close_hdf_type = true;
3000  }
3001  else if( MB_SUCCESS == rval )
3002  {
3003  int hsize;
3004  rval = iFace->tag_get_bytes( type_handle, hsize );
3005  if( hsize != sizeof( hid_t ) ) return error( MB_FAILURE );
3006 
3007  const EntityHandle root = 0;
3008  rval = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
3009  if( rval != MB_SUCCESS ) return error( rval );
3010 
3011  type_size = H5Tget_size( hdf_type );
3012  if( type_size != num_bytes ) return error( MB_FAILURE );
3013 
3014  close_hdf_type = false;
3015  }
3016  else
3017  return error( rval );
3018  num_bytes = array_length;
3019  array_length = ( num_bytes == MB_VARIABLE_LENGTH ) ? MB_VARIABLE_LENGTH : 1;
3020  break;
3021  default:
3022  break;
3023  }
3024 
3025  assert( num_bytes == MB_VARIABLE_LENGTH || ( moab_type == MB_TYPE_BIT && num_bytes == 1 ) ||
3026  array_length * type_size == num_bytes );
3027 
3028  if( num_bytes == MB_VARIABLE_LENGTH )
3029  {
3030  array_length = MB_VARIABLE_LENGTH;
3031  if( !close_hdf_type )
3032  {
3033  hdf_type = H5Tcopy( hdf_type );
3034  // close_hdf_type = true;
3035  }
3036  }
3037  else if( array_length > 1 && moab_type != MB_TYPE_BIT )
3038  {
3039  hsize_t len = array_length;
3040 #if defined( H5Tarray_create_vers ) && ( H5Tarray_create_vers > 1 )
3041  hid_t temp_id = H5Tarray_create2( hdf_type, 1, &len );
3042 #else
3043  hid_t temp_id = H5Tarray_create( hdf_type, 1, &len, NULL );
3044 #endif
3045  if( close_hdf_type ) H5Tclose( hdf_type );
3046  hdf_type = temp_id;
3047  }
3048  else if( !close_hdf_type )
3049  {
3050  hdf_type = H5Tcopy( hdf_type );
3051  // close_hdf_type = true;
3052  }
3053 
3054  return MB_SUCCESS;
3055 }

References CHECK_OPEN_HANDLES, CHK_MB_ERR_0, moab::error(), ErrorCode, id_type, iFace, MB_SUCCESS, MB_TAG_ANY, MB_TAG_NOT_FOUND, MB_TYPE_BIT, MB_TYPE_DOUBLE, MB_TYPE_HANDLE, MB_TYPE_INTEGER, MB_TYPE_OPAQUE, MB_VARIABLE_DATA_LENGTH, MB_VARIABLE_LENGTH, mhdf_BITFIELD, mhdf_ENTITY_ID, mhdf_FLOAT, mhdf_INTEGER, mhdf_OPAQUE, moab::Interface::tag_get_bytes(), moab::Interface::tag_get_data(), moab::Interface::tag_get_data_type(), moab::Interface::tag_get_handle(), moab::Interface::tag_get_length(), moab::Interface::tag_get_name(), and tag_type_name().

Referenced by create_tag(), and write_tag().

◆ get_write_entities()

void moab::WriteHDF5::get_write_entities ( Range &  range)
protected

Get entities that will be written to file.

Definition at line 3273 of file WriteHDF5.cpp.

3274 {
3275  range.clear();
3276  range.merge( setSet.range );
3277  std::list< ExportSet >::reverse_iterator e;
3278  for( e = exportList.rbegin(); e != exportList.rend(); ++e )
3279  range.merge( e->range );
3280  range.merge( nodeSet.range );
3281 }

References moab::Range::clear(), exportList, moab::Range::merge(), nodeSet, moab::WriteHDF5::ExportSet::range, and setSet.

◆ init()

ErrorCode moab::WriteHDF5::init ( )
private

Definition at line 405 of file WriteHDF5.cpp.

406 {
407  ErrorCode rval;
408 
409  if( writeUtil ) // init has already been called
410  return MB_SUCCESS;
411  /*
412  #ifdef DEBUG
413  H5Eset_auto(&hdf_error_handler, writeUtil); // HDF5 callback for errors
414  #endif
415  */
416  // For known tag types, store the corresponding HDF5 in which
417  // the tag data is to be written in the file.
418  // register_known_tag_types(iFace);
419 
420  // Get the util interface
421  rval = iFace->query_interface( writeUtil );
422  CHK_MB_ERR_0( rval );
423 
424  idMap.clear();
425 
426 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
427  herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
428 #else
429  herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
430 #endif
431  if( err < 0 )
432  {
433  errorHandler.func = 0;
434  errorHandler.data = 0;
435  }
436  else
437  {
438 #if defined( H5Eset_auto_vers ) && H5Eset_auto_vers > 1
439  err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
440 #else
441  err = H5Eset_auto( &handle_hdf5_error, &errorHandler );
442 #endif
443  if( err < 0 )
444  {
445  errorHandler.func = 0;
446  errorHandler.data = 0;
447  }
448  }
449 
450  if( !topState.valid() ) topState = MPEState( "WriteHDF5", "yellow" );
451  if( !subState.valid() ) subState = MPEState( "WriteHDF5 subevent", "cyan" );
452 
453  return MB_SUCCESS;
454 }

References CHK_MB_ERR_0, moab::RangeMap< KeyType, ValType, NullVal >::clear(), moab::WriteHDF5::HDF5ErrorHandler::data, ErrorCode, errorHandler, moab::WriteHDF5::HDF5ErrorHandler::func, moab::handle_hdf5_error(), idMap, iFace, MB_SUCCESS, moab::Interface::query_interface(), subState, topState, and writeUtil.

Referenced by write_file_impl().

◆ initialize_mesh()

ErrorCode moab::WriteHDF5::initialize_mesh ( const Range  entities_by_dim[5])
private

Initialize internal data structures from gathered mesh.

Definition at line 738 of file WriteHDF5.cpp.

739 {
740  ErrorCode rval;
741 
742  if( !ranges[0].all_of_type( MBVERTEX ) ) return error( MB_FAILURE );
743  nodeSet.range = ranges[0];
745  nodeSet.num_nodes = 1;
747 
748  if( !ranges[4].all_of_type( MBENTITYSET ) ) return error( MB_FAILURE );
749  setSet.range = ranges[4];
751  setSet.num_nodes = 0;
754 
755  exportList.clear();
756  std::vector< Range > bins( 1024 ); // Sort entities by connectivity length
757  // Resize is expensive due to Range copy, so start big
758  for( EntityType type = MBEDGE; type < MBENTITYSET; ++type )
759  {
760  ExportSet set;
761  set.max_num_ents = set.max_num_adjs = 0;
762  const int dim = CN::Dimension( type );
763 
764  // Group entities by connectivity length
765  bins.clear();
766  assert( dim >= 0 && dim <= 4 );
767  std::pair< Range::const_iterator, Range::const_iterator > p = ranges[dim].equal_range( type );
768  Range::const_iterator i = p.first;
769  while( i != p.second )
770  {
771  Range::const_iterator first = i;
772  EntityHandle const* conn;
773  int len, firstlen;
774 
775  // Dummy storage vector for structured mesh "get_connectivity" function
776  std::vector< EntityHandle > storage;
777 
778  rval = iFace->get_connectivity( *i, conn, firstlen, false, &storage );
779  if( MB_SUCCESS != rval ) return error( rval );
780 
781  for( ++i; i != p.second; ++i )
782  {
783  rval = iFace->get_connectivity( *i, conn, len, false, &storage );
784  if( MB_SUCCESS != rval ) return error( rval );
785 
786  if( len != firstlen ) break;
787  }
788 
789  if( firstlen >= (int)bins.size() ) bins.resize( firstlen + 1 );
790  bins[firstlen].merge( first, i );
791  }
792  // Create ExportSet for each group
793  for( std::vector< Range >::iterator j = bins.begin(); j != bins.end(); ++j )
794  {
795  if( j->empty() ) continue;
796 
797  set.range.clear();
798  set.type = type;
799  set.num_nodes = j - bins.begin();
800  exportList.push_back( set );
801  exportList.back().range.swap( *j );
802  }
803  }
804 
805  return MB_SUCCESS;
806 }

References moab::Range::clear(), dim, moab::CN::Dimension(), moab::Range::equal_range(), moab::error(), ErrorCode, exportList, moab::GeomUtil::first(), moab::Interface::get_connectivity(), iFace, moab::WriteHDF5::ExportSet::max_num_adjs, moab::WriteHDF5::ExportSet::max_num_ents, maxNumSetChildren, maxNumSetContents, maxNumSetParents, MB_SUCCESS, MBEDGE, MBENTITYSET, MBVERTEX, nodeSet, moab::WriteHDF5::ExportType::num_nodes, moab::WriteHDF5::ExportSet::range, setSet, and moab::WriteHDF5::ExportType::type.

Referenced by gather_all_mesh(), and gather_mesh_info().

◆ parallel_create_file()

ErrorCode moab::WriteHDF5::parallel_create_file ( const char *  filename,
bool  overwrite,
const std::vector< std::string > &  qa_records,
const FileOptions opts,
const Tag tag_list,
int  num_tags,
int  dimension = 3,
double *  times = 0 
)
protectedvirtual

Function to create the file. Virtual to allow override for parallel version.

Reimplemented in moab::WriteHDF5Parallel.

Definition at line 2536 of file WriteHDF5.cpp.

2544 {
2545  MB_SET_ERR( MB_NOT_IMPLEMENTED, "WriteHDF5 does not support parallel writing" );
2546 }

References MB_NOT_IMPLEMENTED, and MB_SET_ERR.

Referenced by write_file_impl().

◆ print_id_map() [1/2]

void moab::WriteHDF5::print_id_map ( ) const
protected

Definition at line 3283 of file WriteHDF5.cpp.

3284 {
3285  print_id_map( std::cout, "" );
3286 }

◆ print_id_map() [2/2]

void moab::WriteHDF5::print_id_map ( std::ostream &  str,
const char *  prefix = "" 
) const
protected

Definition at line 3288 of file WriteHDF5.cpp.

3289 {
3291  for( i = idMap.begin(); i != idMap.end(); ++i )
3292  {
3293  const char* n1 = CN::EntityTypeName( TYPE_FROM_HANDLE( i->begin ) );
3294  EntityID id = ID_FROM_HANDLE( i->begin );
3295  if( 1 == i->count )
3296  {
3297  s << pfx << n1 << " " << id << " -> " << i->value << std::endl;
3298  }
3299  else
3300  {
3301  const char* n2 = CN::EntityTypeName( TYPE_FROM_HANDLE( i->begin + i->count - 1 ) );
3302  if( n1 == n2 )
3303  {
3304  s << pfx << n1 << " " << id << "-" << id + i->count - 1 << " -> " << i->value << "-"
3305  << i->value + i->count - 1 << std::endl;
3306  }
3307  else
3308  {
3309  s << pfx << n1 << " " << id << "-" << n1 << " " << ID_FROM_HANDLE( i->begin + i->count - 1 ) << " -> "
3310  << i->value << "-" << i->value + i->count - 1 << std::endl;
3311  }
3312  }
3313  }
3314 }

References moab::RangeMap< KeyType, ValType, NullVal >::begin(), moab::RangeMap< KeyType, ValType, NullVal >::end(), moab::CN::EntityTypeName(), moab::ID_FROM_HANDLE(), idMap, and moab::TYPE_FROM_HANDLE().

◆ print_times()

void moab::WriteHDF5::print_times ( const double  times[NUM_TIMES]) const
protectedvirtual

Definition at line 3316 of file WriteHDF5.cpp.

3317 {
3318  std::cout << "WriteHDF5: " << t[TOTAL_TIME] << std::endl
3319  << " gather mesh: " << t[GATHER_TIME] << std::endl
3320  << " create file: " << t[CREATE_TIME] << std::endl
3321  << " create nodes: " << t[CREATE_NODE_TIME] << std::endl
3322  << " negotiate types: " << t[NEGOTIATE_TYPES_TIME] << std::endl
3323  << " create elem: " << t[CREATE_ELEM_TIME] << std::endl
3324  << " file id exch: " << t[FILEID_EXCHANGE_TIME] << std::endl
3325  << " create adj: " << t[CREATE_ADJ_TIME] << std::endl
3326  << " create set: " << t[CREATE_SET_TIME] << std::endl
3327  << " shared ids: " << t[SHARED_SET_IDS] << std::endl
3328  << " shared data: " << t[SHARED_SET_CONTENTS] << std::endl
3329  << " set offsets: " << t[SET_OFFSET_TIME] << std::endl
3330  << " create tags: " << t[CREATE_TAG_TIME] << std::endl
3331  << " coordinates: " << t[COORD_TIME] << std::endl
3332  << " connectivity: " << t[CONN_TIME] << std::endl
3333  << " sets: " << t[SET_TIME] << std::endl
3334  << " set descrip: " << t[SET_META] << std::endl
3335  << " set content: " << t[SET_CONTENT] << std::endl
3336  << " set parent: " << t[SET_PARENT] << std::endl
3337  << " set child: " << t[SET_CHILD] << std::endl
3338  << " adjacencies: " << t[ADJ_TIME] << std::endl
3339  << " tags: " << t[TAG_TIME] << std::endl
3340  << " dense data: " << t[DENSE_TAG_TIME] << std::endl
3341  << " sparse data: " << t[SPARSE_TAG_TIME] << std::endl
3342  << " var-len data: " << t[VARLEN_TAG_TIME] << std::endl;
3343 }

References ADJ_TIME, CONN_TIME, COORD_TIME, CREATE_ADJ_TIME, CREATE_ELEM_TIME, CREATE_NODE_TIME, CREATE_SET_TIME, CREATE_TAG_TIME, CREATE_TIME, DENSE_TAG_TIME, FILEID_EXCHANGE_TIME, GATHER_TIME, NEGOTIATE_TYPES_TIME, SET_CHILD, SET_CONTENT, SET_META, SET_OFFSET_TIME, SET_PARENT, SET_TIME, SHARED_SET_CONTENTS, SHARED_SET_IDS, SPARSE_TAG_TIME, TAG_TIME, TOTAL_TIME, and VARLEN_TAG_TIME.

Referenced by moab::WriteHDF5Parallel::print_times(), and write_file_impl().

◆ range_to_blocked_list() [1/2]

ErrorCode moab::WriteHDF5::range_to_blocked_list ( const EntityHandle input_ranges,
size_t  num_input_ranges,
std::vector< wid_t > &  output_id_list,
bool &  ranged_list 
)
protected

Get possibly compacted list of IDs for passed entities

For the passed range of entities, determine if IDs can be compacted and write IDs to passed list.

If the IDs are not compacted, the output list will contain a simple ordered list of IDs.

If IDs are compacted, the output list will contain {start,count} pairs.

If the ID list is compacted, ranged_list will be 'true'. Otherwise it will be 'false'.

Definition at line 1717 of file WriteHDF5.cpp.

1721 {
1722  // We assume this in the cast on the following line
1723  typedef std::pair< EntityHandle, EntityHandle > mtype;
1724  assert( sizeof( mtype ) == 2 * sizeof( EntityHandle ) );
1725  const mtype* arr = reinterpret_cast< const mtype* >( array );
1726  return range_to_blocked_list_templ( arr, arr + num_input_ranges, idMap, output_id_list, ranged_list );
1727 }

References idMap, and moab::range_to_blocked_list_templ().

◆ range_to_blocked_list() [2/2]

ErrorCode moab::WriteHDF5::range_to_blocked_list ( const Range input_range,
std::vector< wid_t > &  output_id_list,
bool &  ranged_list 
)
protected

Get possibly compacted list of IDs for passed entities

For the passed range of entities, determine if IDs can be compacted and write IDs to passed list.

If the IDs are not compacted, the output list will contain a simple ordered list of IDs.

If IDs are compacted, the output list will contain {start,count} pairs.

If the ID list is compacted, ranged_list will be 'true'. Otherwise it will be 'false'.

Definition at line 1709 of file WriteHDF5.cpp.

1712 {
1713  return range_to_blocked_list_templ( input_range.const_pair_begin(), input_range.const_pair_end(), idMap,
1714  output_id_list, ranged_list );
1715 }

References moab::Range::const_pair_begin(), moab::Range::const_pair_end(), idMap, and moab::range_to_blocked_list_templ().

Referenced by moab::WriteHDF5Parallel::communicate_shared_set_data(), count_set_size(), moab::WriteHDF5Parallel::pack_set(), and write_set_data().

◆ range_to_id_list()

ErrorCode moab::WriteHDF5::range_to_id_list ( const Range input_range,
wid_t array 
)
protected

Definition at line 1729 of file WriteHDF5.cpp.

1730 {
1731  return range_to_id_list_templ( range.const_pair_begin(), range.const_pair_end(), idMap, array );
1732 }

References moab::Range::const_pair_begin(), moab::Range::const_pair_end(), idMap, and moab::range_to_id_list_templ().

Referenced by write_sparse_ids().

◆ serial_create_file()

ErrorCode moab::WriteHDF5::serial_create_file ( const char *  filename,
bool  overwrite,
const std::vector< std::string > &  qa_records,
const Tag *  tag_list,
int  num_tags,
int  dimension = 3 
)
private

Definition at line 2548 of file WriteHDF5.cpp.

2554 {
2555  long first_id;
2556  mhdf_Status status;
2557  hid_t handle;
2558  std::list< ExportSet >::iterator ex_itor;
2559  ErrorCode rval;
2560 
2561  topState.start( "creating file" );
2562 
2563  const char* type_names[MBMAXTYPE];
2564  memset( type_names, 0, MBMAXTYPE * sizeof( char* ) );
2565  for( EntityType i = MBEDGE; i < MBENTITYSET; ++i )
2566  type_names[i] = CN::EntityTypeName( i );
2567 
2568  // Create the file
2569  filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status );
2570  CHK_MHDF_ERR_0( status );
2571  assert( !!filePtr );
2572 
2573  rval = write_qa( qa_records );
2574  CHK_MB_ERR_0( rval );
2575 
2576  // Create node table
2577  if( nodeSet.range.size() )
2578  {
2580  handle = mhdf_createNodeCoords( filePtr, dimension, nodeSet.total_num_ents, &first_id, &status );
2581  CHK_MHDF_ERR_0( status );
2582  mhdf_closeData( filePtr, handle, &status );
2583  CHK_MHDF_ERR_0( status );
2584  nodeSet.first_id = (wid_t)first_id;
2586  CHK_MB_ERR_0( rval );
2587  }
2588  else
2589  {
2590  nodeSet.first_id = std::numeric_limits< wid_t >::max();
2591  }
2592  nodeSet.offset = 0;
2593 
2594  // Create element tables
2595  for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
2596  {
2597  ex_itor->total_num_ents = ex_itor->range.size();
2598  rval = create_elem_table( *ex_itor, ex_itor->total_num_ents, first_id );
2599  CHK_MB_ERR_0( rval );
2600 
2601  ex_itor->first_id = (wid_t)first_id;
2602  ex_itor->offset = 0;
2603  rval = assign_ids( ex_itor->range, ex_itor->first_id );
2604  CHK_MB_ERR_0( rval );
2605  }
2606  // Create set tables
2607  writeSets = !setSet.range.empty();
2608  if( writeSets )
2609  {
2610  long contents_len, children_len, parents_len;
2611 
2614  rval = create_set_meta( setSet.total_num_ents, first_id );
2615  CHK_MB_ERR_0( rval );
2616 
2617  setSet.first_id = (wid_t)first_id;
2618  rval = assign_ids( setSet.range, setSet.first_id );
2619  CHK_MB_ERR_0( rval );
2620 
2621  rval = count_set_size( setSet.range, contents_len, children_len, parents_len );
2622  CHK_MB_ERR_0( rval );
2623 
2624  rval = create_set_tables( contents_len, children_len, parents_len );
2625  CHK_MB_ERR_0( rval );
2626 
2627  setSet.offset = 0;
2628  setContentsOffset = 0;
2629  setChildrenOffset = 0;
2630  setParentsOffset = 0;
2631  writeSetContents = !!contents_len;
2632  writeSetChildren = !!children_len;
2633  writeSetParents = !!parents_len;
2634 
2635  maxNumSetContents = contents_len;
2636  maxNumSetChildren = children_len;
2637  maxNumSetParents = parents_len;
2638  } // if (!setSet.range.empty())
2639 
2640  // Create adjacency table after set table, because sets do not have yet an id
2641  // some entities are adjacent to sets (exodus?)
2642  // Create node adjacency table
2643  wid_t num_adjacencies;
2644 #ifdef MB_H5M_WRITE_NODE_ADJACENCIES
2645  rval = count_adjacencies( nodeSet.range, num_adjacencies );
2646  CHK_MB_ERR_0( rval );
2647  nodeSet.adj_offset = 0;
2648  nodeSet.max_num_adjs = num_adjacencies;
2649  if( num_adjacencies > 0 )
2650  {
2651  handle = mhdf_createAdjacency( filePtr, mhdf_node_type_handle(), num_adjacencies, &status );
2652  CHK_MHDF_ERR_0( status );
2653  mhdf_closeData( filePtr, handle, &status );
2654  }
2655 #endif
2656 
2657  // Create element adjacency tables
2658  for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
2659  {
2660  rval = count_adjacencies( ex_itor->range, num_adjacencies );
2661  CHK_MB_ERR_0( rval );
2662 
2663  ex_itor->adj_offset = 0;
2664  ex_itor->max_num_adjs = num_adjacencies;
2665  if( num_adjacencies > 0 )
2666  {
2667  handle = mhdf_createAdjacency( filePtr, ex_itor->name(), num_adjacencies, &status );
2668  CHK_MHDF_ERR_0( status );
2669  mhdf_closeData( filePtr, handle, &status );
2670  }
2671  }
2672 
2673  dbgOut.tprint( 1, "Gathering Tags\n" );
2674 
2675  rval = gather_tags( user_tag_list, num_user_tags );
2676  CHK_MB_ERR_0( rval );
2677 
2678  // Create the tags and tag data tables
2679  std::list< TagDesc >::iterator tag_iter = tagList.begin();
2680  for( ; tag_iter != tagList.end(); ++tag_iter )
2681  {
2682  // As we haven't yet added any ExportSets for which to write
2683  // dense tag data to the TagDesc struct pointed to by
2684  // tag_iter, this call will initially return all tagged entities
2685  // in the set of entities to be written.
2686  Range range;
2687  rval = get_sparse_tagged_entities( *tag_iter, range );
2688  CHK_MB_ERR_0( rval );
2689 
2690  int s;
2691  bool var_len = ( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) );
2692 
2693  // Determine which ExportSets we want to write dense
2694  // data for. We never write dense data for variable-length
2695  // tag data.
2696  if( !var_len && writeTagDense )
2697  {
2698  // Check if we want to write this tag in dense format even if not
2699  // all of the entities have a tag value. The criterion of this
2700  // is that the tag be dense, have a default value, and have at
2701  // least 2/3 of the entities tagged.
2702  bool prefer_dense = false;
2703  TagType type;
2704  rval = iFace->tag_get_type( tag_iter->tag_id, type );
2705  CHK_MB_ERR_0( rval );
2706  if( MB_TAG_DENSE == type )
2707  {
2708  const void* defval = 0;
2709  rval = iFace->tag_get_default_value( tag_iter->tag_id, defval, s );
2710  if( MB_SUCCESS == rval ) prefer_dense = true;
2711  }
2712 
2713  if( check_dense_format_tag( nodeSet, range, prefer_dense ) )
2714  {
2715  range -= nodeSet.range;
2716  tag_iter->dense_list.push_back( nodeSet );
2717  }
2718 
2719  std::list< ExportSet >::const_iterator ex = exportList.begin();
2720  for( ; ex != exportList.end(); ++ex )
2721  {
2722  if( check_dense_format_tag( *ex, range, prefer_dense ) )
2723  {
2724  range -= ex->range;
2725  tag_iter->dense_list.push_back( *ex );
2726  }
2727  }
2728 
2729  if( check_dense_format_tag( setSet, range, prefer_dense ) )
2730  {
2731  range -= setSet.range;
2732  tag_iter->dense_list.push_back( setSet );
2733  }
2734  }
2735 
2736  tag_iter->write_sparse = !range.empty();
2737 
2738  unsigned long var_len_total = 0;
2739  if( var_len )
2740  {
2741  rval = get_tag_data_length( *tag_iter, range, var_len_total );
2742  CHK_MB_ERR_0( rval );
2743  }
2744 
2745  rval = create_tag( *tag_iter, range.size(), var_len_total );
2746  CHK_MB_ERR_0( rval );
2747  } // for (tags)
2748 
2749  topState.end();
2750  return MB_SUCCESS;
2751 }

References moab::WriteHDF5::ExportSet::adj_offset, assign_ids(), check_dense_format_tag(), CHK_MB_ERR_0, CHK_MHDF_ERR_0, count_adjacencies(), count_set_size(), create_elem_table(), create_set_meta(), create_set_tables(), create_tag(), dbgOut, moab::Range::empty(), moab::CN::EntityTypeName(), ErrorCode, exportList, filePtr, moab::WriteHDF5::ExportSet::first_id, gather_tags(), get_sparse_tagged_entities(), get_tag_data_length(), id_type, iFace, moab::WriteHDF5::ExportSet::max_num_adjs, moab::WriteHDF5::ExportSet::max_num_ents, maxNumSetChildren, maxNumSetContents, maxNumSetParents, MB_SUCCESS, MB_TAG_DENSE, MB_VARIABLE_DATA_LENGTH, MBEDGE, MBENTITYSET, MBMAXTYPE, mhdf_closeData(), mhdf_createAdjacency(), mhdf_createFile(), mhdf_createNodeCoords(), mhdf_node_type_handle(), nodeSet, moab::WriteHDF5::ExportSet::offset, moab::WriteHDF5::ExportSet::range, setChildrenOffset, setContentsOffset, setParentsOffset, setSet, moab::Range::size(), moab::Interface::tag_get_default_value(), moab::Interface::tag_get_length(), moab::Interface::tag_get_type(), tagList, TagType, topState, moab::WriteHDF5::ExportSet::total_num_ents, moab::DebugOutput::tprint(), write_qa(), writeSetChildren, writeSetContents, writeSetParents, writeSets, and writeTagDense.

Referenced by write_file_impl().

◆ vector_to_id_list() [1/3]

ErrorCode moab::WriteHDF5::vector_to_id_list ( const EntityHandle *  input,
size_t  input_len,
wid_t *  output,
size_t &  output_len,
bool  remove_non_written 
)
protected

Get IDs for entities.

Definition at line 1734 of file WriteHDF5.cpp.

1739 {
1740  const EntityHandle* i_iter = input;
1741  const EntityHandle* i_end = input + input_len;
1742  wid_t* o_iter = output;
1743  for( ; i_iter != i_end; ++i_iter )
1744  {
1745  wid_t id = idMap.find( *i_iter );
1746  if( !remove_zeros || id != 0 )
1747  {
1748  *o_iter = id;
1749  ++o_iter;
1750  }
1751  }
1752  output_len = o_iter - output;
1753 
1754  return MB_SUCCESS;
1755 }

References moab::RangeMap< KeyType, ValType, NullVal >::find(), idMap, MB_SUCCESS, and output.

◆ vector_to_id_list() [2/3]

ErrorCode moab::WriteHDF5::vector_to_id_list ( const EntityHandle *  input,
wid_t *  output,
size_t  num_entities 
)
protected

Get IDs for entities.

Definition at line 1768 of file WriteHDF5.cpp.

1769 {
1770  size_t output_len;
1771  return vector_to_id_list( input, count, output, output_len, false );
1772 }

References output, and vector_to_id_list().

◆ vector_to_id_list() [3/3]

ErrorCode moab::WriteHDF5::vector_to_id_list ( const std::vector< EntityHandle > &  input,
std::vector< wid_t > &  output,
bool  remove_non_written = false 
)
protected

Get IDs for entities.

Definition at line 1757 of file WriteHDF5.cpp.

1760 {
1761  output.resize( input.size() );
1762  size_t output_size = 0;
1763  ErrorCode rval = vector_to_id_list( &input[0], input.size(), &output[0], output_size, remove_zeros );
1764  output.resize( output_size );
1765  return rval;
1766 }

References ErrorCode, and output.

Referenced by moab::WriteHDF5Parallel::communicate_shared_set_data(), moab::WriteHDF5Parallel::pack_set(), vector_to_id_list(), and write_set_data().

◆ write_adjacencies()

ErrorCode moab::WriteHDF5::write_adjacencies ( const ExportSet &  export_set)
private

Write adjacency info for passed set of elements

Note: Must have written element connectivity so elements have IDs assigned.

Definition at line 1790 of file WriteHDF5.cpp.

1791 {
1792  ErrorCode rval;
1793  mhdf_Status status;
1794  Range::const_iterator iter;
1795  const Range::const_iterator end = elements.range.end();
1796  std::vector< wid_t > adj_list;
1797 
1799 
1800  debug_barrier();
1801 
1802  /* Count Adjacencies */
1803  long count = 0;
1804  // for (iter = elements.range.begin(); iter != end; ++iter) {
1805  // adj_list.clear();
1806  // rval = get_adjacencies(*iter, adj_list);CHK_MB_ERR_0(rval);
1807  //
1808  // if (adj_list.size() > 0)
1809  // count += adj_list.size() + 2;
1810  //}
1811 
1812  // if (count == 0)
1813  // return MB_SUCCESS;
1814 
1815  long offset = elements.adj_offset;
1816  if( elements.max_num_adjs == 0 ) return MB_SUCCESS;
1817 
1818  /* Create data list */
1819  hid_t table = mhdf_openAdjacency( filePtr, elements.name(), &count, &status );
1820  CHK_MHDF_ERR_0( status );
1821  IODebugTrack track( debugTrack, "Adjacencies", count );
1822 
1823  /* Write data */
1825  long chunk_size = bufferSize / sizeof( wid_t );
1826  long num_writes = ( elements.max_num_adjs + chunk_size - 1 ) / chunk_size;
1828  count = 0;
1829  for( iter = elements.range.begin(); iter != end; ++iter )
1830  {
1831  adj_list.clear();
1832  rval = get_adjacencies( *iter, adj_list );
1833  CHK_MB_ERR_1( rval, table, status );
1834  if( adj_list.size() == 0 ) continue;
1835 
1836  // If buffer is full, flush it
1837  if( count + adj_list.size() + 2 > (unsigned long)chunk_size )
1838  {
1839  dbgOut.print( 3, " writing adjacency chunk.\n" );
1840  track.record_io( offset, count );
1841  mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
1842  CHK_MHDF_ERR_1( status, table );
1844 
1845  offset += count;
1846  count = 0;
1847  }
1848 
1849  buffer[count++] = idMap.find( *iter );
1850  buffer[count++] = adj_list.size();
1851 
1852  assert( adj_list.size() + 2 < (unsigned long)chunk_size );
1853  memcpy( buffer + count, &adj_list[0], adj_list.size() * sizeof( wid_t ) );
1854  count += adj_list.size();
1855  }
1856 
1857  if( count )
1858  {
1859  dbgOut.print( 2, " writing final adjacency chunk.\n" );
1860  mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
1861  CHK_MHDF_ERR_1( status, table );
1862 
1863  offset += count;
1864  count = 0;
1865  --num_writes;
1866  }
1867 
1868  // Do empty writes if necessary for parallel collective IO
1869  if( collectiveIO )
1870  {
1871  while( num_writes > 0 )
1872  {
1873  --num_writes;
1874  assert( writeProp != H5P_DEFAULT );
1875  dbgOut.print( 2, " writing empty adjacency chunk.\n" );
1876  mhdf_writeAdjacencyWithOpt( table, offset, 0, id_type, 0, writeProp, &status );
1877  CHK_MHDF_ERR_1( status, table );
1878  }
1879  }
1880 
1881  mhdf_closeData( filePtr, table, &status );
1882  CHK_MHDF_ERR_0( status );
1883 
1884  track.all_reduce();
1885  return MB_SUCCESS;
1886 }

References moab::WriteHDF5::ExportSet::adj_offset, moab::IODebugTrack::all_reduce(), moab::Range::begin(), buffer, bufferSize, CHECK_OPEN_HANDLES, CHK_MB_ERR_1, CHK_MHDF_ERR_0, CHK_MHDF_ERR_1, collectiveIO, dataBuffer, dbgOut, debug_barrier, debugTrack, moab::Range::end(), ErrorCode, filePtr, moab::RangeMap< KeyType, ValType, NullVal >::find(), get_adjacencies(), id_type, idMap, moab::WriteHDF5::ExportSet::max_num_adjs, MB_SUCCESS, mhdf_closeData(), mhdf_openAdjacency(), mhdf_writeAdjacencyWithOpt(), moab::WriteHDF5::ExportSet::name(), moab::DebugOutput::print(), moab::WriteHDF5::ExportSet::range, moab::IODebugTrack::record_io(), VALGRIND_MAKE_MEM_UNDEFINED, and writeProp.

Referenced by write_file_impl().

◆ write_dense_tag()

ErrorCode moab::WriteHDF5::write_dense_tag ( const TagDesc &  tag_data,
const ExportSet &  elem_data,
const std::string &  tag_name,
DataType  tag_data_type,
hid_t  hdf5_data_type,
int  hdf5_type_size 
)
private

Write dense-formatted tag data.

Definition at line 2295 of file WriteHDF5.cpp.

2301 {
2303 
2304  // Open tables to write info
2305  mhdf_Status status;
2306  long table_size;
2307  hid_t table = mhdf_openDenseTagData( filePtr, name.c_str(), elem_data.name(), &table_size, &status );
2308  CHK_MHDF_ERR_0( status );
2309  assert( elem_data.range.size() + elem_data.offset <= (unsigned long)table_size );
2310 
2311  IODebugTrack track( debugTrack, name + " " + elem_data.name() + " Data", table_size );
2312  ErrorCode rval = write_tag_values( tag_data.tag_id, table, elem_data.offset, elem_data.range, mb_data_type,
2313  value_type, value_type_size, elem_data.max_num_ents, track );
2314  CHK_MB_ERR_0( rval );
2315  mhdf_closeData( filePtr, table, &status );
2316  CHK_MHDF_ERR_0( status );
2317 
2318  return MB_SUCCESS;
2319 }

References CHECK_OPEN_HANDLES, CHK_MB_ERR_0, CHK_MHDF_ERR_0, debugTrack, ErrorCode, filePtr, moab::WriteHDF5::ExportSet::max_num_ents, MB_SUCCESS, mhdf_closeData(), mhdf_openDenseTagData(), moab::WriteHDF5::ExportSet::name(), moab::WriteHDF5::ExportSet::offset, moab::WriteHDF5::ExportSet::range, moab::Range::size(), moab::WriteHDF5::TagDesc::tag_id, and write_tag_values().

Referenced by write_tag().

◆ write_elems()

ErrorCode moab::WriteHDF5::write_elems ( ExportSet &  elemset)
private

Write out element connectivity.

Write connectivity for passed set of elements.

Note: Assigns element IDs. Note: Must do write_nodes first so node IDs get assigned.

Definition at line 1013 of file WriteHDF5.cpp.

1014 {
1015  mhdf_Status status;
1016  ErrorCode rval;
1017  long first_id;
1018  int nodes_per_elem;
1019  long table_size;
1020 
1022 
1023  debug_barrier();
1024  dbgOut.printf( 2, "Writing %lu elements of type %s%d\n", (unsigned long)elems.range.size(),
1025  CN::EntityTypeName( elems.type ), elems.num_nodes );
1026  dbgOut.print( 3, "Writing elements", elems.range );
1027 
1028  hid_t elem_table = mhdf_openConnectivity( filePtr, elems.name(), &nodes_per_elem, &table_size, &first_id, &status );
1029  CHK_MHDF_ERR_0( status );
1030  IODebugTrack track( debugTrack, elems.name() && strlen( elems.name() ) ? elems.name() : "<ANONYMOUS ELEM SET?>",
1031  table_size );
1032 
1033  assert( (unsigned long)first_id <= elems.first_id );
1034  assert( (unsigned long)table_size >= elems.offset + elems.range.size() );
1035 
1037  int chunk_size = bufferSize / ( elems.num_nodes * sizeof( wid_t ) );
1038  long offset = elems.offset;
1039  long remaining = elems.range.size();
1040  long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
1041  if( elems.max_num_ents )
1042  {
1043  assert( elems.max_num_ents >= remaining );
1044  num_writes = ( elems.max_num_ents + chunk_size - 1 ) / chunk_size;
1045  }
1046  long remaining_writes = num_writes;
1047  Range::iterator iter = elems.range.begin();
1048 
1049  while( remaining )
1050  {
1052  long count = chunk_size < remaining ? chunk_size : remaining;
1053  remaining -= count;
1054 
1055  Range::iterator next = iter;
1056  next += count;
1057  rval = writeUtil->get_element_connect( iter, next, elems.num_nodes, count * elems.num_nodes, buffer );
1058  CHK_MB_ERR_1( rval, elem_table, status );
1059  iter = next;
1060 
1061  for( long i = 0; i < count * nodes_per_elem; ++i )
1062  {
1063  buffer[i] = idMap.find( buffer[i] );
1064  if( 0 == buffer[i] )
1065  {
1066  MB_SET_ERR_CONT( "Invalid " << elems.name() << " element connectivity. Write Aborted" );
1067  mhdf_closeData( filePtr, elem_table, &status );
1068  return error( MB_FAILURE );
1069  }
1070  }
1071 
1072  dbgOut.printf( 3, " writing node connectivity %ld of %ld, %ld values at %ld\n",
1073  num_writes - remaining_writes + 1, num_writes, count, offset );
1074  track.record_io( offset, count );
1075  mhdf_writeConnectivityWithOpt( elem_table, offset, count, id_type, buffer, writeProp, &status );
1076  CHK_MHDF_ERR_1( status, elem_table );
1077 
1078  offset += count;
1079  --remaining_writes;
1080  }
1081 
1082  // Do empty writes if necessary for parallel collective IO
1083  if( collectiveIO )
1084  {
1085  while( remaining_writes-- )
1086  {
1087  assert( writeProp != H5P_DEFAULT );
1088  dbgOut.printf( 3, " writing (empty) connectivity chunk %ld of %ld.\n", num_writes - remaining_writes + 1,
1089  num_writes );
1090  mhdf_writeConnectivityWithOpt( elem_table, offset, 0, id_type, 0, writeProp, &status );
1091  CHK_MHDF_ERR_1( status, elem_table );
1092  }
1093  }
1094 
1095  mhdf_closeData( filePtr, elem_table, &status );
1096  CHK_MHDF_ERR_0( status );
1097 
1098  track.all_reduce();
1099  return MB_SUCCESS;
1100 }

References moab::IODebugTrack::all_reduce(), moab::Range::begin(), buffer, bufferSize, CHECK_OPEN_HANDLES, CHK_MB_ERR_1, CHK_MHDF_ERR_0, CHK_MHDF_ERR_1, collectiveIO, dataBuffer, dbgOut, debug_barrier, debugTrack, moab::CN::EntityTypeName(), moab::error(), ErrorCode, filePtr, moab::RangeMap< KeyType, ValType, NullVal >::find(), moab::WriteHDF5::ExportSet::first_id, moab::WriteUtilIface::get_element_connect(), id_type, idMap, moab::WriteHDF5::ExportSet::max_num_ents, MB_SET_ERR_CONT, MB_SUCCESS, mhdf_closeData(), mhdf_openConnectivity(), mhdf_writeConnectivityWithOpt(), moab::WriteHDF5::ExportSet::name(), moab::WriteHDF5::ExportType::num_nodes, moab::WriteHDF5::ExportSet::offset, moab::DebugOutput::print(), moab::DebugOutput::printf(), moab::WriteHDF5::ExportSet::range, moab::IODebugTrack::record_io(), moab::Range::size(), moab::WriteHDF5::ExportType::type, VALGRIND_MAKE_MEM_UNDEFINED, writeProp, and writeUtil.

Referenced by write_file_impl().

◆ write_file()

ErrorCode moab::WriteHDF5::write_file ( const char *  filename,
const bool  overwrite,
const FileOptions &  opts,
const EntityHandle *  export_sets,
const int  export_set_count,
const std::vector< std::string > &  qa_records,
const Tag *  tag_list = NULL,
int  num_tags = 0,
int  user_dimension = 3 
)
virtual

Export specified meshsets to file

Parameters
filename — The filename to export.
export_sets — Array of handles to sets to export, or NULL to export all.
export_set_count — Length of the export_sets array.

Implements moab::WriterIface.

Definition at line 492 of file WriteHDF5.cpp.

501 {
502  mhdf_Status status;
503 
504  parallelWrite = false;
505  collectiveIO = false;
506 
507  // Enable debug output
508  int tmpval = 0;
509  if( MB_SUCCESS == opts.get_int_option( "DEBUG_IO", 1, tmpval ) ) dbgOut.set_verbosity( tmpval );
510 
511  // writeTagDense = (MB_SUCCESS == opts.get_null_option("DENSE_TAGS"));
512  writeTagDense = true;
513 
514  // Enable some extra checks for reads. Note: amongst other things this
515  // will print errors if the entire file is not read, so if doing a
516  // partial read that is not a parallel read, this should be disabled.
517  debugTrack = ( MB_SUCCESS == opts.get_null_option( "DEBUG_BINIO" ) );
518 
520  int buf_size;
521  ErrorCode rval = opts.get_int_option( "BUFFER_SIZE", buf_size );
522  if( MB_SUCCESS == rval && buf_size >= 24 ) bufferSize = buf_size;
523 
524  // Allocate internal buffer to use when gathering data to write.
525  dataBuffer = (char*)malloc( bufferSize );
527 
528  // Clear filePtr so we know if it is open upon failure
529  filePtr = 0;
530 
531  // Do actual write.
532  writeProp = H5P_DEFAULT;
533  ErrorCode result = write_file_impl( filename, overwrite, opts, set_array, num_sets, qa_records, tag_list, num_tags,
534  user_dimension );
535  // Close writeProp if it was opened
536  if( writeProp != H5P_DEFAULT ) H5Pclose( writeProp );
537 
538  // Free memory buffer
539  free( dataBuffer );
540  dataBuffer = 0;
541 
542  // Close file
543  bool created_file = false;
544  if( filePtr )
545  {
546  created_file = true;
547  mhdf_closeFile( filePtr, &status );
548  filePtr = 0;
549  if( mhdf_isError( &status ) )
550  {
551  MB_SET_ERR_CONT( mhdf_message( &status ) );
552  if( MB_SUCCESS == result ) result = MB_FAILURE;
553  }
554  }
555 
556  // Release other resources
557  if( MB_SUCCESS == result )
558  result = write_finished();
559  else
560  write_finished();
561 
562  // If write failed, remove file unless KEEP option was specified
563  if( MB_SUCCESS != result && created_file && MB_ENTITY_NOT_FOUND == opts.get_null_option( "KEEP" ) )
564  remove( filename );
565 
566  return result;
567 }

References bufferSize, collectiveIO, dataBuffer, dbgOut, debugTrack, moab::error(), ErrorCode, filePtr, moab::FileOptions::get_int_option(), moab::FileOptions::get_null_option(), MB_ENTITY_NOT_FOUND, MB_MEMORY_ALLOCATION_FAILED, MB_SET_ERR_CONT, MB_SUCCESS, mhdf_closeFile(), mhdf_isError(), mhdf_message(), parallelWrite, moab::DebugOutput::set_verbosity(), write_file_impl(), write_finished(), WRITE_HDF5_BUFFER_SIZE, writeProp, and writeTagDense.

◆ write_file_impl()

ErrorCode moab::WriteHDF5::write_file_impl ( const char *  filename,
const bool  overwrite,
const FileOptions &  opts,
const EntityHandle *  export_sets,
const int  export_set_count,
const std::vector< std::string > &  qa_records,
const Tag *  tag_list,
int  num_tags,
int  user_dimension = 3 
)
private

Do the actual work of write_file. Separated from write_file for easier resource cleanup.

Definition at line 569 of file WriteHDF5.cpp.

578 {
579  ErrorCode result;
580  std::list< TagDesc >::const_iterator t_itor;
581  std::list< ExportSet >::iterator ex_itor;
582  EntityHandle elem_count, max_id;
583  double times[NUM_TIMES] = { 0 };
584 
585  if( MB_SUCCESS != init() ) return error( MB_FAILURE );
586 
587  // See if we need to report times
588  bool cputime = false;
589  result = opts.get_null_option( "CPUTIME" );
590  if( MB_SUCCESS == result ) cputime = true;
591 
592  CpuTimer timer;
593 
594  dbgOut.tprint( 1, "Gathering Mesh\n" );
595  topState.start( "gathering mesh" );
596 
597  // Gather mesh to export
598  exportList.clear();
599  if( 0 == num_sets || ( 1 == num_sets && set_array[0] == 0 ) )
600  {
601  result = gather_all_mesh();
602  topState.end( result );
603  CHK_MB_ERR_0( result );
604  }
605  else
606  {
607  std::vector< EntityHandle > passed_export_list( set_array, set_array + num_sets );
608  result = gather_mesh_info( passed_export_list );
609  topState.end( result );
610  CHK_MB_ERR_0( result );
611  }
612 
613  times[GATHER_TIME] = timer.time_elapsed();
614 
615  // if (nodeSet.range.size() == 0)
616  // return error(MB_ENTITY_NOT_FOUND);
617 
618  dbgOut.tprint( 1, "Checking ID space\n" );
619 
620  // Make sure ID space is sufficient
621  elem_count = nodeSet.range.size() + setSet.range.size();
622  for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
623  elem_count += ex_itor->range.size();
624  max_id = (EntityHandle)1 << ( 8 * sizeof( wid_t ) - 1 );
625  if( elem_count > max_id )
626  {
627  MB_SET_ERR_CONT( "ID space insufficient for mesh size" );
628  return error( result );
629  }
630 
631  dbgOut.tprint( 1, "Creating File\n" );
632 
633  // Figure out the dimension in which to write the mesh.
634  int mesh_dim;
635  result = iFace->get_dimension( mesh_dim );
636  CHK_MB_ERR_0( result );
637 
638  if( user_dimension < 1 ) user_dimension = mesh_dim;
639  user_dimension = user_dimension > mesh_dim ? mesh_dim : user_dimension;
640 
641  // Create the file layout, including all tables (zero-ed) and
642  // all structure and meta information.
643  const char* optnames[] = { "WRITE_PART", "FORMAT", 0 };
644  int junk;
645  parallelWrite = ( MB_SUCCESS == opts.match_option( "PARALLEL", optnames, junk ) );
646  if( parallelWrite )
647  {
648  // Just store Boolean value based on string option here.
649  // parallel_create_file will set writeProp accordingly.
650  // collectiveIO = (MB_SUCCESS == opts.get_null_option("COLLECTIVE"));
651  // dbgOut.printf(2, "'COLLECTIVE' option = %s\n", collectiveIO ? "YES" : "NO");
652  // Do this all the time, as it appears to be much faster than indep in some cases
653  collectiveIO = true;
654  result =
655  parallel_create_file( filename, overwrite, qa_records, opts, tag_list, num_tags, user_dimension, times );
656  }
657  else
658  {
659  result = serial_create_file( filename, overwrite, qa_records, tag_list, num_tags, user_dimension );
660  }
661  if( MB_SUCCESS != result ) return error( result );
662 
663  times[CREATE_TIME] = timer.time_elapsed();
664 
665  dbgOut.tprint( 1, "Writing Nodes.\n" );
666  // Write node coordinates
667  if( !nodeSet.range.empty() || parallelWrite )
668  {
669  topState.start( "writing coords" );
670  result = write_nodes();
671  topState.end( result );
672  if( MB_SUCCESS != result ) return error( result );
673  }
674 
675  times[COORD_TIME] = timer.time_elapsed();
676 
677  dbgOut.tprint( 1, "Writing connectivity.\n" );
678 
679  // Write element connectivity
680  for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
681  {
682  topState.start( "writing connectivity for ", ex_itor->name() );
683  result = write_elems( *ex_itor );
684  topState.end( result );
685  if( MB_SUCCESS != result ) return error( result );
686  }
687  times[CONN_TIME] = timer.time_elapsed();
688 
689  dbgOut.tprint( 1, "Writing sets.\n" );
690 
691  // Write meshsets
692  result = write_sets( times );
693  if( MB_SUCCESS != result ) return error( result );
694  debug_barrier();
695 
696  times[SET_TIME] = timer.time_elapsed();
697  dbgOut.tprint( 1, "Writing adjacencies.\n" );
698 
699  // Write adjacencies
700  // Tim says don't save node adjacencies!
701 #ifdef MB_H5M_WRITE_NODE_ADJACENCIES
702  result = write_adjacencies( nodeSet );
703  if( MB_SUCCESS != result ) return error( result );
704 #endif
705  for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
706  {
707  topState.start( "writing adjacencies for ", ex_itor->name() );
708  result = write_adjacencies( *ex_itor );
709  topState.end( result );
710  if( MB_SUCCESS != result ) return error( result );
711  }
712  times[ADJ_TIME] = timer.time_elapsed();
713 
714  dbgOut.tprint( 1, "Writing tags.\n" );
715 
716  // Write tags
717  for( t_itor = tagList.begin(); t_itor != tagList.end(); ++t_itor )
718  {
719  std::string name;
720  iFace->tag_get_name( t_itor->tag_id, name );
721  topState.start( "writing tag: ", name.c_str() );
722  result = write_tag( *t_itor, times );
723  topState.end( result );
724  if( MB_SUCCESS != result ) return error( result );
725  }
726  times[TAG_TIME] = timer.time_elapsed();
727 
728  times[TOTAL_TIME] = timer.time_since_birth();
729 
730  if( cputime )
731  {
732  print_times( times );
733  }
734 
735  return MB_SUCCESS;
736 }

References ADJ_TIME, CHK_MB_ERR_0, collectiveIO, CONN_TIME, COORD_TIME, CREATE_TIME, dbgOut, debug_barrier, moab::Range::empty(), moab::error(), ErrorCode, exportList, gather_all_mesh(), gather_mesh_info(), GATHER_TIME, moab::Interface::get_dimension(), moab::FileOptions::get_null_option(), iFace, init(), moab::FileOptions::match_option(), MB_SET_ERR_CONT, MB_SUCCESS, nodeSet, NUM_TIMES, parallel_create_file(), parallelWrite, print_times(), moab::WriteHDF5::ExportSet::range, serial_create_file(), SET_TIME, setSet, moab::Range::size(), moab::Interface::tag_get_name(), TAG_TIME, tagList, moab::CpuTimer::time_elapsed(), moab::CpuTimer::time_since_birth(), topState, TOTAL_TIME, moab::DebugOutput::tprint(), write_adjacencies(), write_elems(), write_nodes(), write_sets(), and write_tag().

Referenced by write_file().

◆ write_finished()

ErrorCode moab::WriteHDF5::write_finished ( )
protectedvirtual

Definition at line 456 of file WriteHDF5.cpp.

457 {
458  // Release memory allocated in lists
459  exportList.clear();
460  nodeSet.range.clear();
461  setSet.range.clear();
462  tagList.clear();
463  idMap.clear();
464 
465  HDF5ErrorHandler handler;
466 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
467  herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
468 #else
469  herr_t err = H5Eget_auto( &handler.func, &handler.data );
470 #endif
471  if( err >= 0 && handler.func == &handle_hdf5_error )
472  {
473  assert( handler.data == &errorHandler );
474 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
475  H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
476 #else
477  H5Eset_auto( errorHandler.func, errorHandler.data );
478 #endif
479  }
480 
481  return MB_SUCCESS;
482 }

References moab::Range::clear(), moab::RangeMap< KeyType, ValType, NullVal >::clear(), moab::WriteHDF5::HDF5ErrorHandler::data, errorHandler, exportList, moab::WriteHDF5::HDF5ErrorHandler::func, moab::handle_hdf5_error(), idMap, MB_SUCCESS, nodeSet, moab::WriteHDF5::ExportSet::range, setSet, and tagList.

Referenced by write_file().

◆ write_nodes()

ErrorCode moab::WriteHDF5::write_nodes ( )
private

Write out the nodes.

Note: Assigns IDs to nodes.

Definition at line 904 of file WriteHDF5.cpp.

905 {
906  mhdf_Status status;
907  int dim, mesh_dim;
908  ErrorCode rval;
909  hid_t node_table;
910  long first_id, num_nodes;
911 
912  if( !nodeSet.total_num_ents ) return MB_SUCCESS; // No nodes!
913 
915 
916  rval = iFace->get_dimension( mesh_dim );
917  CHK_MB_ERR_0( rval );
918 
919  debug_barrier();
920  dbgOut.print( 3, "Opening Node Coords\n" );
921  node_table = mhdf_openNodeCoords( filePtr, &num_nodes, &dim, &first_id, &status );
922  CHK_MHDF_ERR_0( status );
923  IODebugTrack track( debugTrack, "nodes", num_nodes );
924 
925  double* buffer = (double*)dataBuffer;
926 #ifdef BLOCKED_COORD_IO
927  int chunk_size = bufferSize / sizeof( double );
928 #else
929  int chunk_size = bufferSize / ( 3 * sizeof( double ) );
930 #endif
931 
932  long remaining = nodeSet.range.size();
933  long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
934  if( nodeSet.max_num_ents )
935  {
936  assert( nodeSet.max_num_ents >= remaining );
937  num_writes = ( nodeSet.max_num_ents + chunk_size - 1 ) / chunk_size;
938  }
939  long remaining_writes = num_writes;
940 
941  long offset = nodeSet.offset;
942  Range::const_iterator iter = nodeSet.range.begin();
943  dbgOut.printf( 3, "Writing %ld nodes in %ld blocks of %d\n", remaining, ( remaining + chunk_size - 1 ) / chunk_size,
944  chunk_size );
945  while( remaining )
946  {
948  long count = chunk_size < remaining ? chunk_size : remaining;
949  remaining -= count;
950  Range::const_iterator end = iter;
951  end += count;
952 
953 #ifdef BLOCKED_COORD_IO
954  for( int d = 0; d < dim; d++ )
955  {
956  if( d < mesh_dim )
957  {
958  rval = writeUtil->get_node_coords( d, iter, end, count, buffer );
959  CHK_MB_ERR_1( rval, node_table, status );
960  }
961  else
962  memset( buffer, 0, count * sizeof( double ) );
963 
964  dbgOut.printf( 3, " writing %c node chunk %ld of %ld, %ld values at %ld\n", (char)( 'X' + d ),
965  num_writes - remaining_writes + 1, num_writes, count, offset );
966  mhdf_writeNodeCoordWithOpt( node_table, offset, count, d, buffer, writeProp, &status );
967  CHK_MHDF_ERR_1( status, node_table );
968  }
969 #else
970  rval = writeUtil->get_node_coords( -1, iter, end, 3 * count, buffer );
971  CHK_MB_ERR_1( rval, node_table, status );
972  dbgOut.printf( 3, " writing node chunk %ld of %ld, %ld values at %ld\n", num_writes - remaining_writes + 1,
973  num_writes, count, offset );
974  mhdf_writeNodeCoordsWithOpt( node_table, offset, count, buffer, writeProp, &status );
975  CHK_MHDF_ERR_1( status, node_table );
976 #endif
977  track.record_io( offset, count );
978 
979  iter = end;
980  offset += count;
981  --remaining_writes;
982  }
983 
984  // Do empty writes if necessary for parallel collective IO
985  if( collectiveIO )
986  {
987  while( remaining_writes-- )
988  {
989  assert( writeProp != H5P_DEFAULT );
990 #ifdef BLOCKED_COORD_IO
991  for( int d = 0; d < dim; ++d )
992  {
993  dbgOut.printf( 3, " writing (empty) %c node chunk %ld of %ld.\n", (char)( 'X' + d ),
994  num_writes - remaining_writes, num_writes );
995  mhdf_writeNodeCoordWithOpt( node_table, offset, 0, d, 0, writeProp, &status );
996  CHK_MHDF_ERR_1( status, node_table );
997  }
998 #else
999  dbgOut.printf( 3, " writing (empty) node chunk %ld of %ld.\n", num_writes - remaining_writes, num_writes );
1000  mhdf_writeNodeCoordsWithOpt( node_table, offset, 0, 0, writeProp, &status );
1001  CHK_MHDF_ERR_1( status, node_table );
1002 #endif
1003  }
1004  }
1005 
1006  mhdf_closeData( filePtr, node_table, &status );
1007  CHK_MHDF_ERR_0( status );
1008 
1009  track.all_reduce();
1010  return MB_SUCCESS;
1011 }

References moab::IODebugTrack::all_reduce(), moab::Range::begin(), buffer, bufferSize, CHECK_OPEN_HANDLES, CHK_MB_ERR_0, CHK_MB_ERR_1, CHK_MHDF_ERR_0, CHK_MHDF_ERR_1, collectiveIO, dataBuffer, dbgOut, debug_barrier, debugTrack, dim, ErrorCode, filePtr, moab::Interface::get_dimension(), moab::WriteUtilIface::get_node_coords(), iFace, moab::WriteHDF5::ExportSet::max_num_ents, MB_SUCCESS, mhdf_closeData(), mhdf_openNodeCoords(), mhdf_writeNodeCoordsWithOpt(), mhdf_writeNodeCoordWithOpt(), nodeSet, moab::WriteHDF5::ExportSet::offset, moab::DebugOutput::print(), moab::DebugOutput::printf(), moab::WriteHDF5::ExportSet::range, moab::IODebugTrack::record_io(), moab::Range::size(), moab::WriteHDF5::ExportSet::total_num_ents, VALGRIND_MAKE_MEM_UNDEFINED, writeProp, and writeUtil.

Referenced by write_file_impl().

◆ write_qa()

ErrorCode moab::WriteHDF5::write_qa ( const std::vector< std::string > &  list)
protected

Write exodus-type QA info.

Definition at line 2401 of file WriteHDF5.cpp.

2402 {
2403  const char* app = "MOAB";
2404  const char* vers = MOAB_VERSION;
2405  char date_str[64];
2406  char time_str[64];
2407 
2408  CHECK_OPEN_HANDLES;
2409 
2410  std::vector< const char* > strs( list.size() ? list.size() : 4 );
2411  if( list.size() == 0 )
2412  {
2413  time_t t = time( NULL );
2414  tm* lt = localtime( &t );
2415 #ifdef WIN32
2416  strftime( date_str, sizeof( date_str ), "%m/%d/%y", lt ); // VS 2008 does not support %D
2417  strftime( time_str, sizeof( time_str ), "%H:%M:%S", lt ); // VS 2008 does not support %T
2418 #else
2419  strftime( date_str, sizeof( date_str ), "%D", lt );
2420  strftime( time_str, sizeof( time_str ), "%T", lt );
2421 #endif
2422 
2423  strs[0] = app;
2424  strs[1] = vers;
2425  strs[2] = date_str;
2426  strs[3] = time_str;
2427  }
2428  else
2429  {
2430  for( unsigned int i = 0; i < list.size(); ++i )
2431  strs[i] = list[i].c_str();
2432  }
2433 
2434  mhdf_Status status;
2435  dbgOut.print( 2, " writing QA history.\n" );
2436  mhdf_writeHistory( filePtr, &strs[0], strs.size(), &status );
2437  CHK_MHDF_ERR_0( status );
2438 
2439  return MB_SUCCESS;
2440 }

References CHECK_OPEN_HANDLES, CHK_MHDF_ERR_0, dbgOut, filePtr, MB_SUCCESS, mhdf_writeHistory(), MOAB_VERSION, and moab::DebugOutput::print().

Referenced by moab::WriteHDF5Parallel::parallel_create_file(), and serial_create_file().

◆ write_set_data()

ErrorCode moab::WriteHDF5::write_set_data ( const WriteUtilIface::EntityListType  which_data,
const hid_t  handle,
IODebugTrack track,
Range ranged = 0,
Range null_stripped = 0,
std::vector< long > *  set_sizes = 0 
)
private

Write set contents/parents/children lists

Parameters
which_data — Which set data to write (contents, parents, or children)
handle — HDF5 handle for data set in which to write data
track — Debugging tool
ranged — Will be populated with handles of sets for which contents were written in a range-compacted format (mhdf_SET_RANGE_BIT). Should be null for parents/children.
null_stripped — Will be populated with handles of sets for which invalid or null handles were stripped from the contents list. This is only done for unordered sets. This argument should be null if writing parents/children because those lists are always ordered.
set_sizes — Will be populated with the length of the data written for those sets for which the handles were added to either ranged or null_stripped. Values are in handle order.

Definition at line 1131 of file WriteHDF5.cpp.

1137 {
1138  // ranged must be non-null for CONTENTS and null for anything else
1139  assert( ( which_data == WriteUtilIface::CONTENTS ) == ( 0 != ranged ) );
1140  ErrorCode rval;
1141  mhdf_Status status;
1142 
1143  debug_barrier();
1144 
1145  // Function pointer type used to write set data
1146  void ( *write_func )( hid_t, long, long, hid_t, const void*, hid_t, mhdf_Status* );
1147  long max_vals; // Max over all procs of number of values to write to data set
1148  long offset; // Offset in HDF5 dataset at which to write next block of data
1149  switch( which_data )
1150  {
1151  case WriteUtilIface::CONTENTS:
1152  assert( ranged != 0 && null_stripped != 0 && set_sizes != 0 );
1153  write_func = &mhdf_writeSetDataWithOpt;
1154  max_vals = maxNumSetContents;
1155  offset = setContentsOffset;
1156  dbgOut.print( 2, "Writing set contents\n" );
1157  break;
1158  case WriteUtilIface::CHILDREN:
1159  assert( !ranged && !null_stripped && !set_sizes );
1160  write_func = &mhdf_writeSetParentsChildrenWithOpt;
1161  max_vals = maxNumSetChildren;
1162  offset = setChildrenOffset;
1163  dbgOut.print( 2, "Writing set child lists\n" );
1164  break;
1165  case WriteUtilIface::PARENTS:
1166  assert( !ranged && !null_stripped && !set_sizes );
1167  write_func = &mhdf_writeSetParentsChildrenWithOpt;
1168  max_vals = maxNumSetParents;
1169  offset = setParentsOffset;
1170  dbgOut.print( 2, "Writing set parent lists\n" );
1171  break;
1172  default:
1173  assert( false );
1174  return MB_FAILURE;
1175  }
1176  // assert(max_vals > 0); // Should have skipped this function otherwise
1177 
1178  // buffer to use for IO
1179  wid_t* buffer = reinterpret_cast< wid_t* >( dataBuffer );
1180  // number of handles that will fit in the buffer
1181  const size_t buffer_size = bufferSize / sizeof( EntityHandle );
1182  // the total number of write calls that must be made, including no-ops for collective io
1183  const size_t num_total_writes = ( max_vals + buffer_size - 1 ) / buffer_size;
1184 
1185  std::vector< SpecialSetData >::iterator si = specialSets.begin();
1186 
1187  std::vector< wid_t > remaining; // data left over from prev iteration because it didn't fit in buffer
1188  size_t remaining_offset = 0; // avoid erasing from front of 'remaining'
1189  const EntityHandle* remaining_ptr = 0; // remaining for non-ranged data
1190  size_t remaining_count = 0;
1191  const wid_t* special_rem_ptr = 0;
1192  Range::const_iterator i = setSet.range.begin(), j, rhint, nshint;
1193  if( ranged ) rhint = ranged->begin();
1194  if( null_stripped ) nshint = null_stripped->begin();
1195  for( size_t w = 0; w < num_total_writes; ++w )
1196  {
1197  if( i == setSet.range.end() && !remaining.empty() && !remaining_ptr )
1198  {
1199  // If here, then we've written everything but we need to
1200  // make more write calls because we're doing collective IO
1201  // in parallel
1202  ( *write_func )( handle, 0, 0, id_type, 0, writeProp, &status );
1203  CHK_MHDF_ERR_0( status );
1204  continue;
1205  }
1206 
1207  // If we had some left-over data from a range-compacted set
1208  // from the last iteration, add it to the buffer now
1209  size_t count = 0;
1210  if( !remaining.empty() )
1211  {
1212  count = remaining.size() - remaining_offset;
1213  if( count > buffer_size )
1214  {
1215  memcpy( buffer, &remaining[remaining_offset], buffer_size * sizeof( wid_t ) );
1216  count = buffer_size;
1217  remaining_offset += buffer_size;
1218  }
1219  else
1220  {
1221  memcpy( buffer, &remaining[remaining_offset], count * sizeof( wid_t ) );
1222  remaining_offset = 0;
1223  remaining.clear();
1224  }
1225  }
1226  // If we had some left-over data from a non-range-compacted set
1227  // from the last iteration, add it to the buffer now
1228  else if( remaining_ptr )
1229  {
1230  if( remaining_count > buffer_size )
1231  {
1232  rval = vector_to_id_list( remaining_ptr, buffer, buffer_size );
1233  CHK_MB_ERR_0( rval );
1234  count = buffer_size;
1235  remaining_ptr += count;
1236  remaining_count -= count;
1237  }
1238  else
1239  {
1240  rval = vector_to_id_list( remaining_ptr, buffer, remaining_count );
1241  CHK_MB_ERR_0( rval );
1242  count = remaining_count;
1243  remaining_ptr = 0;
1244  remaining_count = 0;
1245  }
1246  }
1247  // If we had some left-over data from a "special" (i.e. parallel shared)
1248  // set.
1249  else if( special_rem_ptr )
1250  {
1251  if( remaining_count > buffer_size )
1252  {
1253  memcpy( buffer, special_rem_ptr, buffer_size * sizeof( wid_t ) );
1254  count = buffer_size;
1255  special_rem_ptr += count;
1256  remaining_count -= count;
1257  }
1258  else
1259  {
1260  memcpy( buffer, special_rem_ptr, remaining_count * sizeof( wid_t ) );
1261  count = remaining_count;
1262  special_rem_ptr = 0;
1263  remaining_count = 0;
1264  }
1265  }
1266 
1267  // While there is both space remaining in the buffer and
1268  // more sets to write, append more set data to buffer.
1269 
1270  while( count < buffer_size && i != setSet.range.end() )
1271  {
1272  // Special case for "special" (i.e. parallel shared) sets:
1273  // we already have the data in a vector, just copy it.
1274  if( si != specialSets.end() && si->setHandle == *i )
1275  {
1276  std::vector< wid_t >& list = ( which_data == WriteUtilIface::CONTENTS ) ? si->contentIds
1277  : ( which_data == WriteUtilIface::PARENTS ) ? si->parentIds
1278  : si->childIds;
1279  size_t append = list.size();
1280  if( count + list.size() > buffer_size )
1281  {
1282  append = buffer_size - count;
1283  special_rem_ptr = &list[append];
1284  remaining_count = list.size() - append;
1285  }
1286  memcpy( buffer + count, &list[0], append * sizeof( wid_t ) );
1287  ++i;
1288  ++si;
1289  count += append;
1290  continue;
1291  }
1292 
1293  j = i;
1294  ++i;
1295  const EntityHandle* ptr;
1296  int len;
1297  unsigned char flags;
1298  rval = writeUtil->get_entity_list_pointers( j, i, &ptr, which_data, &len, &flags );
1299  if( MB_SUCCESS != rval ) return rval;
1300  if( which_data == WriteUtilIface::CONTENTS && !( flags & MESHSET_ORDERED ) )
1301  {
1302  bool compacted;
1303  remaining.clear();
1304  if( len == 0 )
1305  compacted = false;
1306  else
1307  {
1308  assert( !( len % 2 ) );
1309  rval = range_to_blocked_list( ptr, len / 2, remaining, compacted );
1310  if( MB_SUCCESS != rval ) return rval;
1311  }
1312  if( compacted )
1313  {
1314  rhint = ranged->insert( rhint, *j );
1315  set_sizes->push_back( remaining.size() );
1316  }
1317  else if( remaining.size() != (unsigned)len )
1318  {
1319  nshint = null_stripped->insert( nshint, *j );
1320  set_sizes->push_back( remaining.size() );
1321  }
1322 
1323  if( count + remaining.size() <= buffer_size )
1324  {
1325  if( !remaining.empty() )
1326  memcpy( buffer + count, &remaining[0], sizeof( wid_t ) * remaining.size() );
1327  count += remaining.size();
1328  remaining.clear();
1329  remaining_offset = 0;
1330  }
1331  else
1332  {
1333  remaining_offset = buffer_size - count;
1334  memcpy( buffer + count, &remaining[0], sizeof( wid_t ) * remaining_offset );
1335  count += remaining_offset;
1336  }
1337  }
1338  else
1339  {
1340  if( count + len > buffer_size )
1341  {
1342  size_t append = buffer_size - count;
1343  remaining_ptr = ptr + append;
1344  remaining_count = len - append;
1345  len = append;
1346  }
1347 
1348  rval = vector_to_id_list( ptr, buffer + count, len );
1349  count += len;
1350  }
1351  }
1352 
1353  // Write the buffer.
1354  ( *write_func )( handle, offset, count, id_type, buffer, writeProp, &status );
1355  CHK_MHDF_ERR_0( status );
1356  track.record_io( offset, count );
1357  offset += count;
1358  }
1359 
1360  return MB_SUCCESS;
1361 }

References moab::Range::begin(), buffer, bufferSize, moab::WriteUtilIface::CHILDREN, CHK_MB_ERR_0, CHK_MHDF_ERR_0, moab::WriteUtilIface::CONTENTS, dataBuffer, dbgOut, debug_barrier, moab::Range::end(), ErrorCode, moab::WriteUtilIface::get_entity_list_pointers(), id_type, moab::Range::insert(), maxNumSetChildren, maxNumSetContents, maxNumSetParents, MB_SUCCESS, mhdf_writeSetDataWithOpt(), mhdf_writeSetParentsChildrenWithOpt(), moab::WriteUtilIface::PARENTS, moab::DebugOutput::print(), moab::WriteHDF5::ExportSet::range, range_to_blocked_list(), moab::IODebugTrack::record_io(), setChildrenOffset, setContentsOffset, setParentsOffset, setSet, specialSets, vector_to_id_list(), writeProp, and writeUtil.

Referenced by write_sets().

◆ write_sets()

ErrorCode moab::WriteHDF5::write_sets ( double *  times)
private

Write out meshsets

Write passed set of meshsets, including parent/child relations.

Note: Must have written nodes and element connectivity so entities have assigned IDs.

Definition at line 1363 of file WriteHDF5.cpp.

1364 {
1365  mhdf_Status status;
1366  ErrorCode rval;
1367  long first_id, size;
1368  hid_t table;
1369  CpuTimer timer;
1370 
1372  /* If no sets, just return success */
1373  if( !writeSets ) return MB_SUCCESS;
1374 
1375  debug_barrier();
1376  dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
1377  dbgOut.print( 3, "Non-shared sets", setSet.range );
1378 
1379  /* Write set parents */
1380  if( writeSetParents )
1381  {
1382  topState.start( "writing parent lists for local sets" );
1383  table = mhdf_openSetParents( filePtr, &size, &status );
1384  CHK_MHDF_ERR_0( status );
1385  IODebugTrack track( debugTrack, "SetParents", size );
1386 
1387  rval = write_set_data( WriteUtilIface::PARENTS, table, track );
1388  topState.end( rval );
1389  CHK_MB_ERR_1( rval, table, status );
1390 
1391  mhdf_closeData( filePtr, table, &status );
1392  CHK_MHDF_ERR_0( status );
1393 
1394  times[SET_PARENT] = timer.time_elapsed();
1395  track.all_reduce();
1396  }
1397 
1398  /* Write set children */
1399  if( writeSetChildren )
1400  {
1401  topState.start( "writing child lists for local sets" );
1402  table = mhdf_openSetChildren( filePtr, &size, &status );
1403  CHK_MHDF_ERR_0( status );
1404  IODebugTrack track( debugTrack, "SetChildren", size );
1405 
1406  rval = write_set_data( WriteUtilIface::CHILDREN, table, track );
1407  topState.end( rval );
1408  CHK_MB_ERR_1( rval, table, status );
1409 
1410  mhdf_closeData( filePtr, table, &status );
1411  CHK_MHDF_ERR_0( status );
1412 
1413  times[SET_CHILD] = timer.time_elapsed();
1414  track.all_reduce();
1415  }
1416 
1417  /* Write set contents */
1418  Range ranged_sets, null_stripped_sets;
1419  std::vector< long > set_sizes;
1420  if( writeSetContents )
1421  {
1422  topState.start( "writing content lists for local sets" );
1423  table = mhdf_openSetData( filePtr, &size, &status );
1424  CHK_MHDF_ERR_0( status );
1425  IODebugTrack track( debugTrack, "SetContents", size );
1426 
1427  rval = write_set_data( WriteUtilIface::CONTENTS, table, track, &ranged_sets, &null_stripped_sets, &set_sizes );
1428  topState.end( rval );
1429  CHK_MB_ERR_1( rval, table, status );
1430 
1431  mhdf_closeData( filePtr, table, &status );
1432  CHK_MHDF_ERR_0( status );
1433 
1434  times[SET_CONTENT] = timer.time_elapsed();
1435  track.all_reduce();
1436  }
1437  assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );
1438 
1439  /* Write set description table */
1440 
1441  debug_barrier();
1442  topState.start( "writing descriptions of local sets" );
1443  dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
1444  dbgOut.print( 3, "Non-shared sets", setSet.range );
1445 
1446  /* Open the table */
1447  table = mhdf_openSetMeta( filePtr, &size, &first_id, &status );
1448  CHK_MHDF_ERR_0( status );
1449  IODebugTrack track_meta( debugTrack, "SetMeta", size );
1450 
1451  /* Some debug stuff */
1452  debug_barrier();
1453  dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
1454  dbgOut.print( 3, "Non-shared sets", setSet.range );
1455 
1456  /* Counts and buffers and such */
1457  mhdf_index_t* const buffer = reinterpret_cast< mhdf_index_t* >( dataBuffer );
1458  const size_t buffer_size = bufferSize / ( 4 * sizeof( mhdf_index_t ) );
1459  const size_t num_local_writes = ( setSet.range.size() + buffer_size - 1 ) / buffer_size;
1460  const size_t num_global_writes = ( setSet.max_num_ents + buffer_size - 1 ) / buffer_size;
1461  assert( num_local_writes <= num_global_writes );
1462  assert( num_global_writes > 0 );
1463 
1464  /* data about sets for which number of handles written is
1465  * not the same as the number of handles in the set
1466  * (range-compacted or null handles stripped out)
1467  */
1468  Range::const_iterator i = setSet.range.begin();
1469  Range::const_iterator r = ranged_sets.begin();
1470  Range::const_iterator s = null_stripped_sets.begin();
1471  std::vector< mhdf_index_t >::const_iterator n = set_sizes.begin();
1472  assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );
1473 
1474  /* We write the end index for each list, rather than the count */
1475  mhdf_index_t prev_contents_end = setContentsOffset - 1;
1476  mhdf_index_t prev_children_end = setChildrenOffset - 1;
1477  mhdf_index_t prev_parents_end = setParentsOffset - 1;
1478 
1479  /* While there is more data to write */
1480  size_t offset = setSet.offset;
1481  std::vector< SpecialSetData >::const_iterator si = specialSets.begin();
1482  for( size_t w = 0; w < num_local_writes; ++w )
1483  {
1484  // Get a buffer full of data
1485  size_t count = 0;
1486  while( count < buffer_size && i != setSet.range.end() )
1487  {
1488  // Get set properties
1489  long num_ent, num_child, num_parent;
1490  unsigned long flags;
1491  if( si != specialSets.end() && si->setHandle == *i )
1492  {
1493  flags = si->setFlags;
1494  num_ent = si->contentIds.size();
1495  num_child = si->childIds.size();
1496  num_parent = si->parentIds.size();
1497  ++si;
1498  if( r != ranged_sets.end() && *i == *r )
1499  {
1500  assert( flags & mhdf_SET_RANGE_BIT );
1501  ++r;
1502  ++n;
1503  }
1504  else if( s != null_stripped_sets.end() && *i == *s )
1505  {
1506  ++s;
1507  ++n;
1508  }
1509  }
1510  else
1511  {
1512  assert( si == specialSets.end() || si->setHandle > *i );
1513 
1514  // Get set properties
1515  rval = get_set_info( *i, num_ent, num_child, num_parent, flags );
1516  CHK_MB_ERR_1( rval, table, status );
1517 
1518  // Check if size is something other than num handles in set
1519  if( r != ranged_sets.end() && *i == *r )
1520  {
1521  num_ent = *n;
1522  ++r;
1523  ++n;
1524  flags |= mhdf_SET_RANGE_BIT;
1525  }
1526  else if( s != null_stripped_sets.end() && *i == *s )
1527  {
1528  num_ent = *n;
1529  ++s;
1530  ++n;
1531  }
1532  }
1533 
1534  // Put data in buffer
1535  mhdf_index_t* local = buffer + 4 * count;
1536  prev_contents_end += num_ent;
1537  prev_children_end += num_child;
1538  prev_parents_end += num_parent;
1539  local[0] = prev_contents_end;
1540  local[1] = prev_children_end;
1541  local[2] = prev_parents_end;
1542  local[3] = flags;
1543 
1544  // Iterate
1545  ++count;
1546  ++i;
1547  }
1548 
1549  // Write the data
1550  mhdf_writeSetMetaWithOpt( table, offset, count, MHDF_INDEX_TYPE, buffer, writeProp, &status );
1551  CHK_MHDF_ERR_1( status, table );
1552  track_meta.record_io( offset, count );
1553  offset += count;
1554  }
1555  assert( r == ranged_sets.end() );
1556  assert( s == null_stripped_sets.end() );
1557  assert( n == set_sizes.end() );
1558 
1559  /* If doing parallel write with collective IO, do null write
1560  * calls because other procs aren't done yet and write calls
1561  * are collective */
1562  for( size_t w = num_local_writes; w != num_global_writes; ++w )
1563  {
1564  mhdf_writeSetMetaWithOpt( table, 0, 0, MHDF_INDEX_TYPE, 0, writeProp, &status );
1565  CHK_MHDF_ERR_1( status, table );
1566  }
1567 
1568  topState.end();
1569  mhdf_closeData( filePtr, table, &status );
1570  CHK_MHDF_ERR_0( status );
1571 
1572  times[SET_META] = timer.time_elapsed();
1573  track_meta.all_reduce();
1574 
1575  return MB_SUCCESS;
1576 }

References moab::IODebugTrack::all_reduce(), moab::Range::begin(), buffer, bufferSize, CHECK_OPEN_HANDLES, moab::WriteUtilIface::CHILDREN, CHK_MB_ERR_1, CHK_MHDF_ERR_0, CHK_MHDF_ERR_1, moab::WriteUtilIface::CONTENTS, dataBuffer, dbgOut, debug_barrier, debugTrack, moab::Range::end(), ErrorCode, filePtr, get_set_info(), moab::WriteHDF5::ExportSet::max_num_ents, MB_SUCCESS, mhdf_closeData(), MHDF_INDEX_TYPE, mhdf_openSetChildren(), mhdf_openSetData(), mhdf_openSetMeta(), mhdf_openSetParents(), mhdf_SET_RANGE_BIT, mhdf_writeSetMetaWithOpt(), moab::WriteHDF5::ExportSet::offset, moab::WriteUtilIface::PARENTS, moab::DebugOutput::print(), moab::DebugOutput::printf(), moab::WriteHDF5::ExportSet::range, moab::IODebugTrack::record_io(), SET_CHILD, SET_CONTENT, SET_META, SET_PARENT, setChildrenOffset, setContentsOffset, setParentsOffset, setSet, size, moab::Range::size(), specialSets, moab::CpuTimer::time_elapsed(), topState, write_set_data(), writeProp, writeSetChildren, writeSetContents, writeSetParents, and writeSets.

Referenced by write_file_impl().

◆ write_sparse_ids()

ErrorCode moab::WriteHDF5::write_sparse_ids ( const TagDesc tag_data,
const Range range,
hid_t  table_handle,
size_t  table_size,
const char *  name = 0 
)
private

Write ID table for sparse tag.

Definition at line 1939 of file WriteHDF5.cpp.

1944 {
1945  ErrorCode rval;
1946  mhdf_Status status;
1947 
1948  CHECK_OPEN_HANDLES;
1949 
1950  std::string tname( name ? name : "<UNKNOWN TAG?>" );
1951  tname += " - Ids";
1952  IODebugTrack track( debugTrack, tname, table_size );
1953 
1954  // Set up data buffer for writing IDs
1955  size_t chunk_size = bufferSize / sizeof( wid_t );
1956  wid_t* id_buffer = (wid_t*)dataBuffer;
1957 
1958  // Write IDs of tagged entities.
1959  long remaining = range.size();
1960  long offset = tag_data.sparse_offset;
1961  long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
1962  if( tag_data.max_num_ents )
1963  {
1964  assert( tag_data.max_num_ents >= (unsigned long)remaining );
1965  num_writes = ( tag_data.max_num_ents + chunk_size - 1 ) / chunk_size;
1966  }
1967  Range::const_iterator iter = range.begin();
1968  while( remaining )
1969  {
1970  VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
1971 
1972  // Write "chunk_size" blocks of data
1973  long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
1974  remaining -= count;
1975  Range::const_iterator stop = iter;
1976  stop += count;
1977  Range tmp;
1978  ;
1979  tmp.merge( iter, stop );
1980  iter = stop;
1981  assert( tmp.size() == (unsigned)count );
1982 
1983  rval = range_to_id_list( tmp, id_buffer );
1984  CHK_MB_ERR_0( rval );
1985 
1986  // Write the data
1987  dbgOut.print( 3, " writing sparse tag entity chunk.\n" );
1988  track.record_io( offset, count );
1989  mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, count, id_type, id_buffer, writeProp, &status );
1990  CHK_MHDF_ERR_0( status );
1991 
1992  offset += count;
1993  --num_writes;
1994  } // while (remaining)
1995 
1996  // Do empty writes if necessary for parallel collective IO
1997  if( collectiveIO )
1998  {
1999  while( num_writes-- )
2000  {
2001  assert( writeProp != H5P_DEFAULT );
2002  dbgOut.print( 3, " writing empty sparse tag entity chunk.\n" );
2003  mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, 0, id_type, 0, writeProp, &status );
2004  CHK_MHDF_ERR_0( status );
2005  }
2006  }
2007 
2008  track.all_reduce();
2009  return MB_SUCCESS;
2010 }

References moab::IODebugTrack::all_reduce(), moab::Range::begin(), bufferSize, CHECK_OPEN_HANDLES, CHK_MB_ERR_0, CHK_MHDF_ERR_0, collectiveIO, dataBuffer, dbgOut, debugTrack, ErrorCode, id_type, moab::WriteHDF5::TagDesc::max_num_ents, MB_SUCCESS, moab::Range::merge(), mhdf_writeSparseTagEntitiesWithOpt(), moab::DebugOutput::print(), range_to_id_list(), moab::IODebugTrack::record_io(), moab::Range::size(), moab::WriteHDF5::TagDesc::sparse_offset, VALGRIND_MAKE_MEM_UNDEFINED, and writeProp.

Referenced by write_sparse_tag(), and write_var_len_tag().

◆ write_sparse_tag()

ErrorCode moab::WriteHDF5::write_sparse_tag ( const TagDesc tag_data,
const std::string &  tag_name,
DataType  tag_data_type,
hid_t  hdf5_data_type,
int  hdf5_type_size 
)
private

Write fixed-length tag data in sparse format.

Definition at line 2012 of file WriteHDF5.cpp.

2017 {
2018  ErrorCode rval;
2019  mhdf_Status status;
2020  hid_t tables[3];
2021  long table_size, data_size;
2022 
2023  CHECK_OPEN_HANDLES;
2024 
2025  // Get entities for which to write tag values
2026  Range range;
2027  rval = get_sparse_tagged_entities( tag_data, range );
2028 
2029  // Open tables to write info
2030  mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_size, tables, &status );
2031  CHK_MHDF_ERR_0( status );
2032  assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
2033  // Fixed-length tag
2034  assert( table_size == data_size );
2035 
2036  // Write IDs for tagged entities
2037  subState.start( "writing sparse ids for tag: ", name.c_str() );
2038  rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
2039  subState.end( rval );
2040  CHK_MB_ERR_2( rval, tables, status );
2041  mhdf_closeData( filePtr, tables[0], &status );
2042  CHK_MHDF_ERR_1( status, tables[1] );
2043 
2044  // Set up data buffer for writing tag values
2045  IODebugTrack track( debugTrack, name + " Data", data_size );
2046  subState.start( "writing sparse values for tag: ", name.c_str() );
2047  rval = write_tag_values( tag_data.tag_id, tables[1], tag_data.sparse_offset, range, mb_data_type, value_type,
2048  value_type_size, tag_data.max_num_ents, track );
2049  subState.end( rval );
2050  CHK_MB_ERR_0( rval );
2051  mhdf_closeData( filePtr, tables[1], &status );
2052  CHK_MHDF_ERR_0( status );
2053 
2054  track.all_reduce();
2055  return MB_SUCCESS;
2056 }

References moab::IODebugTrack::all_reduce(), CHECK_OPEN_HANDLES, CHK_MB_ERR_0, CHK_MB_ERR_2, CHK_MHDF_ERR_0, CHK_MHDF_ERR_1, debugTrack, ErrorCode, filePtr, get_sparse_tagged_entities(), moab::WriteHDF5::TagDesc::max_num_ents, MB_SUCCESS, mhdf_closeData(), mhdf_openSparseTagData(), moab::Range::size(), moab::WriteHDF5::TagDesc::sparse_offset, subState, moab::WriteHDF5::TagDesc::tag_id, write_sparse_ids(), and write_tag_values().

Referenced by write_tag().

◆ write_tag()

ErrorCode moab::WriteHDF5::write_tag ( const TagDesc tag_data,
double *  times 
)
private

Write tag for all entities.

Write tag information and data.

Note: Must have already written nodes, elem connectivity and sets so that entities have IDs assigned.

Definition at line 1888 of file WriteHDF5.cpp.

1889 {
1890  std::string name;
1891  ErrorCode rval = iFace->tag_get_name( tag_data.tag_id, name );
1892  if( MB_SUCCESS != rval ) return error( rval );
1893 
1894  CHECK_OPEN_HANDLES;
1895  debug_barrier();
1896  dbgOut.tprintf( 1, "Writing tag: \"%s\"\n", name.c_str() );
1897 
1898  int moab_size, elem_size, array_len;
1899  DataType moab_type;
1900  mhdf_TagDataType mhdf_type;
1901  hid_t hdf5_type;
1902  rval = get_tag_size( tag_data.tag_id, moab_type, moab_size, elem_size, array_len, mhdf_type, hdf5_type );
1903  if( MB_SUCCESS != rval ) return error( rval );
1904 
1905  CpuTimer timer;
1906  if( array_len == MB_VARIABLE_LENGTH && tag_data.write_sparse )
1907  {
1908  dbgOut.printf( 2, "Writing sparse data for var-len tag: \"%s\"\n", name.c_str() );
1909  rval = write_var_len_tag( tag_data, name, moab_type, hdf5_type, elem_size );
1910  times[VARLEN_TAG_TIME] += timer.time_elapsed();
1911  }
1912  else
1913  {
1914  int data_len = elem_size;
1915  if( moab_type != MB_TYPE_BIT ) data_len *= array_len;
1916  if( tag_data.write_sparse )
1917  {
1918  dbgOut.printf( 2, "Writing sparse data for tag: \"%s\"\n", name.c_str() );
1919  rval = write_sparse_tag( tag_data, name, moab_type, hdf5_type, data_len );
1920  times[SPARSE_TAG_TIME] += timer.time_elapsed();
1921  }
1922  for( size_t i = 0; MB_SUCCESS == rval && i < tag_data.dense_list.size(); ++i )
1923  {
1924  const ExportSet* set = find( tag_data.dense_list[i] );
1925  assert( 0 != set );
1926  debug_barrier();
1927  dbgOut.printf( 2, "Writing dense data for tag: \"%s\" on group \"%s\"\n", name.c_str(), set->name() );
1928  subState.start( "writing dense data for tag: ", ( name + ":" + set->name() ).c_str() );
1929  rval = write_dense_tag( tag_data, *set, name, moab_type, hdf5_type, data_len );
1930  subState.end( rval );
1931  }
1932  times[DENSE_TAG_TIME] += timer.time_elapsed();
1933  }
1934 
1935  H5Tclose( hdf5_type );
1936  return MB_SUCCESS == rval ? MB_SUCCESS : error( rval );
1937 }

References CHECK_OPEN_HANDLES, dbgOut, debug_barrier, moab::WriteHDF5::TagDesc::dense_list, DENSE_TAG_TIME, moab::error(), ErrorCode, find(), get_tag_size(), iFace, MB_SUCCESS, MB_TYPE_BIT, MB_VARIABLE_LENGTH, moab::WriteHDF5::ExportSet::name(), moab::DebugOutput::printf(), SPARSE_TAG_TIME, subState, moab::Interface::tag_get_name(), moab::WriteHDF5::TagDesc::tag_id, moab::CpuTimer::time_elapsed(), moab::DebugOutput::tprintf(), VARLEN_TAG_TIME, write_dense_tag(), moab::WriteHDF5::TagDesc::write_sparse, write_sparse_tag(), and write_var_len_tag().

Referenced by write_file_impl().

◆ write_tag_values()

ErrorCode moab::WriteHDF5::write_tag_values ( Tag  tag_id,
hid_t  data_table,
unsigned long  data_offset,
const Range range,
DataType  tag_data_type,
hid_t  hdf5_data_type,
int  hdf5_type_size,
unsigned long  max_num_ents,
IODebugTrack debug_track 
)
private

Write data for fixed-size tag.

Definition at line 2321 of file WriteHDF5.cpp.

2330 {
2331  mhdf_Status status;
2332 
2334 
2335  // Set up data buffer for writing tag values
2336  size_t chunk_size = bufferSize / value_type_size;
2337  assert( chunk_size > 0 );
2338  char* tag_buffer = (char*)dataBuffer;
2339 
2340  // Write the tag values
2341  size_t remaining = range_in.size();
2342  size_t offset = offset_in;
2343  Range::const_iterator iter = range_in.begin();
2344  long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
2345  if( max_num_ents )
2346  {
2347  assert( max_num_ents >= remaining );
2348  num_writes = ( max_num_ents + chunk_size - 1 ) / chunk_size;
2349  }
2350  while( remaining )
2351  {
2353 
2354  // Write "chunk_size" blocks of data
2355  long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
2356  remaining -= count;
2357  memset( tag_buffer, 0, count * value_type_size );
2358  Range::const_iterator stop = iter;
2359  stop += count;
2360  Range range;
2361  range.merge( iter, stop );
2362  iter = stop;
2363  assert( range.size() == (unsigned)count );
2364 
2365  ErrorCode rval = iFace->tag_get_data( tag_id, range, tag_buffer );
2366  CHK_MB_ERR_0( rval );
2367 
2368  // Convert EntityHandles to file ids
2369  if( mb_data_type == MB_TYPE_HANDLE )
2370  convert_handle_tag( reinterpret_cast< EntityHandle* >( tag_buffer ),
2371  count * value_type_size / sizeof( EntityHandle ) );
2372 
2373  // Write the data
2374  dbgOut.print( 2, " writing tag value chunk.\n" );
2375  track.record_io( offset, count );
2376  assert( value_type > 0 );
2377  mhdf_writeTagValuesWithOpt( data_table, offset, count, value_type, tag_buffer, writeProp, &status );
2378  CHK_MHDF_ERR_0( status );
2379 
2380  offset += count;
2381  --num_writes;
2382  } // while (remaining)
2383 
2384  // Do empty writes if necessary for parallel collective IO
2385  if( collectiveIO )
2386  {
2387  while( num_writes-- )
2388  {
2389  assert( writeProp != H5P_DEFAULT );
2390  dbgOut.print( 2, " writing empty tag value chunk.\n" );
2391  assert( value_type > 0 );
2392  mhdf_writeTagValuesWithOpt( data_table, offset, 0, value_type, 0, writeProp, &status );
2393  CHK_MHDF_ERR_0( status );
2394  }
2395  }
2396 
2397  track.all_reduce();
2398  return MB_SUCCESS;
2399 }

References moab::IODebugTrack::all_reduce(), moab::Range::begin(), bufferSize, CHECK_OPEN_HANDLES, CHK_MB_ERR_0, CHK_MHDF_ERR_0, collectiveIO, convert_handle_tag(), dataBuffer, dbgOut, ErrorCode, iFace, MB_SUCCESS, MB_TYPE_HANDLE, moab::Range::merge(), mhdf_writeTagValuesWithOpt(), moab::DebugOutput::print(), moab::IODebugTrack::record_io(), moab::Range::size(), moab::Interface::tag_get_data(), VALGRIND_MAKE_MEM_UNDEFINED, and writeProp.

Referenced by write_dense_tag(), and write_sparse_tag().

◆ write_util()

WriteUtilIface* moab::WriteHDF5::write_util ( )
inline

Definition at line 204 of file WriteHDF5.hpp.

205  {
206  return writeUtil;
207  }

References writeUtil.

◆ write_var_len_data()

ErrorCode moab::WriteHDF5::write_var_len_data ( const TagDesc tag_data,
const Range range,
hid_t  table,
size_t  table_size,
bool  handle_tag,
hid_t  hdf_type,
int  type_size,
const char *  name = 0 
)
private

Write tag value data_set for a variable-length tag.

Definition at line 2141 of file WriteHDF5.cpp.

2149 {
2150  ErrorCode rval;
2151  mhdf_Status status;
2152 
2154  assert( !handle_tag || sizeof( EntityHandle ) == type_size );
2155 
2156  std::string tname( name ? name : "<UNKNOWN TAG?>" );
2157  tname += " - Values";
2158  IODebugTrack track( debugTrack, tname, table_size );
2159 
2160  const size_t buffer_size = bufferSize / type_size;
2161 
2162  size_t num_writes = ( table_size + buffer_size - 1 ) / buffer_size;
2163  if( collectiveIO )
2164  {
2165  assert( tag_data.max_num_vals > 0 );
2166  num_writes = ( tag_data.max_num_vals + buffer_size - 1 ) / buffer_size;
2167  }
2168 
2169  unsigned char* buffer = (unsigned char*)dataBuffer;
2170  const void* prev_data = 0; // Data left over from prev iteration
2171  size_t prev_len = 0;
2172  Range::const_iterator iter = range.begin();
2173  long offset = tag_data.var_data_offset;
2174  while( prev_data || iter != range.end() )
2175  {
2176  size_t count = 0;
2177  if( prev_data )
2178  {
2179  size_t len;
2180  const void* ptr = prev_data;
2181  if( prev_len <= buffer_size )
2182  {
2183  len = prev_len;
2184  prev_data = 0;
2185  prev_len = 0;
2186  }
2187  else
2188  {
2189  len = buffer_size;
2190  prev_data = ( (const char*)prev_data ) + buffer_size * type_size;
2191  prev_len -= buffer_size;
2192  }
2193 
2194  if( handle_tag )
2195  convert_handle_tag( (const EntityHandle*)ptr, (EntityHandle*)buffer, len );
2196  else
2197  memcpy( buffer, ptr, len * type_size );
2198  count = len;
2199  }
2200 
2201  for( ; count < buffer_size && iter != range.end(); ++iter )
2202  {
2203  int len;
2204  const void* ptr;
2205  rval = iFace->tag_get_by_ptr( tag_data.tag_id, &*iter, 1, &ptr, &len );
2206  CHK_MB_ERR_0( rval );
2207  if( len + count > buffer_size )
2208  {
2209  prev_len = len + count - buffer_size;
2210  len = buffer_size - count;
2211  prev_data = ( (const char*)ptr ) + len * type_size;
2212  }
2213 
2214  if( handle_tag )
2215  convert_handle_tag( (const EntityHandle*)ptr, ( (EntityHandle*)buffer ) + count, len );
2216  else
2217  memcpy( buffer + count * type_size, ptr, len * type_size );
2218  count += len;
2219  }
2220 
2221  track.record_io( offset, count );
2222  mhdf_writeTagValuesWithOpt( table, offset, count, hdf_type, buffer, writeProp, &status );
2223  offset += count;
2224  CHK_MHDF_ERR_0( status );
2225  --num_writes;
2226  }
2227 
2228  // Do empty writes if necessary for parallel collective IO
2229  if( collectiveIO )
2230  {
2231  while( num_writes-- )
2232  {
2233  assert( writeProp != H5P_DEFAULT );
2234  dbgOut.print( 3, " writing empty var-len tag data chunk.\n" );
2235  mhdf_writeTagValuesWithOpt( table, 0, 0, hdf_type, 0, writeProp, &status );
2236  CHK_MHDF_ERR_0( status );
2237  }
2238  }
2239 
2240  track.all_reduce();
2241  return MB_SUCCESS;
2242 }

References moab::IODebugTrack::all_reduce(), moab::Range::begin(), buffer, bufferSize, CHECK_OPEN_HANDLES, CHK_MB_ERR_0, CHK_MHDF_ERR_0, collectiveIO, convert_handle_tag(), dataBuffer, dbgOut, debugTrack, moab::Range::end(), ErrorCode, iFace, moab::WriteHDF5::TagDesc::max_num_vals, MB_SUCCESS, mhdf_writeTagValuesWithOpt(), moab::DebugOutput::print(), moab::IODebugTrack::record_io(), moab::Interface::tag_get_by_ptr(), moab::WriteHDF5::TagDesc::tag_id, moab::WriteHDF5::TagDesc::var_data_offset, and writeProp.

Referenced by write_var_len_tag().

◆ write_var_len_indices()

ErrorCode moab::WriteHDF5::write_var_len_indices ( const TagDesc tag_data,
const Range range,
hid_t  idx_table,
size_t  table_size,
int  type_size,
const char *  name = 0 
)
private

Write end index data_set for a variable-length tag.

Definition at line 2058 of file WriteHDF5.cpp.

2064 {
2065  ErrorCode rval;
2066  mhdf_Status status;
2067 
2069 
2070  std::string tname( name ? name : "<UNKNOWN TAG?>" );
2071  tname += " - End Indices";
2072  IODebugTrack track( debugTrack, tname, table_size );
2073 
2074  // Set up data buffer for writing indices
2075  size_t chunk_size = bufferSize / ( std::max( sizeof( void* ), sizeof( long ) ) + sizeof( int ) );
2076  mhdf_index_t* idx_buffer = (mhdf_index_t*)dataBuffer;
2077  const void** junk = (const void**)dataBuffer;
2078  int* size_buffer = (int*)( dataBuffer + chunk_size * std::max( sizeof( void* ), sizeof( mhdf_index_t ) ) );
2079 
2080  // Write IDs of tagged entities.
2081  long data_offset = tag_data.var_data_offset - 1; // Offset at which to write data buffer
2082  size_t remaining = range.size();
2083  size_t offset = tag_data.sparse_offset;
2084  size_t num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
2085  if( tag_data.max_num_ents )
2086  {
2087  assert( tag_data.max_num_ents >= (unsigned long)remaining );
2088  num_writes = ( tag_data.max_num_ents + chunk_size - 1 ) / chunk_size;
2089  }
2090  Range::const_iterator iter = range.begin();
2091  while( remaining )
2092  {
2094 
2095  // Write "chunk_size" blocks of data
2096  size_t count = remaining > chunk_size ? chunk_size : remaining;
2097  remaining -= count;
2098  Range::const_iterator stop = iter;
2099  stop += count;
2100  Range tmp;
2101  tmp.merge( iter, stop );
2102  iter = stop;
2103  assert( tmp.size() == (unsigned)count );
2104 
2105  rval = iFace->tag_get_by_ptr( tag_data.tag_id, tmp, junk, size_buffer );
2106  CHK_MB_ERR_0( rval );
2107 
2108  // Calculate end indices
2109  dbgOut.print( 3, " writing var-len tag offset chunk.\n" );
2110  track.record_io( offset, count );
2111  for( size_t i = 0; i < count; ++i )
2112  {
2113  data_offset += size_buffer[i];
2114  idx_buffer[i] = data_offset;
2115  }
2116 
2117  // Write
2118  mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, count, MHDF_INDEX_TYPE, idx_buffer, writeProp, &status );
2119  CHK_MHDF_ERR_0( status );
2120 
2121  offset += count;
2122  --num_writes;
2123  } // while (remaining)
2124 
2125  // Do empty writes if necessary for parallel collective IO
2126  if( collectiveIO )
2127  {
2128  while( num_writes-- )
2129  {
2130  assert( writeProp != H5P_DEFAULT );
2131  dbgOut.print( 3, " writing empty sparse tag entity chunk.\n" );
2132  mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, 0, id_type, 0, writeProp, &status );
2133  CHK_MHDF_ERR_0( status );
2134  }
2135  }
2136 
2137  track.all_reduce();
2138  return MB_SUCCESS;
2139 }

References moab::IODebugTrack::all_reduce(), moab::Range::begin(), bufferSize, CHECK_OPEN_HANDLES, CHK_MB_ERR_0, CHK_MHDF_ERR_0, collectiveIO, dataBuffer, dbgOut, debugTrack, ErrorCode, id_type, iFace, moab::WriteHDF5::TagDesc::max_num_ents, MB_SUCCESS, moab::Range::merge(), MHDF_INDEX_TYPE, mhdf_writeSparseTagIndicesWithOpt(), moab::DebugOutput::print(), moab::IODebugTrack::record_io(), moab::Range::size(), moab::WriteHDF5::TagDesc::sparse_offset, moab::Interface::tag_get_by_ptr(), moab::WriteHDF5::TagDesc::tag_id, VALGRIND_MAKE_MEM_UNDEFINED, moab::WriteHDF5::TagDesc::var_data_offset, and writeProp.

Referenced by write_var_len_tag().

◆ write_var_len_tag()

ErrorCode moab::WriteHDF5::write_var_len_tag ( const TagDesc tag_info,
const std::string &  tag_name,
DataType  tag_data_type,
hid_t  hdf5_type,
int  hdf5_type_size 
)
private

Write variable-length tag data.

Definition at line 2244 of file WriteHDF5.cpp.

2249 {
2250  ErrorCode rval;
2251  mhdf_Status status;
2252  hid_t tables[3];
2253  long table_size;
2254  long data_table_size;
2255 
2257 
2258  // Get entities for which to write tag values
2259  Range range;
2260  rval = get_sparse_tagged_entities( tag_data, range );
2261 
2262  // Open tables to write info
2263  mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_table_size, tables, &status );
2264  CHK_MHDF_ERR_0( status );
2265  assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
2266 
2267  // Write IDs for tagged entities
2268  subState.start( "writing ids for var-len tag: ", name.c_str() );
2269  rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
2270  subState.end( rval );
2271  CHK_MB_ERR_2( rval, tables, status );
2272  mhdf_closeData( filePtr, tables[0], &status );
2273  CHK_MHDF_ERR_2( status, tables + 1 );
2274 
2275  // Write offsets for tagged entities
2276  subState.start( "writing indices for var-len tag: ", name.c_str() );
2277  rval = write_var_len_indices( tag_data, range, tables[2], table_size, type_size, name.c_str() );
2278  subState.end( rval );
2279  CHK_MB_ERR_1( rval, tables[1], status );
2280  mhdf_closeData( filePtr, tables[2], &status );
2281  CHK_MHDF_ERR_1( status, tables[1] );
2282 
2283  // Write the actual tag data
2284  subState.start( "writing values for var-len tag: ", name.c_str() );
2285  rval = write_var_len_data( tag_data, range, tables[1], data_table_size, mb_data_type == MB_TYPE_HANDLE, hdf_type,
2286  type_size, name.c_str() );
2287  subState.end( rval );
2288  CHK_MB_ERR_0( rval );
2289  mhdf_closeData( filePtr, tables[1], &status );
2290  CHK_MHDF_ERR_0( status );
2291 
2292  return MB_SUCCESS;
2293 }

References CHECK_OPEN_HANDLES, CHK_MB_ERR_0, CHK_MB_ERR_1, CHK_MB_ERR_2, CHK_MHDF_ERR_0, CHK_MHDF_ERR_1, CHK_MHDF_ERR_2, ErrorCode, filePtr, get_sparse_tagged_entities(), MB_SUCCESS, MB_TYPE_HANDLE, mhdf_closeData(), mhdf_openSparseTagData(), moab::Range::size(), moab::WriteHDF5::TagDesc::sparse_offset, subState, write_sparse_ids(), write_var_len_data(), and write_var_len_indices().

Referenced by write_tag().

Member Data Documentation

◆ bufferSize

size_t moab::WriteHDF5::bufferSize
protected

◆ collectiveIO

bool moab::WriteHDF5::collectiveIO
protected

◆ dataBuffer

char* moab::WriteHDF5::dataBuffer
protected

◆ dbgOut

◆ debugTrack

bool moab::WriteHDF5::debugTrack
protected

◆ errorHandler

HDF5ErrorHandler moab::WriteHDF5::errorHandler
protected

Store old HDF5 error handling function.

Definition at line 211 of file WriteHDF5.hpp.

Referenced by init(), and write_finished().

◆ exportList

◆ filePtr

◆ id_type

const hid_t moab::WriteHDF5::id_type = get_id_type()
static

◆ idMap

◆ iFace

◆ maxNumSetChildren

long moab::WriteHDF5::maxNumSetChildren
protected

◆ maxNumSetContents

long moab::WriteHDF5::maxNumSetContents
protected

The largest number of values to write for any processor (needed to do collective IO).

Definition at line 343 of file WriteHDF5.hpp.

Referenced by moab::WriteHDF5Parallel::create_meshset_tables(), initialize_mesh(), serial_create_file(), and write_set_data().

◆ maxNumSetParents

long moab::WriteHDF5::maxNumSetParents
protected

◆ nodeSet

◆ parallelWrite

bool moab::WriteHDF5::parallelWrite
protected

True if doing parallel write.

Definition at line 384 of file WriteHDF5.hpp.

Referenced by write_file(), and write_file_impl().

◆ setChildrenOffset

unsigned long moab::WriteHDF5::setChildrenOffset
protected

Offset into set children table (zero except for parallel)

Definition at line 340 of file WriteHDF5.hpp.

Referenced by moab::WriteHDF5Parallel::create_meshset_tables(), serial_create_file(), write_set_data(), and write_sets().

◆ setContentsOffset

unsigned long moab::WriteHDF5::setContentsOffset
protected

Offset into set contents table (zero except for parallel)

Definition at line 338 of file WriteHDF5.hpp.

Referenced by moab::WriteHDF5Parallel::create_meshset_tables(), serial_create_file(), write_set_data(), and write_sets().

◆ setParentsOffset

unsigned long moab::WriteHDF5::setParentsOffset
protected

◆ setSet

◆ specialSets

std::vector< SpecialSetData > moab::WriteHDF5::specialSets
protected

Array of special/shared sets, in order of handle value.

Definition at line 373 of file WriteHDF5.hpp.

Referenced by moab::WriteHDF5Parallel::communicate_shared_set_data(), count_set_size(), find_set_data(), write_set_data(), and write_sets().

◆ subState

MPEState moab::WriteHDF5::subState
staticprotected

◆ tagList

std::list< TagDesc > moab::WriteHDF5::tagList
protected

◆ topState

MPEState moab::WriteHDF5::topState
staticprotected

◆ writeProp

hid_t moab::WriteHDF5::writeProp
protected

Property set to pass to H5Dwrite calls. For serial, should be H5P_DEFAULT. For parallel, may request collective IO.

Definition at line 393 of file WriteHDF5.hpp.

Referenced by moab::WriteHDF5Parallel::parallel_create_file(), write_adjacencies(), write_elems(), write_file(), write_nodes(), write_set_data(), write_sets(), write_sparse_ids(), write_tag_values(), write_var_len_data(), and write_var_len_indices().

◆ writeSetChildren

bool moab::WriteHDF5::writeSetChildren
protected

◆ writeSetContents

bool moab::WriteHDF5::writeSetContents
protected

◆ writeSetParents

bool moab::WriteHDF5::writeSetParents
protected

◆ writeSets

bool moab::WriteHDF5::writeSets
protected

Flags indicating if set data should be written. For the normal (non-parallel) case, these values will depend only on whether or not there is any data to be written. For parallel meshes, opening the data table is collective, so the values must depend on whether or not any processor has meshsets to be written.

Definition at line 351 of file WriteHDF5.hpp.

Referenced by moab::WriteHDF5Parallel::create_meshset_tables(), serial_create_file(), and write_sets().

◆ writeTagDense

bool moab::WriteHDF5::writeTagDense
protected

True if writing dense-formatted tag data.

Definition at line 388 of file WriteHDF5.hpp.

Referenced by moab::WriteHDF5Parallel::create_tag_tables(), serial_create_file(), and write_file().

◆ writeUtil

WriteUtilIface* moab::WriteHDF5::writeUtil
protected

The documentation for this class was generated from the following files: