Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
moab::ReadHDF5VarLen Class Reference [abstract]

Read variable-length data from 1-D array dataset. More...

#include <ReadHDF5VarLen.hpp>

+ Collaboration diagram for moab::ReadHDF5VarLen:

Public Member Functions

 ReadHDF5VarLen (DebugOutput &debug_output, void *buffer, size_t buffer_size)
 Constructor. More...
 
virtual ~ReadHDF5VarLen ()
 
ErrorCode read_data (ReadHDF5Dataset &data_set, const Range &offsets, EntityHandle start_offset, hid_t data_type, const Range &file_ids, const std::vector< unsigned > &vals_per_ent, const Range &ranged_file_ids)
 Do actual read of data set. More...
 
ErrorCode read_offsets (ReadHDF5Dataset &data_set, const Range &file_ids, EntityHandle start_file_id, EntityHandle nudge, Range &offsets_out, std::vector< unsigned > &counts_out)
 Read set description table or offset vector for var-len tags or old-format poly(gon|hedra) connectivity. More...
 
ErrorCode read (ReadHDF5Dataset &offset_data, ReadHDF5Dataset &value_data, const Range &file_ids, EntityHandle start_file_id, hid_t data_type, const Range *ranged=0)
 

Protected Member Functions

virtual ErrorCode store_data (EntityHandle file_id, void *data, long num_data, bool ranged)=0
 Store data list for a single entity. More...
 

Protected Attributes

DebugOutput& dbgOut
 

Static Private Member Functions

static bool is_ranged (EntityHandle file_id, Range::const_iterator &ranged_iter, Range::const_iterator ranged_end)
 Test if passed file_id is the value pointed to by ranged_iter, and if so, increment ranged_iter. More...
 

Private Attributes

void *const dataBuffer
 
const size_t bufferSize
 

Detailed Description

Read variable-length data from 1-D array dataset.

Utility class for reading variable-length data from an HDF5 dataset. Used for reading set contents, set parents, set children, polygon and polyhedron connectivity, and variable-length tag data.

This is an abstract class. The pure virtual store_data method must be implemented to create a concrete instance.

Definition at line 32 of file ReadHDF5VarLen.hpp.

Constructor & Destructor Documentation

◆ ReadHDF5VarLen()

moab::ReadHDF5VarLen::ReadHDF5VarLen ( DebugOutput &  debug_output,
void *  buffer,
size_t  buffer_size 
)
inline

Constructor.

Parameters
buffer — A temporary buffer to use during read
buffer_size — Size of buffer, in bytes.

Definition at line 68 of file ReadHDF5VarLen.hpp.

69  : dbgOut( debug_output ), dataBuffer( buffer ), bufferSize( buffer_size )
70  {
71  }

◆ ~ReadHDF5VarLen()

virtual moab::ReadHDF5VarLen::~ReadHDF5VarLen ( )
inlinevirtual

Definition at line 73 of file ReadHDF5VarLen.hpp.

73 {}

Member Function Documentation

◆ is_ranged()

bool moab::ReadHDF5VarLen::is_ranged ( EntityHandle  file_id,
Range::const_iterator ranged_iter,
Range::const_iterator  ranged_end 
)
staticprivate

Test if passed file_id is the value pointed to by ranged_iter, and if so, increment ranged_iter.

Definition at line 14 of file ReadHDF5VarLen.cpp.

17 {
18  if( ranged_iter == range_end ) return false;
19 
20  assert( file_id <= *ranged_iter );
21  if( *ranged_iter != file_id ) return false;
22 
23  ++ranged_iter;
24  return true;
25 }

Referenced by read_data().

◆ read()

ErrorCode moab::ReadHDF5VarLen::read ( ReadHDF5Dataset offset_data,
ReadHDF5Dataset value_data,
const Range file_ids,
EntityHandle  start_file_id,
hid_t  data_type,
const Range ranged = 0 
)
inline

Definition at line 135 of file ReadHDF5VarLen.hpp.

141  {
142  ErrorCode rval;
143  const EntityHandle nudge = 1;
144  Range offsets;
145  std::vector< unsigned > counts;
146  rval = read_offsets( offset_data, file_ids, start_file_id, nudge, offsets, counts );
147  if( MB_SUCCESS != rval ) return rval;
148  Range empty;
149  rval = read_data( value_data, offsets, nudge, data_type, file_ids, counts, ranged ? *ranged : empty );
150  return rval;
151  }

References ErrorCode, MB_SUCCESS, read_data(), and read_offsets().

◆ read_data()

ErrorCode moab::ReadHDF5VarLen::read_data ( ReadHDF5Dataset data_set,
const Range offsets,
EntityHandle  start_offset,
hid_t  data_type,
const Range file_ids,
const std::vector< unsigned > &  vals_per_ent,
const Range ranged_file_ids 
)

Do actual read of data set.

Parameters
data_set — The data set to read.
file_ids — The file ids of the entities to read.
start_file_id — The file id corresponding to the first row of the dataset
data_type — The desired, in-memory data type for values
vals_per_ent — The number of values for each entity
ranged_file_ids — Those file ids for which the 'ranged' argument to store_data should be passed as true.

Definition at line 27 of file ReadHDF5VarLen.cpp.

34 {
35  ErrorCode rval;
36  const size_t value_size = H5Tget_size( data_type );
37  const size_t buffer_size = bufferSize / value_size;
38  unsigned char* const data_buffer = reinterpret_cast< unsigned char* >( dataBuffer );
39  std::vector< unsigned char > partial; // for when we read only part of the contents of a set/entity
40  Range::const_iterator fileid_iter = file_ids.begin();
41  Range::const_iterator ranged_iter = ranged_file_ids.begin();
42  std::vector< unsigned >::const_iterator count_iter = vals_per_ent.begin();
43  size_t count, offset;
44  bool ranged;
45  int nn = 0;
46 
47  assert( file_ids.size() == vals_per_ent.size() );
48 
49  try
50  {
51  data_set.set_file_ids( offsets, start_offset, buffer_size, data_type );
52  }
53  catch( ReadHDF5Dataset::Exception& )
54  {
55  return MB_FAILURE;
56  }
57 
58  dbgOut.printf( 3, "Reading %s in %lu chunks\n", data_set.get_debug_desc(), data_set.get_read_count() );
59 
60  while( !data_set.done() )
61  {
62  dbgOut.printf( 3, "Reading chunk %d of %s\n", ++nn, data_set.get_debug_desc() );
63  try
64  {
65  data_set.read( data_buffer, count );
66  }
67  catch( ReadHDF5Dataset::Exception& )
68  {
69  return MB_FAILURE;
70  }
71 
72  assert( 0 == count || fileid_iter != file_ids.end() );
73 
74  // Handle 'special' case where we read some, but not all
75  // of the data for an entity during the last iteration.
76  offset = 0;
77  if( !partial.empty() )
78  { // didn't read all of previous entity
79  assert( fileid_iter != file_ids.end() );
80  assert( 0 == ( partial.size() % value_size ) );
81  size_t num_prev = partial.size() / value_size;
82  offset = *count_iter - num_prev;
83  if( offset > count )
84  { // still don't have all
85  partial.insert( partial.end(), data_buffer, data_buffer + count * value_size );
86  continue;
87  }
88 
89  partial.insert( partial.end(), data_buffer, data_buffer + offset * value_size );
90 
91  ranged = is_ranged( *fileid_iter, ranged_iter, ranged_file_ids.end() );
92  assert( partial.size() == *count_iter * value_size );
93  rval = store_data( *fileid_iter, &partial[0], *count_iter, ranged );
94  if( MB_SUCCESS != rval ) return rval;
95 
96  ++count_iter;
97  ++fileid_iter;
98  partial.clear();
99  }
100 
101  // Process contents for all entities for which we
102  // have read the complete list
103  while( count_iter != vals_per_ent.end() && offset + *count_iter <= count )
104  {
105  assert( fileid_iter != file_ids.end() );
106  ranged = is_ranged( *fileid_iter, ranged_iter, ranged_file_ids.end() );
107  rval = store_data( *fileid_iter, data_buffer + offset * value_size, *count_iter, ranged );
108  if( MB_SUCCESS != rval ) return rval;
109 
110  offset += *count_iter;
111  ++count_iter;
112  ++fileid_iter;
113  }
114 
115  // If we did not read all of the final entity,
116  // store what we did read to be processed in the
117  // next iteration
118  if( offset < count )
119  {
120  assert( partial.empty() );
121  partial.insert( partial.end(), data_buffer + offset * value_size, data_buffer + count * value_size );
122  }
123  }
124  // NOTE: If the last set is empty, we will not process it here
125  // assert(fileid_iter == file_ids.end());
126 #ifndef NDEBUG
127  for( ; fileid_iter != file_ids.end(); ++fileid_iter )
128  {
129  assert( 0 == *count_iter );
130  ++count_iter;
131  }
132 #endif
133  return MB_SUCCESS;
134 }

References moab::Range::begin(), bufferSize, dataBuffer, dbgOut, moab::ReadHDF5Dataset::done(), moab::Range::end(), ErrorCode, moab::ReadHDF5Dataset::get_debug_desc(), moab::ReadHDF5Dataset::get_read_count(), is_ranged(), MB_SUCCESS, moab::DebugOutput::printf(), moab::ReadHDF5Dataset::read(), moab::ReadHDF5Dataset::set_file_ids(), moab::Range::size(), and store_data().

Referenced by read().

◆ read_offsets()

ErrorCode moab::ReadHDF5VarLen::read_offsets ( ReadHDF5Dataset data_set,
const Range file_ids,
EntityHandle  start_file_id,
EntityHandle  nudge,
Range offsets_out,
std::vector< unsigned > &  counts_out 
)

Read set description table or offset vector for var-len tags or old-format poly(gon|hedra) connectivity.

Parameters
data_set — The data set to read.
file_ids — The file ids of the entities to read.
start_file_id — The file id corresponding to the first row of the dataset
num_columns — The number of columns of offsets in the dataset
indices — Array of length num_columns containing the indices of the columns to read.
nudge — Amount by which to offset values in offsets_out to avoid putting zeros in Range. Must be greater than 0. Probably 1.
offsets_out — An array of length num_columns which will be populated with the resulting list of offsets into the contents list calculated from reading the offsets from the passed data set.
counts_out — An array of length num_columns of std::vectors, where each vector will be filled with one value per file ID indicating the length of the data for the corresponding file ID.
ranged_file_ids — If non-null, the last column of the table will be read and tested for the ranged bit. For all file_ids for which the ranged bit is set, the file ID will be added to this list.

Definition at line 291 of file ReadHDF5VarLen.cpp.

297 {
298 
299  // Use hints to make sure insertion into ranges is O(1)
300  offsets_out.clear();
301  counts_out.clear();
302  counts_out.reserve( file_ids.size() );
303  Range::iterator hint;
304 
305  // Calculate which rows we need to read from the offsets table
306  Range rows;
307  hint = rows.begin();
308  Range::const_pair_iterator pair = file_ids.const_pair_begin();
309  // special case if reading first entity in dataset, because
310  // there is no previous end value.
311  if( pair != file_ids.const_pair_end() && pair->first == start_file_id )
312  {
313  hint = rows.insert( nudge, pair->second - start_file_id + nudge );
314  ++pair;
315  }
316  while( pair != file_ids.const_pair_end() )
317  {
318  hint = rows.insert( hint, pair->first - start_file_id + nudge - 1, pair->second - start_file_id + nudge );
319  ++pair;
320  }
321 
322  // set up read of offsets dataset
323  hsize_t buffer_size = bufferSize / sizeof( hssize_t );
324  hssize_t* buffer = reinterpret_cast< hssize_t* >( dataBuffer );
325  data_set.set_file_ids( rows, nudge, buffer_size, H5T_NATIVE_HSSIZE );
326  hssize_t prev_end;
327  bool have_prev_end = false;
328  // If we're reading the first row of the table, then the
329  // previous end is implicitly -1.
330  if( !file_ids.empty() && file_ids.front() == start_file_id )
331  {
332  prev_end = -1;
333  have_prev_end = true;
334  }
335 
336  dbgOut.printf( 3, "Reading %s in %lu chunks\n", data_set.get_debug_desc(), data_set.get_read_count() );
337 
338  // read offset table
339  size_t count, offset;
340  Range::const_iterator fiter = file_ids.begin();
341  hint = offsets_out.begin();
342  int nn = 0;
343  while( !data_set.done() )
344  {
345  dbgOut.printf( 3, "Reading chunk %d of %s\n", ++nn, data_set.get_debug_desc() );
346  try
347  {
348  data_set.read( buffer, count );
349  }
350  catch( ReadHDF5Dataset::Exception& )
351  {
352  return MB_FAILURE;
353  }
354  if( !count ) // might have been NULL read for collective IO
355  continue;
356 
357  // If the previous end values were read in the previous iteration,
358  // then they're stored in prev_end.
359  offset = 0;
360  if( have_prev_end )
361  {
362  counts_out.push_back( buffer[0] - prev_end );
363  hint = offsets_out.insert( hint, prev_end + 1 + nudge, buffer[0] + nudge );
364  ++fiter;
365  offset = 1;
366  have_prev_end = false;
367  }
368 
369  while( offset < count )
370  {
371  assert( fiter != file_ids.end() );
372  // whenever we get to a gap between blocks we need to
373  // advance one step because we read an extra end id
374  // preceding each block
375  if( fiter == fiter.start_of_block() )
376  {
377  if( offset == count - 1 ) break;
378  ++offset;
379  }
380 
381  size_t s = buffer[offset - 1] + 1;
382  size_t e = buffer[offset];
383  counts_out.push_back( e - s + 1 );
384  hint = offsets_out.insert( hint, s + nudge, e + nudge );
385 
386  ++fiter;
387  ++offset;
388  }
389 
390  // If we did not end on the boundary between two blocks,
391  // then we need to save the end indices for the final entry
392  // for use in the next iteration. Similarly, if we ended
393  // with extra values that were read with the express intention
394  // of getting the previous end values for a block, we need to
395  // save them. This case only arises if we hit the break in
396  // the above loop.
397  if( fiter != fiter.start_of_block() || offset < count )
398  {
399  assert( !have_prev_end );
400  if( offset == count )
401  {
402  --offset;
403  assert( fiter != fiter.start_of_block() );
404  }
405  else
406  {
407  assert( offset + 1 == count );
408  assert( fiter == fiter.start_of_block() );
409  }
410  have_prev_end = true;
411  prev_end = buffer[offset];
412  }
413  }
414  assert( !have_prev_end );
415  assert( fiter == file_ids.end() );
416 
417  return MB_SUCCESS;
418 }

References moab::Range::begin(), buffer, bufferSize, moab::Range::clear(), moab::Range::const_pair_begin(), moab::Range::const_pair_end(), dataBuffer, dbgOut, moab::ReadHDF5Dataset::done(), moab::Range::empty(), moab::Range::end(), moab::Range::front(), moab::ReadHDF5Dataset::get_debug_desc(), moab::ReadHDF5Dataset::get_read_count(), moab::Range::insert(), MB_SUCCESS, moab::DebugOutput::printf(), moab::ReadHDF5Dataset::read(), moab::ReadHDF5Dataset::set_file_ids(), moab::Range::size(), and moab::Range::const_iterator::start_of_block().

Referenced by read().

◆ store_data()

virtual ErrorCode moab::ReadHDF5VarLen::store_data ( EntityHandle  file_id,
void *  data,
long  num_data,
bool  ranged 
)
protectedpure virtual

Store data list for a single entity.

The is the pure virtual method that must be provided. It is responsible for storing the data read for a single entity.

This function will always be called in the order of the file_ids in the range passed to the read method.

Parameters
file_id — The file ID for the entity
data — A pointer to the data for the entity
num_data — Number of values for the entity
ranged — For set contents, true if in ranged format.

Referenced by read_data().

Member Data Documentation

◆ bufferSize

const size_t moab::ReadHDF5VarLen::bufferSize
private

Definition at line 39 of file ReadHDF5VarLen.hpp.

Referenced by read_data(), and read_offsets().

◆ dataBuffer

void* const moab::ReadHDF5VarLen::dataBuffer
private

Definition at line 38 of file ReadHDF5VarLen.hpp.

Referenced by read_data(), and read_offsets().

◆ dbgOut

DebugOutput& moab::ReadHDF5VarLen::dbgOut
protected

Definition at line 35 of file ReadHDF5VarLen.hpp.

Referenced by read_data(), and read_offsets().


The documentation for this class was generated from the following files: