Mesh Oriented datABase  (version 5.5.0)
An array-based unstructured mesh library
ReadHDF5Dataset.cpp
/** \file ReadHDF5Dataset.cpp
 * \author Jason Kraftcheck
 * \date 2010-07-09
 */

#include <cassert>
#include <cstring>
#include <cstdlib>
#include <iostream>
#include "moab/MOABConfig.h"
#include "ReadHDF5Dataset.hpp"

#include "moab_mpe.h"

#include <H5Dpublic.h>
#include <H5Tpublic.h>
#include <H5Ppublic.h>
#include <H5Spublic.h>
#ifdef MOAB_HAVE_HDF5_PARALLEL
#include <H5FDmpi.h>
#include <H5FDmpio.h>
#endif

#define HDF5_16API ( H5_VERS_MAJOR < 2 && H5_VERS_MINOR < 8 )

namespace moab
{

// Selection of hyperslabs appears to be superlinear. Don't try to select
// more than a few thousand at a time or things start to get really slow.
const size_t DEFAULT_HYPERSLAB_SELECTION_LIMIT = 200;
size_t ReadHDF5Dataset::hyperslabSelectionLimit = DEFAULT_HYPERSLAB_SELECTION_LIMIT;
void ReadHDF5Dataset::default_hyperslab_selection_limit()
{
    hyperslabSelectionLimit = DEFAULT_HYPERSLAB_SELECTION_LIMIT;
}

H5S_seloper_t ReadHDF5Dataset::hyperslabSelectOp = H5S_SELECT_OR;

#ifdef MOAB_HAVE_LIBMPE
static std::pair< int, int > allocate_mpe_state( const char* name, const char* color )
{
    std::pair< int, int > result;
    result.first  = MPE_Log_get_event_number();
    result.second = MPE_Log_get_event_number();
    MPE_Describe_state( result.first, result.second, name, color );
    return result;
}
#else
static std::pair< int, int > allocate_mpe_state( const char*, const char* )
{
    return std::pair< int, int >();
}
#endif

bool ReadHDF5Dataset::haveMPEEvents = false;
std::pair< int, int > ReadHDF5Dataset::mpeReadEvent;
std::pair< int, int > ReadHDF5Dataset::mpeReduceEvent;

ReadHDF5Dataset::ReadHDF5Dataset( const char* debug_desc, bool parallel, const Comm* communicator )
    : closeDataSet( false ), dataSet( -1 ), dataSpace( -1 ), dataType( -1 ), fileType( -1 ), ioProp( H5P_DEFAULT ),
      dataSpaceRank( 0 ), rowsInTable( 0 ), doConversion( false ), nativeParallel( parallel ), readCount( 0 ),
      bufferSize( 0 ), mpiComm( communicator ), mpeDesc( debug_desc )
{
    if( !haveMPEEvents )
    {
        haveMPEEvents  = true;
        mpeReadEvent   = allocate_mpe_state( "ReadHDF5Dataset::read", "yellow" );
        mpeReduceEvent = allocate_mpe_state( "ReadHDF5Dataset::all_reduce", "yellow" );
    }

#ifndef MOAB_HAVE_HDF5_PARALLEL
    if( nativeParallel ) throw Exception( __LINE__ );
#else
    if( nativeParallel && !mpiComm ) throw Exception( __LINE__ );

    if( mpiComm )
    {
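        // Use collective MPI-IO transfers when a communicator is supplied.
        // With collective I/O every rank must take part in every H5Dread()
        // call, which is why read() issues a null_read() on ranks that have
        // already exhausted their portion of the data.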
        ioProp = H5Pcreate( H5P_DATASET_XFER );
        H5Pset_dxpl_mpio( ioProp, H5FD_MPIO_COLLECTIVE );
    }
#endif
}

ReadHDF5Dataset::ReadHDF5Dataset( const char* debug_desc,
                                  hid_t data_set_handle,
                                  bool parallel,
                                  const Comm* communicator,
                                  bool close_data_set )
    : closeDataSet( close_data_set ), dataSet( data_set_handle ), dataSpace( -1 ), dataType( -1 ), fileType( -1 ),
      ioProp( H5P_DEFAULT ), dataSpaceRank( 0 ), rowsInTable( 0 ), doConversion( false ), nativeParallel( parallel ),
      readCount( 0 ), bufferSize( 0 ), mpiComm( communicator ), mpeDesc( debug_desc )
{
    if( !haveMPEEvents )
    {
        haveMPEEvents  = true;
        mpeReadEvent   = allocate_mpe_state( "ReadHDF5Dataset::read", "yellow" );
        mpeReduceEvent = allocate_mpe_state( "ReadHDF5Dataset::all_reduce", "yellow" );
    }

    init( data_set_handle, close_data_set );

#ifndef MOAB_HAVE_HDF5_PARALLEL
    if( nativeParallel ) throw Exception( __LINE__ );
#else
    if( nativeParallel && !mpiComm ) throw Exception( __LINE__ );

    if( mpiComm )
    {
        ioProp = H5Pcreate( H5P_DATASET_XFER );
        H5Pset_dxpl_mpio( ioProp, H5FD_MPIO_COLLECTIVE );
    }
#endif
}

void ReadHDF5Dataset::init( hid_t data_set_handle, bool close_data_set )
{
    closeDataSet = close_data_set;
    dataSet      = data_set_handle;

    fileType = H5Dget_type( data_set_handle );
    if( fileType < 0 ) throw Exception( __LINE__ );

    dataSpace = H5Dget_space( dataSet );
    if( dataSpace < 0 ) throw Exception( __LINE__ );

    dataSpaceRank = H5Sget_simple_extent_dims( dataSpace, dataSetCount, dataSetOffset );
    if( dataSpaceRank < 0 ) throw Exception( __LINE__ );
    rowsInTable = dataSetCount[0];

    for( int i = 0; i < dataSpaceRank; ++i )
        dataSetOffset[i] = 0;

    currOffset = rangeEnd = internalRange.end();
}

unsigned ReadHDF5Dataset::columns() const
{
    if( dataSpaceRank == 1 )
        return 1;
    else if( dataSpaceRank == 2 )
        return dataSetCount[1];

    throw Exception( __LINE__ );
}

void ReadHDF5Dataset::set_column( unsigned column )
{
    if( dataSpaceRank != 2 || column >= dataSetCount[1] ) throw Exception( __LINE__ );
    dataSetCount[1]  = 1;
    dataSetOffset[1] = column;
}

Range::const_iterator ReadHDF5Dataset::next_end( Range::const_iterator iter )
{
    size_t slabs_remaining = hyperslabSelectionLimit;
    size_t avail           = bufferSize;
    while( iter != rangeEnd && slabs_remaining )
    {
        size_t count = *( iter.end_of_block() ) - *iter + 1;
        if( count >= avail )
        {
            iter += avail;
            break;
        }

        avail -= count;
        iter += count;
        --slabs_remaining;
    }
    return iter;
}
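
// Illustration (not part of the original source): with hyperslabSelectionLimit == 2
// and bufferSize == 100, a file-id Range holding the blocks {1-10, 21-30, 41-50}
// is split so the first read selects {1-10} and {21-30} (two hyperslabs, 20 rows)
// and a second read selects {41-50}. Capping the number of hyperslabs per read
// keeps the superlinear cost of building large selections under control.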

void ReadHDF5Dataset::set_file_ids( const Range& file_ids, EntityHandle start_id, hsize_t row_count, hid_t data_type )
{
    startID    = start_id;
    currOffset = file_ids.begin();
    rangeEnd   = file_ids.end();
    readCount  = 0;
    bufferSize = row_count;

    // If a) the user specified a buffer size, b) we're doing a true
    // parallel partial read, and c) we're doing collective I/O, then
    // we need to know the maximum number of reads that will be done.
#ifdef MOAB_HAVE_HDF5_PARALLEL
    if( nativeParallel )
    {
        Range::const_iterator iter = currOffset;
        while( iter != rangeEnd )
        {
            ++readCount;
            iter = next_end( iter );
        }

        MPE_Log_event( mpeReduceEvent.first, (int)readCount, mpeDesc.c_str() );
        unsigned long recv = readCount, send = readCount;
        MPI_Allreduce( &send, &recv, 1, MPI_UNSIGNED_LONG, MPI_MAX, *mpiComm );
        readCount = recv;
        MPE_Log_event( mpeReduceEvent.second, (int)readCount, mpeDesc.c_str() );
    }
#endif
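
    // For example (illustrative numbers): if this rank needs 3 reads to cover
    // its file ids but another rank needs 5, the MPI_MAX reduction above raises
    // readCount to 5 on every rank; the two extra calls on this rank become
    // null_read()s so the collective H5Dread() calls stay matched across ranks.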

    dataType     = data_type;
    htri_t equal = H5Tequal( fileType, dataType );
    if( equal < 0 ) throw Exception( __LINE__ );
    doConversion = !equal;

    // We always read in the format of the file to avoid stupid HDF5
    // library behavior when reading in parallel. We call H5Tconvert
    // ourselves to do the data conversion. If the type we're reading
    // from the file is larger than the type we want in memory, then
    // we need to reduce num_rows so that we can read the larger type
    // from the file into the passed buffer meant to accommodate num_rows
    // values of the smaller in-memory type.
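    // For example (illustrative numbers, not from the original source): reading
    // 8-byte file integers into 4-byte memory integers with a buffer sized for
    // 1000 rows reduces bufferSize to 1000 * 4 / 8 = 500 rows, so the raw file
    // data still fits in the caller's buffer before H5Tconvert() compacts it.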
    if( doConversion )
    {
        size_t mem_size, file_size;
        mem_size  = H5Tget_size( dataType );
        file_size = H5Tget_size( fileType );
        if( file_size > mem_size ) bufferSize = bufferSize * mem_size / file_size;
    }
}

void ReadHDF5Dataset::set_all_file_ids( hsize_t row_count, hid_t data_type )
{
    internalRange.clear();
    internalRange.insert( (EntityHandle)1, (EntityHandle)rowsInTable );
    set_file_ids( internalRange, 1, row_count, data_type );
}

ReadHDF5Dataset::~ReadHDF5Dataset()
{
    if( fileType >= 0 ) H5Tclose( fileType );
    if( dataSpace >= 0 ) H5Sclose( dataSpace );
    if( closeDataSet && dataSet >= 0 ) H5Dclose( dataSet );
    dataSpace = dataSet = -1;
    if( ioProp != H5P_DEFAULT ) H5Pclose( ioProp );
}

void ReadHDF5Dataset::read( void* buffer, size_t& rows_read )
{
    herr_t err;
    rows_read = 0;

    MPE_Log_event( mpeReadEvent.first, (int)readCount, mpeDesc.c_str() );
    if( currOffset != rangeEnd )
    {

        // Build H5S hyperslab selection describing the portions of the
        // data set to read
        H5S_seloper_t sop        = H5S_SELECT_SET;
        Range::iterator new_end  = next_end( currOffset );
        while( currOffset != new_end )
        {
            size_t count = *( currOffset.end_of_block() ) - *currOffset + 1;
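            // If the stopping point returned by next_end() falls inside this
            // block (the read buffer fills up partway through it), clamp the
            // count so the selection ends exactly at new_end.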
            if( new_end != rangeEnd && *currOffset + count > *new_end )
            {
                count = *new_end - *currOffset;
            }
            rows_read += count;

            dataSetOffset[0] = *currOffset - startID;
            dataSetCount[0]  = count;
            err = H5Sselect_hyperslab( dataSpace, sop, dataSetOffset, NULL, dataSetCount, 0 );
            if( err < 0 ) throw Exception( __LINE__ );
            sop = hyperslabSelectOp;  // subsequent calls to select_hyperslab append

            currOffset += count;
        }

        // Create a data space describing the memory in which to read the data
        dataSetCount[0] = rows_read;
        hid_t mem_id    = H5Screate_simple( dataSpaceRank, dataSetCount, NULL );
        if( mem_id < 0 ) throw Exception( __LINE__ );

        // Do the actual read
        err = H5Dread( dataSet, fileType, mem_id, dataSpace, ioProp, buffer );
        H5Sclose( mem_id );
        if( err < 0 ) throw Exception( __LINE__ );

        if( readCount ) --readCount;

        if( doConversion )
        {
            err = H5Tconvert( fileType, dataType, rows_read * columns(), buffer, 0, H5P_DEFAULT );
            if( err < 0 ) throw Exception( __LINE__ );
        }
    }
    else if( readCount )
    {
        null_read();
        --readCount;
    }
    MPE_Log_event( mpeReadEvent.second, (int)readCount, mpeDesc.c_str() );
}
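
// Usage sketch (not part of MOAB; a minimal illustration of how a caller might
// drive this class, assuming a valid dataset handle `dset_handle` and the done()
// accessor declared in ReadHDF5Dataset.hpp):
//
//   ReadHDF5Dataset reader( "node coords", dset_handle, parallel, communicator, true );
//   reader.set_all_file_ids( buffer_rows, H5T_NATIVE_DOUBLE );
//   std::vector< double > buffer( buffer_rows * reader.columns() );
//   size_t rows;
//   while( !reader.done() )
//   {
//       reader.read( &buffer[0], rows );
//       // ... process `rows` rows of reader.columns() values each ...
//   }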

void ReadHDF5Dataset::null_read()
{
    herr_t err;
    err = H5Sselect_none( dataSpace );
    if( err < 0 ) throw Exception( __LINE__ );

    //#if HDF5_16API
    hsize_t one  = 1;
    hid_t mem_id = H5Screate_simple( 1, &one, NULL );
    if( mem_id < 0 ) throw Exception( __LINE__ );
    err = H5Sselect_none( mem_id );
    if( err < 0 )
    {
        H5Sclose( mem_id );
        throw Exception( __LINE__ );
    }
    //#else
    //  hid_t mem_id = H5Screate(H5S_NULL);
    //  if (mem_id < 0)
    //      throw Exception(__LINE__);
    //#endif

    err = H5Dread( dataSet, fileType, mem_id, dataSpace, ioProp, 0 );
    H5Sclose( mem_id );
    if( err < 0 ) throw Exception( __LINE__ );
}

}  // namespace moab