Mesh Oriented datABase  (version 5.5.0)
An array-based unstructured mesh library
ReadHDF5.cpp
1 /**
2  * MOAB, a Mesh-Oriented datABase, is a software component for creating,
3  * storing and accessing finite element mesh data.
4  *
5  * Copyright 2004 Sandia Corporation. Under the terms of Contract
6  * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
7  * retains certain rights in this software.
8  *
9  * This library is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  */
15 
16 //-------------------------------------------------------------------------
17 // Filename : ReadHDF5.cpp
18 //
19 // Purpose : HDF5 Reader
20 //
21 // Creator : Jason Kraftcheck
22 //
23 // Creation Date : 04/18/04
24 //-------------------------------------------------------------------------
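//
// Usage sketch (illustrative only, not part of this source file): this
// reader is normally reached through Interface::load_file(), and the
// options parsed below (PARALLEL, BCAST_SUMMARY, BLOCKED_COORDINATE_IO,
// BUFFER_SIZE, DEBUG_IO, ...) arrive in its option string. The file name
// and option values here are assumed examples, not defaults:
//
//   moab::Core mb;
//   moab::ErrorCode rval =
//       mb.load_file( "mesh.h5m", 0, "PARALLEL=READ_PART;BCAST_SUMMARY=yes" );
//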
25 
26 #include <cassert>
27 #include "moab/MOABConfig.h"
28 /* Include our MPI header before any HDF5 because otherwise
29  it will get included indirectly by HDF5 */
30 #ifdef MOAB_HAVE_MPI
31 #include "moab_mpi.h"
32 #include "moab/ParallelComm.hpp"
33 #endif
34 #include <H5Tpublic.h>
35 #include <H5Ppublic.h>
36 #include <H5Epublic.h>
37 #include "moab/Interface.hpp"
38 #include "Internals.hpp"
39 #include "MBTagConventions.hpp"
40 #include "ReadHDF5.hpp"
41 #include "moab/CN.hpp"
42 #include "moab/FileOptions.hpp"
43 #include "moab/CpuTimer.hpp"
44 #ifdef MOAB_HAVE_HDF5_PARALLEL
45 #include <H5FDmpi.h>
46 #include <H5FDmpio.h>
47 #endif
48 //#include "WriteHDF5.hpp"
49 
50 #include <cstdlib>
51 #include <cstring>
52 #include <limits>
53 #include <functional>
54 #include <iostream>
55 
56 #include "IODebugTrack.hpp"
57 #include "ReadHDF5Dataset.hpp"
58 #include "ReadHDF5VarLen.hpp"
59 #include "moab_mpe.h"
60 
61 namespace moab
62 {
63 
64 /* If true, coordinates are read in blocked format (all X values before
65  * Y values before Z values). If false, all coordinates for a given
66  * vertex are read together (interleaved).
67  */
68 const bool DEFAULT_BLOCKED_COORDINATE_IO = false;
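// For example, with blocked I/O the coordinates of vertices v0, v1, v2 are
// read as x0 x1 x2, y0 y1 y2, z0 z1 z2; interleaved I/O reads them as
// x0 y0 z0, x1 y1 z1, x2 y2 z2.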
69 
70 /* If true, the file is first opened by the root node only to read the
71  * summary; the file is then closed, the summary is broadcast to all
72  * nodes, and all nodes re-open the file in parallel to read the data.
73  * If false, the file is opened once in parallel and all nodes read the
74  * summary data. */
75 const bool DEFAULT_BCAST_SUMMARY = true;
76 
77 /* If true and all processors are to read the same block of data,
78  * read it on one processor and broadcast it to the others, rather
79  * than using collective I/O.
80  */
81 const bool DEFAULT_BCAST_DUPLICATE_READS = true;
82 
83 #define READ_HDF5_BUFFER_SIZE ( 128 * 1024 * 1024 )
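// Default read buffer is 128 MiB; the BUFFER_SIZE file option (parsed in
// set_up_read below) overrides it.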
84 
85 #define assert_range( PTR, CNT ) \
86  assert( ( PTR ) >= (void*)dataBuffer ); \
87  assert( ( ( PTR ) + ( CNT ) ) <= (void*)( dataBuffer + bufferSize ) );
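// assert_range verifies that a [PTR, PTR + CNT) span used for staging I/O
// lies entirely within the allocated dataBuffer.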
88 
89 // Call \c error function during HDF5 library errors to make
90 // it easier to trap such errors in the debugger. This function
91 // gets registered with the HDF5 library as a callback. It
92 // works the same as the default (H5Eprint), except that it
93 // also invokes MB_CHK_ERR_CONT, which serves as a breakpoint hook.
94 #if defined( H5E_auto_t_vers ) && H5E_auto_t_vers > 1
95 static herr_t handle_hdf5_error( hid_t stack, void* data )
96 {
97  ReadHDF5::HDF5ErrorHandler* h = reinterpret_cast< ReadHDF5::HDF5ErrorHandler* >( data );
98  herr_t result = 0;
99  if( h->func ) result = ( *h->func )( stack, h->data );
    MB_CHK_ERR_CONT( MB_FAILURE );
100  return result;
101 }
102 #else
103 static herr_t handle_hdf5_error( void* data )
104 {
105  ReadHDF5::HDF5ErrorHandler* h = reinterpret_cast< ReadHDF5::HDF5ErrorHandler* >( data );
106  herr_t result = 0;
107  if( h->func ) result = ( *h->func )( h->data );
     MB_CHK_ERR_CONT( MB_FAILURE );
108  return result;
109 }
110 #endif
111 
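// Collapse a sorted array of file IDs into a Range, inserting each run of
// consecutive IDs as a single span; e.g. {3, 4, 5, 9} becomes [3-5], [9].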
112 static void copy_sorted_file_ids( const EntityHandle* sorted_ids, long num_ids, Range& results )
113 {
114  Range::iterator hint = results.begin();
115  long i = 0;
116  while( i < num_ids )
117  {
118  EntityHandle start = sorted_ids[i];
119  for( ++i; i < num_ids && sorted_ids[i] == 1 + sorted_ids[i - 1]; ++i )
120  ;
121  hint = results.insert( hint, start, sorted_ids[i - 1] );
122  }
123 }
124 
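// Intersect the file-side entity group [start_id, start_id + count) with an
// arbitrary Range of file IDs.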
125 static void intersect( const mhdf_EntDesc& group, const Range& range, Range& result )
126 {
127  Range::const_iterator s, e;
128  s = Range::lower_bound( range.begin(), range.end(), group.start_id );
129  e = Range::lower_bound( s, range.end(), group.start_id + group.count );
130  result.merge( s, e );
131 }
132 
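// debug_barrier_line(): when MPI is available and verbose debug output is
// enabled, synchronize all ranks (tagging the barrier with the calling
// source line) so per-process debug output stays ordered.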
133 #define debug_barrier() debug_barrier_line( __LINE__ )
134 void ReadHDF5::debug_barrier_line( int lineno )
135 {
136 #ifdef MOAB_HAVE_MPI
137  if( mpiComm )
138  {
139  const unsigned threshold = 2;
140  static unsigned long count = 0;
141  if( dbgOut.get_verbosity() >= threshold )
142  {
143  dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno );
144  MPI_Barrier( *mpiComm );
145  }
146  }
147 #else
148  if( lineno )
149  {
150  }
151 #endif
152 }
153 
154 class CheckOpenReadHDF5Handles
155 {
156  int fileline;
157  mhdf_FileHandle handle;
158  int enter_count;
159 
160  public:
161  CheckOpenReadHDF5Handles( mhdf_FileHandle file, int line )
162  : fileline( line ), handle( file ), enter_count( mhdf_countOpenHandles( file ) )
163  {
164  }
165  ~CheckOpenReadHDF5Handles()
166  {
167  int new_count = mhdf_countOpenHandles( handle );
168  if( new_count != enter_count )
169  {
170  std::cout << "Leaked HDF5 object handle in function at " << __FILE__ << ":" << fileline << std::endl
171  << "Open at entrance: " << enter_count << std::endl
172  << "Open at exit: " << new_count << std::endl;
173  }
174  }
175 };
176 
177 #ifdef NDEBUG
178 #define CHECK_OPEN_HANDLES
179 #else
180 #define CHECK_OPEN_HANDLES CheckOpenReadHDF5Handles check_open_handles_( filePtr, __LINE__ )
181 #endif
182 
183 ReaderIface* ReadHDF5::factory( Interface* iface )
184 {
185  return new ReadHDF5( iface );
186 }
187 
188 ReadHDF5::ReadHDF5( Interface* iface )
189  : bufferSize( READ_HDF5_BUFFER_SIZE ), dataBuffer( NULL ), iFace( iface ), filePtr( 0 ), fileInfo( NULL ),
190  readUtil( NULL ), handleType( 0 ), indepIO( H5P_DEFAULT ), collIO( H5P_DEFAULT ), myPcomm( NULL ),
191  debugTrack( false ), dbgOut( stderr ), nativeParallel( false ), mpiComm( NULL ),
192  blockedCoordinateIO( DEFAULT_BLOCKED_COORDINATE_IO ), bcastSummary( DEFAULT_BCAST_SUMMARY ),
193  bcastDuplicateReads( DEFAULT_BCAST_DUPLICATE_READS ), setMeta( 0 ), timer( NULL ), cputime( false )
194 {
195 }
196 
197 ErrorCode ReadHDF5::init()
198 {
199  ErrorCode rval;
200 
201  if( readUtil ) return MB_SUCCESS;
202 
203  indepIO = collIO = H5P_DEFAULT;
204  // WriteHDF5::register_known_tag_types(iFace);
205 
206  handleType = H5Tcopy( H5T_NATIVE_ULONG );
207  if( handleType < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
208 
209  if( H5Tset_size( handleType, sizeof( EntityHandle ) ) < 0 )
210  {
211  H5Tclose( handleType );
212  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
213  }
214 
215  rval = iFace->query_interface( readUtil );
216  if( MB_SUCCESS != rval )
217  {
218  H5Tclose( handleType );
219  MB_SET_ERR( rval, "ReadHDF5 Failure" );
220  }
221 
222  idMap.clear();
223  fileInfo = 0;
224  debugTrack = false;
225  myPcomm = 0;
226 
227  return MB_SUCCESS;
228 }
229 
230 ReadHDF5::~ReadHDF5()
231 {
232  if( !readUtil ) // init() failed.
233  return;
234 
235  delete[] setMeta;
236  setMeta = 0;
237  iFace->release_interface( readUtil );
238  H5Tclose( handleType );
239 }
240 
241 ErrorCode ReadHDF5::set_up_read( const char* filename, const FileOptions& opts )
242 {
243  ErrorCode rval;
244  mhdf_Status status;
245  indepIO = collIO = H5P_DEFAULT;
246  mpiComm = 0;
247 
248  if( MB_SUCCESS != init() ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
249 
250 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
251  herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
252 #else
253  herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
254 #endif
255  if( err < 0 )
256  {
257  errorHandler.func = 0;
258  errorHandler.data = 0;
259  }
260  else
261  {
262 #if defined( H5Eset_auto_vers ) && H5Eset_auto_vers > 1
263  err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
264 #else
265  err = H5Eset_auto( &handle_hdf5_error, &errorHandler );
266 #endif
267  if( err < 0 )
268  {
269  errorHandler.func = 0;
270  errorHandler.data = 0;
271  }
272  }
273 
274  // Set up debug output
275  int tmpval;
276  if( MB_SUCCESS == opts.get_int_option( "DEBUG_IO", 1, tmpval ) )
277  {
278  dbgOut.set_verbosity( tmpval );
279  dbgOut.set_prefix( "H5M " );
280  }
281  dbgOut.limit_output_to_first_N_procs( 32 );
282 
283  // Enable some extra checks for reads. Note: amongst other things this
284  // will print errors if the entire file is not read, so if doing a
285  // partial read that is not a parallel read, this should be disabled.
286  debugTrack = ( MB_SUCCESS == opts.get_null_option( "DEBUG_BINIO" ) );
287 
288  opts.get_toggle_option( "BLOCKED_COORDINATE_IO", DEFAULT_BLOCKED_COORDINATE_IO, blockedCoordinateIO );
289  opts.get_toggle_option( "BCAST_SUMMARY", DEFAULT_BCAST_SUMMARY, bcastSummary );
290  opts.get_toggle_option( "BCAST_DUPLICATE_READS", DEFAULT_BCAST_DUPLICATE_READS, bcastDuplicateReads );
291 
292  // Handle parallel options
293  bool use_mpio = ( MB_SUCCESS == opts.get_null_option( "USE_MPIO" ) );
294  rval = opts.match_option( "PARALLEL", "READ_PART" );
295  bool parallel = ( rval != MB_ENTITY_NOT_FOUND );
296  nativeParallel = ( rval == MB_SUCCESS );
297  if( use_mpio && !parallel )
298  {
299  MB_SET_ERR( MB_NOT_IMPLEMENTED, "'USE_MPIO' option specified w/out 'PARALLEL' option" );
300  }
301 
302  // This option is intended for testing purposes only, and thus
303  // is not documented anywhere. Decreasing the buffer size can
304  // expose bugs that would otherwise only be seen when reading
305  // very large files.
306  rval = opts.get_int_option( "BUFFER_SIZE", bufferSize );
307  if( MB_SUCCESS != rval )
308  {
309  bufferSize = READ_HDF5_BUFFER_SIZE;
310  }
311  else if( bufferSize < (int)std::max( sizeof( EntityHandle ), sizeof( void* ) ) )
312  {
313  MB_SET_ERR( MB_INVALID_SIZE, "Invalid BUFFER_SIZE option value" );
314  }
315 
316  dataBuffer = (char*)malloc( bufferSize );
317  if( !dataBuffer ) MB_SET_ERR( MB_MEMORY_ALLOCATION_FAILED, "ReadHDF5 Failure" );
318 
319  if( use_mpio || nativeParallel )
320  {
321 
322 #ifndef MOAB_HAVE_HDF5_PARALLEL
323  free( dataBuffer );
324  dataBuffer = NULL;
325  MB_SET_ERR( MB_NOT_IMPLEMENTED, "MOAB not configured with parallel HDF5 support" );
326 #else
327  MPI_Info info = MPI_INFO_NULL;
328  std::string cb_size;
329  rval = opts.get_str_option( "CB_BUFFER_SIZE", cb_size );
330  if( MB_SUCCESS == rval )
331  {
332  MPI_Info_create( &info );
333  MPI_Info_set( info, const_cast< char* >( "cb_buffer_size" ), const_cast< char* >( cb_size.c_str() ) );
334  }
335 
336  int pcomm_no = 0;
337  rval = opts.get_int_option( "PARALLEL_COMM", pcomm_no );
338  if( rval == MB_TYPE_OUT_OF_RANGE )
339  {
340  MB_SET_ERR( rval, "Invalid value for PARALLEL_COMM option" );
341  }
342  myPcomm = ParallelComm::get_pcomm( iFace, pcomm_no );
343  if( 0 == myPcomm )
344  {
345  myPcomm = new ParallelComm( iFace, MPI_COMM_WORLD );
346  }
347  const int rank = myPcomm->proc_config().proc_rank();
348  dbgOut.set_rank( rank );
349  dbgOut.limit_output_to_first_N_procs( 32 );
350  mpiComm = new MPI_Comm( myPcomm->proc_config().proc_comm() );
351 
352 #ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
353  dbgOut.print( 1, "H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS is not defined\n" );
354 #endif
355 
356  // Open the file in serial on root to read summary
357  dbgOut.tprint( 1, "Getting file summary\n" );
358  fileInfo = 0;
359 
360  hid_t file_prop;
361  if( bcastSummary )
362  {
363  unsigned long size = 0;
364  if( rank == 0 )
365  {
366  file_prop = H5Pcreate( H5P_FILE_ACCESS );
367  err = H5Pset_fapl_mpio( file_prop, MPI_COMM_SELF, MPI_INFO_NULL );
368  assert( file_prop >= 0 );
369  assert( err >= 0 );
370  filePtr = mhdf_openFileWithOpt( filename, 0, NULL, handleType, file_prop, &status );
371  H5Pclose( file_prop );
372 
373  if( filePtr )
374  {
376  0 ); // no extra set info
377  if( !is_error( status ) )
378  {
379  size = fileInfo->total_size;
380  fileInfo->offset = (unsigned char*)fileInfo;
381  }
382  }
383  mhdf_closeFile( filePtr, &status );
384  if( fileInfo && mhdf_isError( &status ) )
385  {
386  free( fileInfo );
387  fileInfo = NULL;
388  }
389  }
390 
391  dbgOut.tprint( 1, "Communicating file summary\n" );
392  int mpi_err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, myPcomm->proc_config().proc_comm() );
393  if( mpi_err || !size ) return MB_FAILURE;
394 
395  if( rank != 0 ) fileInfo = reinterpret_cast< mhdf_FileDesc* >( malloc( size ) );
396 
397  MPI_Bcast( fileInfo, size, MPI_BYTE, 0, myPcomm->proc_config().proc_comm() );
398 
399  if( rank != 0 ) mhdf_fixFileDesc( fileInfo, reinterpret_cast< mhdf_FileDesc* >( fileInfo->offset ) );
400  }
401 
402  file_prop = H5Pcreate( H5P_FILE_ACCESS );
403  err = H5Pset_fapl_mpio( file_prop, myPcomm->proc_config().proc_comm(), info );
404  assert( file_prop >= 0 );
405  assert( err >= 0 );
406 
407  collIO = H5Pcreate( H5P_DATASET_XFER );
408  assert( collIO > 0 );
409  err = H5Pset_dxpl_mpio( collIO, H5FD_MPIO_COLLECTIVE );
410  assert( err >= 0 );
411  indepIO = nativeParallel ? H5P_DEFAULT : collIO;
412 
413  // Re-open file in parallel
414  dbgOut.tprintf( 1, "Opening \"%s\" for parallel IO\n", filename );
415  filePtr = mhdf_openFileWithOpt( filename, 0, NULL, handleType, file_prop, &status );
416 
417  H5Pclose( file_prop );
418  if( !filePtr )
419  {
420  free( dataBuffer );
421  dataBuffer = NULL;
422  H5Pclose( indepIO );
423  if( collIO != indepIO ) H5Pclose( collIO );
424  collIO = indepIO = H5P_DEFAULT;
425  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
426  }
427 
428  if( !bcastSummary )
429  {
430  fileInfo = mhdf_getFileSummary( filePtr, handleType, &status, 0 );
431  if( is_error( status ) )
432  {
433  free( dataBuffer );
434  dataBuffer = NULL;
435  mhdf_closeFile( filePtr, &status );
436  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
437  }
438  }
439 #endif // MOAB_HAVE_HDF5_PARALLEL
440  }
441  else
442  {
443  // Open the file
444  filePtr = mhdf_openFile( filename, 0, NULL, handleType, &status );
445  if( !filePtr )
446  {
447  free( dataBuffer );
448  dataBuffer = NULL;
449  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
450  }
451 
452  // Get file info
453  fileInfo = mhdf_getFileSummary( filePtr, handleType, &status, 0 );
454  if( is_error( status ) )
455  {
456  free( dataBuffer );
457  dataBuffer = NULL;
458  mhdf_closeFile( filePtr, &status );
459  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
460  }
461  }
462 
463  ReadHDF5Dataset::default_hyperslab_selection_limit();
464  int hslimit;
465  rval = opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit );
466  if( MB_SUCCESS == rval && hslimit > 0 )
467  ReadHDF5Dataset::set_hyperslab_selection_limit( hslimit );
468  else
469  ReadHDF5Dataset::default_hyperslab_selection_limit();
470  if( MB_SUCCESS != opts.get_null_option( "HYPERSLAB_OR" ) &&
471  ( MB_SUCCESS == opts.get_null_option( "HYPERSLAB_APPEND" ) || HDF5_can_append_hyperslabs() ) )
472  {
473  ReadHDF5Dataset::append_hyperslabs();
474  if( MB_SUCCESS != opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit ) )
475  ReadHDF5Dataset::set_hyperslab_selection_limit( std::numeric_limits< int >::max() );
476  dbgOut.print( 1, "Using H5S_APPEND for hyperslab selection\n" );
477  }
478 
479  return MB_SUCCESS;
480 }
481 
482 ErrorCode ReadHDF5::clean_up_read( const FileOptions& )
483 {
484  HDF5ErrorHandler handler;
485 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
486  herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
487 #else
488  herr_t err = H5Eget_auto( &handler.func, &handler.data );
489 #endif
490  if( err >= 0 && handler.func == &handle_hdf5_error )
491  {
492  assert( handler.data == &errorHandler );
493 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
494  H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
495 #else
496  H5Eset_auto( errorHandler.func, errorHandler.data );
497 #endif
498  }
499 
500  free( dataBuffer );
501  dataBuffer = NULL;
502  free( fileInfo );
503  fileInfo = NULL;
504  delete mpiComm;
505  mpiComm = 0;
506 
507  if( indepIO != H5P_DEFAULT ) H5Pclose( indepIO );
508  if( collIO != indepIO ) H5Pclose( collIO );
509  collIO = indepIO = H5P_DEFAULT;
510 
511  delete[] setMeta;
512  setMeta = 0;
513 
514  mhdf_Status status;
515  mhdf_closeFile( filePtr, &status );
516  filePtr = 0;
517  return is_error( status ) ? MB_FAILURE : MB_SUCCESS;
518 }
519 
520 ErrorCode ReadHDF5::load_file( const char* filename,
521  const EntityHandle* file_set,
522  const FileOptions& opts,
523  const ReaderIface::SubsetList* subset_list,
524  const Tag* file_id_tag )
525 {
526  ErrorCode rval;
527 
528  rval = set_up_read( filename, opts );
529  if( MB_SUCCESS != rval )
530  {
531  clean_up_read( opts );
532  return rval;
533  }
534  // See if we need to report times
535 
536  rval = opts.get_null_option( "CPUTIME" );
537  if( MB_SUCCESS == rval )
538  {
539  cputime = true;
540  timer = new CpuTimer;
541  for( int i = 0; i < NUM_TIMES; i++ )
542  _times[i] = 0;
543  }
544 
545  // We read the entire set description table regardless of partial
546  // or complete reads or serial vs parallel reads
547  rval = read_all_set_meta();
548 
549  if( cputime ) _times[SET_META_TIME] = timer->time_elapsed();
550  if( subset_list && MB_SUCCESS == rval )
551  rval = load_file_partial( subset_list->tag_list, subset_list->tag_list_length, subset_list->num_parts,
552  subset_list->part_number, opts );
553  else
554  rval = load_file_impl( opts );
555 
556  if( MB_SUCCESS == rval && file_id_tag )
557  {
558  dbgOut.tprint( 1, "Storing file IDs in tag\n" );
559  rval = store_file_ids( *file_id_tag );
560  }
561  ErrorCode rval3 = opts.get_null_option( "STORE_SETS_FILEIDS" );
562  if( MB_SUCCESS == rval3 )
563  {
564  rval = store_sets_file_ids();
565  if( MB_SUCCESS != rval ) return rval;
566  }
567 
568  if( cputime ) _times[STORE_FILE_IDS_TIME] = timer->time_elapsed();
569 
570  if( MB_SUCCESS == rval && 0 != file_set )
571  {
572  dbgOut.tprint( 1, "Reading QA records\n" );
573  rval = read_qa( *file_set );
574  }
575 
576  if( cputime ) _times[READ_QA_TIME] = timer->time_elapsed();
577  dbgOut.tprint( 1, "Cleaning up\n" );
578  ErrorCode rval2 = clean_up_read( opts );
579  if( rval == MB_SUCCESS && rval2 != MB_SUCCESS ) rval = rval2;
580 
581  if( MB_SUCCESS == rval )
582  dbgOut.tprint( 1, "Read finished.\n" );
583  else
584  {
585  std::string msg;
586  iFace->get_last_error( msg );
587  dbgOut.tprintf( 1, "READ FAILED (ERROR CODE %s): %s\n", ErrorCodeStr[rval], msg.c_str() );
588  }
589 
590  if( cputime )
591  {
592  _times[TOTAL_TIME] = timer->time_since_birth();
593  print_times();
594  delete timer;
595  }
596  if( H5P_DEFAULT != collIO ) H5Pclose( collIO );
597  if( H5P_DEFAULT != indepIO ) H5Pclose( indepIO );
598  collIO = indepIO = H5P_DEFAULT;
599 
600  return rval;
601 }
602 
603 ErrorCode ReadHDF5::load_file_impl( const FileOptions& opts )
604 {
605  ErrorCode rval;
606  mhdf_Status status;
607  int i;
608 
609  CHECK_OPEN_HANDLES;
610 
611  dbgOut.tprint( 1, "Reading all nodes...\n" );
612  Range ids;
613  if( fileInfo->nodes.count )
614  {
615  ids.insert( fileInfo->nodes.start_id, fileInfo->nodes.start_id + fileInfo->nodes.count - 1 );
616  rval = read_nodes( ids );
617  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
618  }
619 
620  dbgOut.tprint( 1, "Reading all element connectivity...\n" );
621  std::vector< int > polyhedra; // Need to do these last so that faces are loaded
622  for( i = 0; i < fileInfo->num_elem_desc; ++i )
623  {
624  if( CN::EntityTypeFromName( fileInfo->elems[i].type ) == MBPOLYHEDRON )
625  {
626  polyhedra.push_back( i );
627  continue;
628  }
629 
630  rval = read_elems( i );
631  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
632  }
633  for( std::vector< int >::iterator it = polyhedra.begin(); it != polyhedra.end(); ++it )
634  {
635  rval = read_elems( *it );
636  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
637  }
638 
639  dbgOut.tprint( 1, "Reading all sets...\n" );
640  ids.clear();
641  if( fileInfo->sets.count )
642  {
643  ids.insert( fileInfo->sets.start_id, fileInfo->sets.start_id + fileInfo->sets.count - 1 );
644  rval = read_sets( ids );
645  if( rval != MB_SUCCESS )
646  {
647  MB_SET_ERR( rval, "ReadHDF5 Failure" );
648  }
649  }
650 
651  dbgOut.tprint( 1, "Reading all adjacencies...\n" );
652  for( i = 0; i < fileInfo->num_elem_desc; ++i )
653  {
654  if( !fileInfo->elems[i].have_adj ) continue;
655 
656  long table_len;
657  hid_t table = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &table_len, &status );
658  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
659 
660  rval = read_adjacencies( table, table_len );
661  mhdf_closeData( filePtr, table, &status );
662  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
663  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
664  }
665 
666  dbgOut.tprint( 1, "Reading all tags...\n" );
667  for( i = 0; i < fileInfo->num_tag_desc; ++i )
668  {
669  rval = read_tag( i );
670  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
671  }
672 
673  dbgOut.tprint( 1, "Core read finished. Cleaning up...\n" );
674  return MB_SUCCESS;
675 }
676 
677 ErrorCode ReadHDF5::find_int_tag( const char* name, int& index )
678 {
679  for( index = 0; index < fileInfo->num_tag_desc; ++index )
680  if( !strcmp( name, fileInfo->tags[index].name ) ) break;
681 
682  if( index == fileInfo->num_tag_desc )
683  {
684  MB_SET_ERR( MB_TAG_NOT_FOUND, "File does not contain subset tag '" << name << "'" );
685  }
686 
687  if( fileInfo->tags[index].type != mhdf_INTEGER || fileInfo->tags[index].size != 1 )
688  {
689  MB_SET_ERR( MB_TAG_NOT_FOUND, "Tag '" << name << "' does not contain a single integer value" );
690  }
691 
692  return MB_SUCCESS;
693 }
694 
695 ErrorCode ReadHDF5::get_subset_ids( const ReaderIface::IDTag* subset_list, int subset_list_length, Range& file_ids )
696 {
697  ErrorCode rval;
698 
699  for( int i = 0; i < subset_list_length; ++i )
700  {
701  int tag_index;
702  rval = find_int_tag( subset_list[i].tag_name, tag_index );
703  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
704 
705  Range tmp_file_ids;
706  if( !subset_list[i].num_tag_values )
707  {
708  rval = get_tagged_entities( tag_index, tmp_file_ids );
709  }
710  else
711  {
712  std::vector< int > ids( subset_list[i].tag_values,
713  subset_list[i].tag_values + subset_list[i].num_tag_values );
714  std::sort( ids.begin(), ids.end() );
715  rval = search_tag_values( tag_index, ids, tmp_file_ids );
716  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
717  }
718 
719  if( tmp_file_ids.empty() ) MB_CHK_ERR( MB_ENTITY_NOT_FOUND );
720 
721  if( i == 0 )
722  file_ids.swap( tmp_file_ids );
723  else
724  file_ids = intersect( tmp_file_ids, file_ids );
725  }
726 
727  return MB_SUCCESS;
728 }
729 
730 ErrorCode ReadHDF5::get_partition( Range& tmp_file_ids, int num_parts, int part_number )
731 {
732  CHECK_OPEN_HANDLES;
733 
734  // Check that the tag only identified sets
735  if( (unsigned long)fileInfo->sets.start_id > tmp_file_ids.front() )
736  {
737  dbgOut.print( 2, "Ignoring non-set entities with partition set tag\n" );
738  tmp_file_ids.erase( tmp_file_ids.begin(), tmp_file_ids.lower_bound( (EntityHandle)fileInfo->sets.start_id ) );
739  }
740  unsigned long set_end = (unsigned long)fileInfo->sets.start_id + fileInfo->sets.count;
741  if( tmp_file_ids.back() >= set_end )
742  {
743  dbgOut.print( 2, "Ignoring non-set entities with partition set tag\n" );
744  tmp_file_ids.erase( tmp_file_ids.upper_bound( (EntityHandle)set_end ), tmp_file_ids.end() );
745  }
746 
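// Split the sets evenly across parts: the first (size % num_parts) parts
// each receive one extra set. E.g. 10 sets over 4 parts gives slice sizes
// 3, 3, 2, 2, and part_number selects this part's slice.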
747  Range::iterator s = tmp_file_ids.begin();
748  size_t num_per_proc = tmp_file_ids.size() / num_parts;
749  size_t num_extra = tmp_file_ids.size() % num_parts;
750  Range::iterator e;
751  if( part_number < (long)num_extra )
752  {
753  s += ( num_per_proc + 1 ) * part_number;
754  e = s;
755  e += ( num_per_proc + 1 );
756  }
757  else
758  {
759  s += num_per_proc * part_number + num_extra;
760  e = s;
761  e += num_per_proc;
762  }
763  tmp_file_ids.erase( e, tmp_file_ids.end() );
764  tmp_file_ids.erase( tmp_file_ids.begin(), s );
765 
766  return MB_SUCCESS;
767 }
768 
769 ErrorCode ReadHDF5::load_file_partial( const ReaderIface::IDTag* subset_list,
770  int subset_list_length,
771  int num_parts,
772  int part_number,
773  const FileOptions& opts )
774 {
775  mhdf_Status status;
776 
777  static MPEState mpe_event( "ReadHDF5", "yellow" );
778 
779  mpe_event.start( "gather parts" );
780 
781  CHECK_OPEN_HANDLES;
782 
783  for( int i = 0; i < subset_list_length; ++i )
784  {
785  dbgOut.printf( 2, "Select by \"%s\" with num_tag_values = %d\n", subset_list[i].tag_name,
786  subset_list[i].num_tag_values );
787  if( subset_list[i].num_tag_values )
788  {
789  assert( 0 != subset_list[i].tag_values );
790  dbgOut.printf( 2, " \"%s\" values = { %d", subset_list[i].tag_name, subset_list[i].tag_values[0] );
791  for( int j = 1; j < subset_list[i].num_tag_values; ++j )
792  dbgOut.printf( 2, ", %d", subset_list[i].tag_values[j] );
793  dbgOut.printf( 2, " }\n" );
794  }
795  }
796  if( num_parts ) dbgOut.printf( 2, "Partition with num_parts = %d and part_number = %d\n", num_parts, part_number );
797 
798  dbgOut.tprint( 1, "RETRIEVING TAGGED ENTITIES\n" );
799 
800  Range file_ids;
801  ErrorCode rval = get_subset_ids( subset_list, subset_list_length, file_ids );
802  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
803 
804  if( cputime ) _times[SUBSET_IDS_TIME] = timer->time_elapsed();
805 
806  if( num_parts )
807  {
808  /*if (num_parts>(int)file_ids.size())
809  {
810  MB_SET_ERR(MB_FAILURE, "Only " << file_ids.size() << " parts to distribute to " <<
811  num_parts << " processes.");
812  }*/
813  rval = get_partition( file_ids, num_parts, part_number );
814  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
815  }
816 
817  if( cputime ) _times[GET_PARTITION_TIME] = timer->time_elapsed();
818 
819  dbgOut.print_ints( 4, "Set file IDs for partial read: ", file_ids );
820  mpe_event.end();
821  mpe_event.start( "gather related sets" );
822  dbgOut.tprint( 1, "GATHERING ADDITIONAL ENTITIES\n" );
823 
824  enum RecursiveSetMode
825  {
826  RSM_NONE,
827  RSM_SETS,
828  RSM_CONTENTS
829  };
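 // RSM_NONE: do not recurse into sets; RSM_SETS: also read contained/child
 // sets themselves; RSM_CONTENTS: additionally read those sets' contents.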
830  const char* const set_opts[] = { "NONE", "SETS", "CONTENTS", NULL };
831  int child_mode;
832  rval = opts.match_option( "CHILDREN", set_opts, child_mode );
833  if( MB_ENTITY_NOT_FOUND == rval )
834  child_mode = RSM_CONTENTS;
835  else if( MB_SUCCESS != rval )
836  {
837  MB_SET_ERR( rval, "Invalid value for 'CHILDREN' option" );
838  }
839  int content_mode;
840  rval = opts.match_option( "SETS", set_opts, content_mode );
841  if( MB_ENTITY_NOT_FOUND == rval )
842  content_mode = RSM_CONTENTS;
843  else if( MB_SUCCESS != rval )
844  {
845  MB_SET_ERR( rval, "Invalid value for 'SETS' option" );
846  }
847 
848  // If we want the contents of contained/child sets,
849  // search for them now (before gathering the non-set contents
850  // of the sets.)
851  Range sets;
852  intersect( fileInfo->sets, file_ids, sets );
853  if( content_mode == RSM_CONTENTS || child_mode == RSM_CONTENTS )
854  {
855  dbgOut.tprint( 1, " doing read_set_ids_recursive\n" );
856  rval = read_set_ids_recursive( sets, content_mode == RSM_CONTENTS, child_mode == RSM_CONTENTS );
857  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
858  }
859 
860  if( cputime ) _times[GET_SET_IDS_TIME] = timer->time_elapsed();
861  debug_barrier();
862 
863  // Get elements and vertices contained in sets
864  dbgOut.tprint( 1, " doing get_set_contents\n" );
865  rval = get_set_contents( sets, file_ids );
866  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
867 
868  if( cputime ) _times[GET_SET_CONTENTS_TIME] = timer->time_elapsed();
869 
870  dbgOut.print_ints( 5, "File IDs for partial read: ", file_ids );
871  debug_barrier();
872  mpe_event.end();
873  dbgOut.tprint( 1, "GATHERING NODE IDS\n" );
874 
875  // Figure out the maximum dimension of entity to be read
876  int max_dim = 0;
877  for( int i = 0; i < fileInfo->num_elem_desc; ++i )
878  {
879  EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
880  if( type <= MBVERTEX || type >= MBENTITYSET )
881  {
882  assert( false ); // For debug code die for unknown element types
883  continue; // For release code, skip unknown element types
884  }
885  int dim = CN::Dimension( type );
886  if( dim > max_dim )
887  {
888  Range subset;
889  intersect( fileInfo->elems[i].desc, file_ids, subset );
890  if( !subset.empty() ) max_dim = dim;
891  }
892  }
893 #ifdef MOAB_HAVE_MPI
894  if( nativeParallel )
895  {
896  int send = max_dim;
897  MPI_Allreduce( &send, &max_dim, 1, MPI_INT, MPI_MAX, *mpiComm );
898  }
899 #endif
900 
901  // If input contained any polyhedra, then need to get faces
902  // of the polyhedra before the next loop because we need to
903  // read said faces in that loop.
904  for( int i = 0; i < fileInfo->num_elem_desc; ++i )
905  {
906  EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
907  if( type != MBPOLYHEDRON ) continue;
908 
909  debug_barrier();
910  dbgOut.print( 2, " Getting polyhedra faces\n" );
911  mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
912 
913  Range polyhedra;
914  intersect( fileInfo->elems[i].desc, file_ids, polyhedra );
915  rval = read_elems( i, polyhedra, &file_ids );
916  mpe_event.end( rval );
917  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
918  }
919 
920  if( cputime ) _times[GET_POLYHEDRA_TIME] = timer->time_elapsed();
921  // Get node file ids for all elements
922  Range nodes;
923  intersect( fileInfo->nodes, file_ids, nodes );
924  for( int i = 0; i < fileInfo->num_elem_desc; ++i )
925  {
926  EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
927  if( type <= MBVERTEX || type >= MBENTITYSET )
928  {
929  assert( false ); // For debug code die for unknown element types
930  continue; // For release code, skip unknown element types
931  }
932  if( MBPOLYHEDRON == type ) continue;
933 
934  debug_barrier();
935  dbgOut.printf( 2, " Getting element node IDs for: %s\n", fileInfo->elems[i].handle );
936 
937  Range subset;
938  intersect( fileInfo->elems[i].desc, file_ids, subset );
939  mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
940 
941  // If dimension is max_dim, then we can create the elements now
942  // so we don't have to read the table again later (connectivity
943  // will be fixed up after nodes are created, when update_connectivity()
944  // is called. For elements of a smaller dimension, we just build
945  // the node ID range now because a) we'll have to read the whole
946  // connectivity table again later, and b) we don't want to worry
947  // about accidentally creating multiple copies of the same element.
948  if( CN::Dimension( type ) == max_dim )
949  rval = read_elems( i, subset, &nodes );
950  else
951  rval = read_elems( i, subset, nodes );
952  mpe_event.end( rval );
953  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
954  }
955  if( cputime ) _times[GET_ELEMENTS_TIME] = timer->time_elapsed();
956  debug_barrier();
957  mpe_event.start( "read coords" );
958  dbgOut.tprintf( 1, "READING NODE COORDINATES (%lu nodes in %lu selects)\n", (unsigned long)nodes.size(),
959  (unsigned long)nodes.psize() );
960 
961  // Read node coordinates and create vertices in MOAB
962  // NOTE: This populates the RangeMap with node file ids,
963  // which is expected by read_node_adj_elems.
964  rval = read_nodes( nodes );
965  mpe_event.end( rval );
966  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
967 
968  if( cputime ) _times[GET_NODES_TIME] = timer->time_elapsed();
969 
970  debug_barrier();
971  dbgOut.tprint( 1, "READING ELEMENTS\n" );
972 
973  // Decide if we need to read additional elements
974  enum SideMode
975  {
976  SM_EXPLICIT,
977  SM_NODES,
978  SM_SIDES
979  };
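 // SM_EXPLICIT: read only explicitly selected elements; SM_NODES: also read
 // any element whose nodes were all read; SM_SIDES: like SM_NODES, but later
 // delete lower-dimension entities that are not sides of kept elements.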
980  int side_mode;
981  const char* const options[] = { "EXPLICIT", "NODES", "SIDES", 0 };
982  rval = opts.match_option( "ELEMENTS", options, side_mode );
983  if( MB_ENTITY_NOT_FOUND == rval )
984  {
985  // If only nodes were specified, then default to "NODES", otherwise
986  // default to "SIDES".
987  if( 0 == max_dim )
988  side_mode = SM_NODES;
989  else
990  side_mode = SM_SIDES;
991  }
992  else if( MB_SUCCESS != rval )
993  {
994  MB_SET_ERR( rval, "Invalid value for 'ELEMENTS' option" );
995  }
996 
997  if( side_mode == SM_SIDES /*ELEMENTS=SIDES*/ && max_dim == 0 /*node-based*/ )
998  {
999  // Read elements until we find something. Once we find something,
1000  // read only elements of the same dimension. NOTE: loop termination
1001  // criterion changes on both sides (max_dim can be changed in loop
1002  // body).
1003  for( int dim = 3; dim >= max_dim; --dim )
1004  {
1005  for( int i = 0; i < fileInfo->num_elem_desc; ++i )
1006  {
1007  EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
1008  if( CN::Dimension( type ) == dim )
1009  {
1010  debug_barrier();
1011  dbgOut.tprintf( 2, " Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
1012  mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
1013  Range ents;
1014  rval = read_node_adj_elems( fileInfo->elems[i], &ents );
1015  mpe_event.end( rval );
1016  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1017  if( !ents.empty() ) max_dim = 3;
1018  }
1019  }
1020  }
1021  }
1022 
1023  if( cputime ) _times[GET_NODEADJ_TIME] = timer->time_elapsed();
1024  Range side_entities;
1025  if( side_mode != SM_EXPLICIT /*ELEMENTS=NODES || ELEMENTS=SIDES*/ )
1026  {
1027  if( 0 == max_dim ) max_dim = 4;
1028  // Now read any additional elements for which we've already read all
1029  // of the nodes.
1030  for( int dim = max_dim - 1; dim > 0; --dim )
1031  {
1032  for( int i = 0; i < fileInfo->num_elem_desc; ++i )
1033  {
1034  EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
1035  if( CN::Dimension( type ) == dim )
1036  {
1037  debug_barrier();
1038  dbgOut.tprintf( 2, " Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
1039  mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
1040  rval = read_node_adj_elems( fileInfo->elems[i], &side_entities );
1041  mpe_event.end( rval );
1042  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1043  }
1044  }
1045  }
1046  }
1047 
1048  // We need to do this here for polyhedra to be handled correctly.
1049  // We have to wait until the faces are read in the above code block,
1050  // but need to create the connectivity before doing update_connectivity,
1051  // which might otherwise delete polyhedra faces.
1052  if( cputime ) _times[GET_SIDEELEM_TIME] = timer->time_elapsed();
1053 
1054  debug_barrier();
1055  dbgOut.tprint( 1, "UPDATING CONNECTIVITY ARRAYS FOR READ ELEMENTS\n" );
1056  mpe_event.start( "updating connectivity for elements read before vertices" );
1057  rval = update_connectivity();
1058  mpe_event.end();
1059  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1060 
1061  if( cputime ) _times[UPDATECONN_TIME] = timer->time_elapsed();
1062 
1063  dbgOut.tprint( 1, "READING ADJACENCIES\n" );
1064  for( int i = 0; i < fileInfo->num_elem_desc; ++i )
1065  {
1066  if (fileInfo->elems[i].have_adj /*&&
1067  idMap.intersects(fileInfo->elems[i].desc.start_id, fileInfo->elems[i].desc.count) */)
1068  {
1069  mpe_event.start( "reading adjacencies for ", fileInfo->elems[i].handle );
1070  long len;
1071  hid_t th = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &len, &status );
1072  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1073 
1074  rval = read_adjacencies( th, len );
1075  mhdf_closeData( filePtr, th, &status );
1076  mpe_event.end( rval );
1077  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1078  }
1079  }
1080 
1081  if( cputime ) _times[ADJACENCY_TIME] = timer->time_elapsed();
1082 
1083  // If doing ELEMENTS=SIDES then we need to delete any entities
1084  // that we read that aren't actually sides (e.g. an interior face
1085  // that connects two disjoint portions of the part). Both
1086  // update_connectivity and reading of any explicit adjacencies must
1087  // happen before this.
1088  if( side_mode == SM_SIDES )
1089  {
1090  debug_barrier();
1091  mpe_event.start( "cleaning up non-side lower-dim elements" );
1092  dbgOut.tprint( 1, "CHECKING FOR AND DELETING NON-SIDE ELEMENTS\n" );
1093  rval = delete_non_side_elements( side_entities );
1094  mpe_event.end( rval );
1095  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1096  }
1097 
1098  if( cputime ) _times[DELETE_NON_SIDEELEM_TIME] = timer->time_elapsed();
1099 
1100  debug_barrier();
1101  dbgOut.tprint( 1, "READING SETS\n" );
1102 
1103  // If reading contained/child sets but not their contents then find
1104  // them now. If we were also reading their contents we would
1105  // have found them already.
1106  if( content_mode == RSM_SETS || child_mode == RSM_SETS )
1107  {
1108  dbgOut.tprint( 1, " doing read_set_ids_recursive\n" );
1109  mpe_event.start( "finding recursively contained sets" );
1110  rval = read_set_ids_recursive( sets, content_mode == RSM_SETS, child_mode == RSM_SETS );
1111  mpe_event.end( rval );
1112  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1113  }
1114 
1116 
1117  dbgOut.tprint( 1, " doing find_sets_containing\n" );
1118  mpe_event.start( "finding sets containing any read entities" );
1119 
1120  // Decide whether to read set-containing parents
1121  bool read_set_containing_parents = true;
1122  std::string tmp_opt;
1123  rval = opts.get_option( "NO_SET_CONTAINING_PARENTS", tmp_opt );
1124  if( MB_SUCCESS == rval ) read_set_containing_parents = false;
1125 
1126  // Append file IDs of sets containing any of the nodes or elements
1127  // we've read up to this point.
1128  rval = find_sets_containing( sets, read_set_containing_parents );
1129  mpe_event.end( rval );
1130  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1131 
1132  if( cputime ) _times[FIND_SETS_CONTAINING_TIME] = timer->time_elapsed();
1133 
1134  // Now actually read all set data and instantiate sets in MOAB.
1135  // Get any contained sets out of file_ids.
1136  mpe_event.start( "reading set contents/parents/children" );
1137  EntityHandle first_set = fileInfo->sets.start_id;
1138  sets.merge( file_ids.lower_bound( first_set ), file_ids.lower_bound( first_set + fileInfo->sets.count ) );
1139  dbgOut.tprint( 1, " doing read_sets\n" );
1140  rval = read_sets( sets );
1141  mpe_event.end( rval );
1142  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1143 
1144  if( cputime ) _times[READ_SETS_TIME] = timer->time_elapsed();
1145 
1146  dbgOut.tprint( 1, "READING TAGS\n" );
1147 
1148  for( int i = 0; i < fileInfo->num_tag_desc; ++i )
1149  {
1150  mpe_event.start( "reading tag: ", fileInfo->tags[i].name );
1151  rval = read_tag( i );
1152  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1153  }
1154 
1155  if( cputime ) _times[READ_TAGS_TIME] = timer->time_elapsed();
1156 
1157  dbgOut.tprint( 1, "PARTIAL READ COMPLETE.\n" );
1158 
1159  return MB_SUCCESS;
1160 }
1161 
1162 ErrorCode ReadHDF5::search_tag_values( int tag_index,
1163  const std::vector< int >& sorted_values,
1164  Range& file_ids,
1165  bool sets_only )
1166 {
1167  ErrorCode rval;
1168  mhdf_Status status;
1169  std::vector< EntityHandle >::iterator iter;
1170  const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
1171  long size;
1172  long start_id;
1173 
1174  CHECK_OPEN_HANDLES;
1175 
1176  debug_barrier();
1177 
1178  // Do dense data
1179 
1180  hid_t table;
1181  const char* name;
1182  std::vector< EntityHandle > indices;
1183  // These are probably in order of dimension, so iterate
1184  // in reverse order to make Range insertions more efficient.
1185  std::vector< int > grp_indices( tag.dense_elem_indices, tag.dense_elem_indices + tag.num_dense_indices );
1186  for( std::vector< int >::reverse_iterator i = grp_indices.rbegin(); i != grp_indices.rend(); ++i )
1187  {
1188  int idx = *i;
1189  if( idx == -2 )
1190  {
1191  name = mhdf_set_type_handle();
1192  start_id = fileInfo->sets.start_id;
1193  }
1194  else if( sets_only )
1195  {
1196  continue;
1197  }
1198  else if( idx == -1 )
1199  {
1200  name = mhdf_node_type_handle();
1201  start_id = fileInfo->nodes.start_id;
1202  }
1203  else
1204  {
1205  if( idx < 0 || idx >= fileInfo->num_elem_desc ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1206  name = fileInfo->elems[idx].handle;
1207  start_id = fileInfo->elems[idx].desc.start_id;
1208  }
1209  table = mhdf_openDenseTagData( filePtr, tag.name, name, &size, &status );
1210  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1211  rval = search_tag_values( table, size, sorted_values, indices );
1212  mhdf_closeData( filePtr, table, &status );
1213  if( MB_SUCCESS != rval || is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1214  // Convert from table indices to file IDs and add to result list
1215  std::sort( indices.begin(), indices.end(), std::greater< EntityHandle >() );
1216  std::transform( indices.begin(), indices.end(), range_inserter( file_ids ),
1217  // std::bind1st(std::plus<long>(), start_id));
1218  std::bind( std::plus< long >(), start_id, std::placeholders::_1 ) );
1219  indices.clear();
1220  }
1221 
1222  if( !tag.have_sparse ) return MB_SUCCESS;
1223 
1224  // Do sparse data
1225 
1226  hid_t tables[2];
1227  long junk; // Redundant value for non-variable-length tags
1228  mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
1229  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1230  rval = search_tag_values( tables[1], size, sorted_values, indices );
1231  mhdf_closeData( filePtr, tables[1], &status );
1232  if( MB_SUCCESS != rval || is_error( status ) )
1233  {
1234  mhdf_closeData( filePtr, tables[0], &status );
1235  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1236  }
1237  // Convert to ranges
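 // e.g. sorted indices {2, 3, 4, 7, 9, 10} are encoded as the begin/end
 // pairs {2,4, 7,7, 9,10} consumed by the read loop below.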
1238  std::sort( indices.begin(), indices.end() );
1239  std::vector< EntityHandle > ranges;
1240  iter = indices.begin();
1241  while( iter != indices.end() )
1242  {
1243  ranges.push_back( *iter );
1244  EntityHandle last = *iter;
1245  for( ++iter; iter != indices.end() && ( last + 1 ) == *iter; ++iter, ++last )
1246  ;
1247  ranges.push_back( last );
1248  }
1249  // Read file ids
1250  iter = ranges.begin();
1251  unsigned long offset = 0;
1252  while( iter != ranges.end() )
1253  {
1254  long begin = *iter;
1255  ++iter;
1256  long end = *iter;
1257  ++iter;
1258  mhdf_readSparseTagEntitiesWithOpt( tables[0], begin, end - begin + 1, handleType, &indices[offset], indepIO,
1259  &status );
1260  if( is_error( status ) )
1261  {
1262  mhdf_closeData( filePtr, tables[0], &status );
1263  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1264  }
1265  offset += end - begin + 1;
1266  }
1267  mhdf_closeData( filePtr, tables[0], &status );
1268  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1269  assert( offset == indices.size() );
1270  std::sort( indices.begin(), indices.end() );
1271 
1272  if( sets_only )
1273  {
1274  iter = std::lower_bound( indices.begin(), indices.end(),
1275  (EntityHandle)( fileInfo->sets.start_id + fileInfo->sets.count ) );
1276  indices.erase( iter, indices.end() );
1277  iter = std::lower_bound( indices.begin(), indices.end(), fileInfo->sets.start_id );
1278  indices.erase( indices.begin(), iter );
1279  }
1280  copy_sorted_file_ids( &indices[0], indices.size(), file_ids );
1281 
1282  return MB_SUCCESS;
1283 }
1284 
1285 ErrorCode ReadHDF5::get_tagged_entities( int tag_index, Range& file_ids )
1286 {
1287  const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
1288 
1289  CHECK_OPEN_HANDLES;
1290 
1291  // Do dense data
1292  Range::iterator hint = file_ids.begin();
1293  for( int i = 0; i < tag.num_dense_indices; ++i )
1294  {
1295  int idx = tag.dense_elem_indices[i];
1296  mhdf_EntDesc* ents;
1297  if( idx == -2 )
1298  ents = &fileInfo->sets;
1299  else if( idx == -1 )
1300  ents = &fileInfo->nodes;
1301  else
1302  {
1303  if( idx < 0 || idx >= fileInfo->num_elem_desc ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1304  ents = &( fileInfo->elems[idx].desc );
1305  }
1306 
1307  EntityHandle h = (EntityHandle)ents->start_id;
1308  hint = file_ids.insert( hint, h, h + ents->count - 1 );
1309  }
1310 
1311  if( !tag.have_sparse ) return MB_SUCCESS;
1312 
1313  // Do sparse data
1314 
1315  mhdf_Status status;
1316  hid_t tables[2];
1317  long size, junk;
1318  mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
1319  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1320  mhdf_closeData( filePtr, tables[1], &status );
1321  if( is_error( status ) )
1322  {
1323  mhdf_closeData( filePtr, tables[0], &status );
1324  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1325  }
1326 
1327  hid_t file_type = H5Dget_type( tables[0] );
1328  if( file_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1329 
1330  hint = file_ids.begin();
1331  EntityHandle* buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
1332  const long buffer_size = bufferSize / std::max( sizeof( EntityHandle ), H5Tget_size( file_type ) );
1333  long remaining = size, offset = 0;
1334  while( remaining )
1335  {
1336  long count = std::min( buffer_size, remaining );
1337  assert_range( buffer, count );
1338  mhdf_readSparseTagEntitiesWithOpt( *tables, offset, count, file_type, buffer, collIO, &status );
1339  if( is_error( status ) )
1340  {
1341  H5Tclose( file_type );
1342  mhdf_closeData( filePtr, *tables, &status );
1343  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1344  }
1345  H5Tconvert( file_type, handleType, count, buffer, NULL, H5P_DEFAULT );
1346 
1347  std::sort( buffer, buffer + count );
1348  for( long i = 0; i < count; ++i )
1349  hint = file_ids.insert( hint, buffer[i], buffer[i] );
1350 
1351  remaining -= count;
1352  offset += count;
1353  }
1354 
1355  H5Tclose( file_type );
1356  mhdf_closeData( filePtr, *tables, &status );
1357  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1358 
1359  return MB_SUCCESS;
1360 }
1361 
1362 ErrorCode ReadHDF5::search_tag_values( hid_t tag_table,
1363  unsigned long table_size,
1364  const std::vector< int >& sorted_values,
1365  std::vector< EntityHandle >& value_indices )
1366 {
1367  debug_barrier();
1368 
1369  CHECK_OPEN_HANDLES;
1370 
1371  mhdf_Status status;
1372  size_t chunk_size = bufferSize / sizeof( int );
1373  int* buffer = reinterpret_cast< int* >( dataBuffer );
1374  size_t remaining = table_size, offset = 0;
1375  while( remaining )
1376  {
1377  // Get a block of tag values
1378  size_t count = std::min( chunk_size, remaining );
1379  assert_range( buffer, count );
1380  mhdf_readTagValuesWithOpt( tag_table, offset, count, H5T_NATIVE_INT, buffer, collIO, &status );
1381  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1382 
1383  // Search tag values
1384  for( size_t i = 0; i < count; ++i )
1385  if( std::binary_search( sorted_values.begin(), sorted_values.end(), (int)buffer[i] ) )
1386  value_indices.push_back( i + offset );
1387 
1388  offset += count;
1389  remaining -= count;
1390  }
1391 
1392  return MB_SUCCESS;
1393 }
1394 
1395 ErrorCode ReadHDF5::read_nodes( const Range& node_file_ids )
1396 {
1397  ErrorCode rval;
1398  mhdf_Status status;
1399  const int dim = fileInfo->nodes.vals_per_ent;
1400  Range range;
1401 
1402  CHECK_OPEN_HANDLES;
1403 
1404  if( node_file_ids.empty() && !nativeParallel ) return MB_SUCCESS;
1405 
1406  int cdim;
1407  rval = iFace->get_dimension( cdim );
1408  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1409 
1410  if( cdim < dim )
1411  {
1412  rval = iFace->set_dimension( dim );
1413  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1414  }
1415 
1416  hid_t data_id = mhdf_openNodeCoordsSimple( filePtr, &status );
1417  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1418 
1419  EntityHandle handle;
1420  std::vector< double* > arrays( dim );
1421  const size_t num_nodes = node_file_ids.size();
1422  if( num_nodes > 0 )
1423  {
1424  rval = readUtil->get_node_coords( dim, (int)num_nodes, 0, handle, arrays );
1425  if( MB_SUCCESS != rval )
1426  {
1427  mhdf_closeData( filePtr, data_id, &status );
1428  MB_SET_ERR( rval, "ReadHDF5 Failure" );
1429  }
1430  }
1431 
1432  if( blockedCoordinateIO )
1433  {
1434  try
1435  {
1436  for( int d = 0; d < dim; ++d )
1437  {
1438  ReadHDF5Dataset reader( "blocked coords", data_id, nativeParallel, mpiComm, false );
1439  reader.set_column( d );
1440  reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, num_nodes, H5T_NATIVE_DOUBLE );
1441  dbgOut.printf( 3, "Reading %lu chunks for coordinate dimension %d\n", reader.get_read_count(), d );
1442  // Should normally only have one read call, unless sparse nature
1443  // of file_ids caused reader to do something strange
1444  size_t count, offset = 0;
1445  int nn = 0;
1446  while( !reader.done() )
1447  {
1448  dbgOut.printf( 3, "Reading chunk %d for dimension %d\n", ++nn, d );
1449  reader.read( arrays[d] + offset, count );
1450  offset += count;
1451  }
1452  if( offset != num_nodes )
1453  {
1454  mhdf_closeData( filePtr, data_id, &status );
1455  assert( false );
1456  return MB_FAILURE;
1457  }
1458  }
1459  }
1460  catch( ReadHDF5Dataset::Exception )
1461  {
1462  mhdf_closeData( filePtr, data_id, &status );
1463  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1464  }
1465  }
1466  else
1467  { // !blockedCoordinateIO
1468  double* buffer = (double*)dataBuffer;
1469  long chunk_size = bufferSize / ( 3 * sizeof( double ) );
1470  long coffset = 0;
1471  int nn = 0;
1472  try
1473  {
1474  ReadHDF5Dataset reader( "interleaved coords", data_id, nativeParallel, mpiComm, false );
1475  reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, chunk_size, H5T_NATIVE_DOUBLE );
1476  dbgOut.printf( 3, "Reading %lu chunks for node coordinates\n", reader.get_read_count() );
1477  while( !reader.done() )
1478  {
1479  dbgOut.tprintf( 3, "Reading chunk %d of node coords\n", ++nn );
1480 
1481  size_t count;
1482  reader.read( buffer, count );
1483 
1484  for( size_t i = 0; i < count; ++i )
1485  for( int d = 0; d < dim; ++d )
1486  arrays[d][coffset + i] = buffer[dim * i + d];
1487  coffset += count;
1488  }
1489  }
1490  catch( ReadHDF5Dataset::Exception )
1491  {
1492  mhdf_closeData( filePtr, data_id, &status );
1493  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1494  }
1495  }
1496 
1497  dbgOut.print( 3, "Closing node coordinate table\n" );
1498  mhdf_closeData( filePtr, data_id, &status );
1499  for( int d = dim; d < cdim; ++d )
1500  memset( arrays[d], 0, num_nodes * sizeof( double ) );
1501 
1502  dbgOut.printf( 3, "Updating ID to handle map for %lu nodes\n", (unsigned long)node_file_ids.size() );
1503  return insert_in_id_map( node_file_ids, handle );
1504 }
1505 
1506 ErrorCode ReadHDF5::read_elems( int i )
1507 {
1508  Range ids;
1509  ids.insert( fileInfo->elems[i].desc.start_id,
1510  fileInfo->elems[i].desc.start_id + fileInfo->elems[i].desc.count - 1 );
1511  return read_elems( i, ids );
1512 }
1513 
1514 ErrorCode ReadHDF5::read_elems( int i, const Range& file_ids, Range* node_ids )
1515 {
1516  if( fileInfo->elems[i].desc.vals_per_ent < 0 )
1517  {
1518  if( node_ids != 0 ) // Not implemented for version 3 format of poly data
1519  MB_CHK_ERR( MB_TYPE_OUT_OF_RANGE );
1520  return read_poly( fileInfo->elems[i], file_ids );
1521  }
1522  else
1523  return read_elems( fileInfo->elems[i], file_ids, node_ids );
1524 }
1525 
1526 ErrorCode ReadHDF5::read_elems( const mhdf_ElemDesc& elems, const Range& file_ids, Range* node_ids )
1527 {
1528  CHECK_OPEN_HANDLES;
1529 
1530  debug_barrier();
1531  dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", elems.handle,
1532  (unsigned long)file_ids.size(), (unsigned long)file_ids.psize() );
1533 
1534  ErrorCode rval = MB_SUCCESS;
1535  mhdf_Status status;
1536 
1537  EntityType type = CN::EntityTypeFromName( elems.type );
1538  if( type == MBMAXTYPE )
1539  {
1540  MB_SET_ERR( MB_FAILURE, "Unknown element type: \"" << elems.type << "\"" );
1541  }
1542 
1543  const int nodes_per_elem = elems.desc.vals_per_ent;
1544  const size_t count = file_ids.size();
1545  hid_t data_id = mhdf_openConnectivitySimple( filePtr, elems.handle, &status );
1546  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1547 
1548  EntityHandle handle;
1549  EntityHandle* array = 0;
1550  if( count > 0 ) rval = readUtil->get_element_connect( count, nodes_per_elem, type, 0, handle, array );
1551  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1552 
1553  try
1554  {
1555  EntityHandle* buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
1556  const size_t buffer_size = bufferSize / ( sizeof( EntityHandle ) * nodes_per_elem );
1557  ReadHDF5Dataset reader( elems.handle, data_id, nativeParallel, mpiComm );
1558  reader.set_file_ids( file_ids, elems.desc.start_id, buffer_size, handleType );
1559  dbgOut.printf( 3, "Reading connectivity in %lu chunks for element group \"%s\"\n", reader.get_read_count(),
1560  elems.handle );
1561  EntityHandle* iter = array;
1562  int nn = 0;
1563  while( !reader.done() )
1564  {
1565  dbgOut.printf( 3, "Reading chunk %d for \"%s\"\n", ++nn, elems.handle );
1566 
1567  size_t num_read;
1568  reader.read( buffer, num_read );
1569  iter = std::copy( buffer, buffer + num_read * nodes_per_elem, iter );
1570 
1571  if( node_ids )
1572  {
1573  std::sort( buffer, buffer + num_read * nodes_per_elem );
1574  num_read = std::unique( buffer, buffer + num_read * nodes_per_elem ) - buffer;
1575  copy_sorted_file_ids( buffer, num_read, *node_ids );
1576  }
1577  }
1578  assert( iter - array == (ptrdiff_t)count * nodes_per_elem );
1579  }
1580  catch( ReadHDF5Dataset::Exception )
1581  {
1582  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1583  }
1584 
1585  if( !node_ids )
1586  {
1587  rval = convert_id_to_handle( array, count * nodes_per_elem );
1588  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1589 
1590  rval = readUtil->update_adjacencies( handle, count, nodes_per_elem, array );
1591  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1592  }
1593  else
1594  {
1595  IDConnectivity t;
1596  t.handle = handle;
1597  t.count = count;
1598  t.nodes_per_elem = nodes_per_elem;
1599  t.array = array;
1600  idConnectivityList.push_back( t );
1601  }
1602 
1603  return insert_in_id_map( file_ids, handle );
1604 }
1605 
1606 ErrorCode ReadHDF5::update_connectivity()
1607 {
1608  ErrorCode rval;
1609  std::vector< IDConnectivity >::iterator i;
1610  for( i = idConnectivityList.begin(); i != idConnectivityList.end(); ++i )
1611  {
1612  rval = convert_id_to_handle( i->array, i->count * i->nodes_per_elem );
1613  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1614 
1615  rval = readUtil->update_adjacencies( i->handle, i->count, i->nodes_per_elem, i->array );
1616  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1617  }
1618  idConnectivityList.clear();
1619 
1620  return MB_SUCCESS;
1621 }
1622 
1623 ErrorCode ReadHDF5::read_node_adj_elems( const mhdf_ElemDesc& group, Range* handles_out )
1624 {
1625  mhdf_Status status;
1626  ErrorCode rval;
1627 
1628  CHECK_OPEN_HANDLES;
1629 
1630  hid_t table = mhdf_openConnectivitySimple( filePtr, group.handle, &status );
1631  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1632 
1633  rval = read_node_adj_elems( group, table, handles_out );
1634 
1635  mhdf_closeData( filePtr, table, &status );
1636  if( MB_SUCCESS == rval && is_error( status ) ) MB_SET_ERR_RET_VAL( "ReadHDF5 Failure", MB_FAILURE );
1637 
1638  return rval;
1639 }
1640 
1641 ErrorCode ReadHDF5::read_node_adj_elems( const mhdf_ElemDesc& group, hid_t table_handle, Range* handles_out )
1642 {
1643  CHECK_OPEN_HANDLES;
1644 
1645  debug_barrier();
1646 
1647  mhdf_Status status;
1648  ErrorCode rval;
1649  IODebugTrack debug_track( debugTrack, std::string( group.handle ) );
1650 
1651  // Copy data to local variables (makes other code clearer)
1652  const int node_per_elem = group.desc.vals_per_ent;
1653  long start_id = group.desc.start_id;
1654  long remaining = group.desc.count;
1655  const EntityType type = CN::EntityTypeFromName( group.type );
1656 
1657  // Figure out how many elements we can read in each pass
1658  long* const buffer = reinterpret_cast< long* >( dataBuffer );
1659  const long buffer_size = bufferSize / ( node_per_elem * sizeof( buffer[0] ) );
1660  // Read all element connectivity in buffer_size blocks
1661  long offset = 0;
1662  dbgOut.printf( 3, "Reading node-adjacent elements from \"%s\" in %ld chunks\n", group.handle,
1663  ( remaining + buffer_size - 1 ) / buffer_size );
1664  int nn = 0;
1665  Range::iterator hint;
1666  if( handles_out ) hint = handles_out->begin();
1667  while( remaining )
1668  {
1669  dbgOut.printf( 3, "Reading chunk %d of connectivity data for \"%s\"\n", ++nn, group.handle );
1670 
1671  // Read a block of connectivity data
1672  const long count = std::min( remaining, buffer_size );
1673  debug_track.record_io( offset, count );
1674  assert_range( buffer, count * node_per_elem );
1675  mhdf_readConnectivityWithOpt( table_handle, offset, count, H5T_NATIVE_LONG, buffer, collIO, &status );
1676  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1677  offset += count;
1678  remaining -= count;
1679 
1680  // Count the number of elements in the block that we want,
1681  // zero connectivity for other elements
1682  long num_elem = 0;
1683  long* iter = buffer;
1684  for( long i = 0; i < count; ++i )
1685  {
1686  for( int j = 0; j < node_per_elem; ++j )
1687  {
1688  iter[j] = (long)idMap.find( iter[j] );
1689  if( !iter[j] )
1690  {
1691  iter[0] = 0;
1692  break;
1693  }
1694  }
1695  if( iter[0] ) ++num_elem;
1696  iter += node_per_elem;
1697  }
1698 
1699  if( !num_elem )
1700  {
1701  start_id += count;
1702  continue;
1703  }
1704 
1705  // Create elements
1706  EntityHandle handle;
1707  EntityHandle* array;
1708  rval = readUtil->get_element_connect( (int)num_elem, node_per_elem, type, 0, handle, array );
1709  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1710 
1711  // Copy all non-zero connectivity values
1712  iter = buffer;
1713  EntityHandle* iter2 = array;
1714  EntityHandle h = handle;
1715  for( long i = 0; i < count; ++i )
1716  {
1717  if( !*iter )
1718  {
1719  iter += node_per_elem;
1720  continue;
1721  }
1722  if( !idMap.insert( start_id + i, h++, 1 ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1723 
1724  long* const end = iter + node_per_elem;
1725  for( ; iter != end; ++iter, ++iter2 )
1726  *iter2 = (EntityHandle)*iter;
1727  }
1728  assert( iter2 - array == num_elem * node_per_elem );
1729  start_id += count;
1730 
1731  rval = readUtil->update_adjacencies( handle, num_elem, node_per_elem, array );
1732  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1733  if( handles_out ) hint = handles_out->insert( hint, handle, handle + num_elem - 1 );
1734  }
1735 
1736  debug_track.all_reduce();
1737  return MB_SUCCESS;
1738 }
1739 
1740 ErrorCode ReadHDF5::read_elems( int i, const Range& elems_in, Range& nodes )
1741 {
1742  CHECK_OPEN_HANDLES;
1743 
1744  debug_barrier();
1745  dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", fileInfo->elems[i].handle,
1746  (unsigned long)elems_in.size(), (unsigned long)elems_in.psize() );
1747 
1748  EntityHandle* const buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
1749  const int node_per_elem = fileInfo->elems[i].desc.vals_per_ent;
1750  const size_t buffer_size = bufferSize / ( node_per_elem * sizeof( EntityHandle ) );
1751 
1752  if( elems_in.empty() ) return MB_SUCCESS;
1753 
1754  assert( (long)elems_in.front() >= fileInfo->elems[i].desc.start_id );
1755  assert( (long)elems_in.back() - fileInfo->elems[i].desc.start_id < fileInfo->elems[i].desc.count );
1756 
1757  // We don't support version 3 style poly element data
1758  if( fileInfo->elems[i].desc.vals_per_ent <= 0 ) MB_CHK_ERR( MB_TYPE_OUT_OF_RANGE );
1759 
1760  mhdf_Status status;
1761  hid_t table = mhdf_openConnectivitySimple( filePtr, fileInfo->elems[i].handle, &status );
1762  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1763 
1764  try
1765  {
1766  ReadHDF5Dataset reader( fileInfo->elems[i].handle, table, nativeParallel, mpiComm );
1767  reader.set_file_ids( elems_in, fileInfo->elems[i].desc.start_id, buffer_size, handleType );
1768  dbgOut.printf( 3, "Reading node list in %lu chunks for \"%s\"\n", reader.get_read_count(),
1769  fileInfo->elems[i].handle );
1770  int nn = 0;
1771  while( !reader.done() )
1772  {
1773  dbgOut.printf( 3, "Reading chunk %d of \"%s\" connectivity\n", ++nn, fileInfo->elems[i].handle );
1774  size_t num_read;
1775  reader.read( buffer, num_read );
1776  std::sort( buffer, buffer + num_read * node_per_elem );
1777  num_read = std::unique( buffer, buffer + num_read * node_per_elem ) - buffer;
1778  copy_sorted_file_ids( buffer, num_read, nodes );
1779  }
1780  }
1781  catch( ReadHDF5Dataset::Exception )
1782  {
1783  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1784  }
1785 
1786  return MB_SUCCESS;
1787 }
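// A minimal sketch of the dedup pass above, assuming 'buffer' holds n node
// file ids from one chunk: sorting plus std::unique leaves the distinct ids
// in a sorted prefix of length m, which copy_sorted_file_ids merges into
// 'nodes'.
//
//   std::sort( buffer, buffer + n );
//   size_t m = std::unique( buffer, buffer + n ) - buffer;   // m <= n
//   copy_sorted_file_ids( buffer, m, nodes );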
1788 
1789 ErrorCode ReadHDF5::read_poly( const mhdf_ElemDesc& elems, const Range& file_ids )
1790 {
1791  class PolyReader : public ReadHDF5VarLen
1792  {
1793  private:
1794  const EntityType type;
1795  ReadHDF5* readHDF5;
1796 
1797  public:
1798  PolyReader( EntityType elem_type, void* buffer, size_t buffer_size, ReadHDF5* owner, DebugOutput& dbg )
1799  : ReadHDF5VarLen( dbg, buffer, buffer_size ), type( elem_type ), readHDF5( owner )
1800  {
1801  }
1802  virtual ~PolyReader() {}
1803  ErrorCode store_data( EntityHandle file_id, void* data, long len, bool )
1804  {
1805  size_t valid;
1806  EntityHandle* conn = reinterpret_cast< EntityHandle* >( data );
1807  readHDF5->convert_id_to_handle( conn, len, valid );
1808  if( valid != (size_t)len ) MB_CHK_ERR( MB_ENTITY_NOT_FOUND );
1809  EntityHandle handle;
1810  ErrorCode rval = readHDF5->moab()->create_element( type, conn, len, handle );
1811  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1812 
1813  rval = readHDF5->insert_in_id_map( file_id, handle );
1814  return rval;
1815  }
1816  };
1817 
1818  CHECK_OPEN_HANDLES;
1819 
1820  debug_barrier();
1821 
1822  EntityType type = CN::EntityTypeFromName( elems.type );
1823  if( type == MBMAXTYPE )
1824  {
1825  MB_SET_ERR( MB_FAILURE, "Unknown element type: \"" << elems.type << "\"" );
1826  }
1827 
1828  hid_t handles[2];
1829  mhdf_Status status;
1830  long num_poly, num_conn, first_id;
1831  mhdf_openPolyConnectivity( filePtr, elems.handle, &num_poly, &num_conn, &first_id, handles, &status );
1832  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1833 
1834  std::string nm( elems.handle );
1835  ReadHDF5Dataset offset_reader( ( nm + " offsets" ).c_str(), handles[0], nativeParallel, mpiComm, true );
1836  ReadHDF5Dataset connect_reader( ( nm + " data" ).c_str(), handles[1], nativeParallel, mpiComm, true );
1837 
1838  PolyReader tool( type, dataBuffer, bufferSize, this, dbgOut );
1839  return tool.read( offset_reader, connect_reader, file_ids, first_id, handleType );
1840 }
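// Design note: ReadHDF5VarLen walks the offset and connectivity tables in
// buffer-sized chunks and invokes store_data() once per entity, so the
// PolyReader above only supplies the per-entity action (create the element,
// record its file id) and inherits all chunking logic from the base class.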
1841 
1842 ErrorCode ReadHDF5::delete_non_side_elements( const Range& side_ents )
1843 {
1844  ErrorCode rval;
1845 
1846  // Build list of entities that we need to find the sides of
1847  Range explicit_ents;
1848  Range::iterator hint = explicit_ents.begin();
1849  for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
1850  {
1851  EntityHandle start = i->value;
1852  EntityHandle end = i->value + i->count - 1;
1853  EntityType type = TYPE_FROM_HANDLE( start );
1854  assert( type == TYPE_FROM_HANDLE( end ) ); // Otherwise handle space entirely full!!
1855  if( type != MBVERTEX && type != MBENTITYSET ) hint = explicit_ents.insert( hint, start, end );
1856  }
1857  explicit_ents = subtract( explicit_ents, side_ents );
1858 
1859  // Figure out which entities we want to delete
1860  Range dead_ents( side_ents );
1861  Range::iterator ds, de, es;
1862  ds = dead_ents.lower_bound( CN::TypeDimensionMap[1].first );
1863  de = dead_ents.lower_bound( CN::TypeDimensionMap[2].first, ds );
1864  if( ds != de )
1865  {
1866  // Get subset of explicit ents of dimension greater than 1
1867  es = explicit_ents.lower_bound( CN::TypeDimensionMap[2].first );
1868  Range subset, adj;
1869  subset.insert( es, explicit_ents.end() );
1870  rval = iFace->get_adjacencies( subset, 1, false, adj, Interface::UNION );
1871  if( MB_SUCCESS != rval ) return rval;
1872  dead_ents = subtract( dead_ents, adj );
1873  }
1874  ds = dead_ents.lower_bound( CN::TypeDimensionMap[2].first );
1875  de = dead_ents.lower_bound( CN::TypeDimensionMap[3].first, ds );
1876  assert( de == dead_ents.end() );
1877  if( ds != de )
1878  {
1879  // Get subset of explicit ents of dimension 3
1880  es = explicit_ents.lower_bound( CN::TypeDimensionMap[3].first );
1881  Range subset, adj;
1882  subset.insert( es, explicit_ents.end() );
1883  rval = iFace->get_adjacencies( subset, 2, false, adj, Interface::UNION );
1884  if( MB_SUCCESS != rval ) return rval;
1885  dead_ents = subtract( dead_ents, adj );
1886  }
1887 
1888  // Now delete anything remaining in dead_ents
1889  dbgOut.printf( 2, "Deleting %lu elements\n", (unsigned long)dead_ents.size() );
1890  dbgOut.print( 4, "\tDead entities: ", dead_ents );
1891  rval = iFace->delete_entities( dead_ents );
1892  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1893 
1894  // Remove dead entities from ID map
1895  while( !dead_ents.empty() )
1896  {
1897  EntityHandle start = dead_ents.front();
1898  EntityID count = dead_ents.const_pair_begin()->second - start + 1;
1899  IDMap::iterator rit;
1900  for( rit = idMap.begin(); rit != idMap.end(); ++rit )
1901  if( rit->value <= start && (EntityID)( start - rit->value ) < rit->count ) break;
1902  if( rit == idMap.end() ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1903 
1904  EntityID offset = start - rit->value;
1905  EntityID avail = rit->count - offset;
1906  if( avail < count ) count = avail;
1907 
1908  dead_ents.erase( dead_ents.begin(), dead_ents.begin() + count );
1909  idMap.erase( rit->begin + offset, count );
1910  }
1911 
1912  return MB_SUCCESS;
1913 }
1914 
1915 ErrorCode ReadHDF5::read_sets( const Range& file_ids )
1916 {
1917  CHECK_OPEN_HANDLES;
1918 
1919  debug_barrier();
1920 
1921  mhdf_Status status;
1922  ErrorCode rval;
1923 
1924  const size_t num_sets = fileInfo->sets.count;
1925  if( !num_sets ) // If no sets at all!
1926  return MB_SUCCESS;
1927 
1928  // Create sets
1929  std::vector< unsigned > flags( file_ids.size() );
1930  Range::iterator si = file_ids.begin();
1931  for( size_t i = 0; i < flags.size(); ++i, ++si )
1932  flags[i] = setMeta[*si - fileInfo->sets.start_id][3] & ~(long)mhdf_SET_RANGE_BIT;
1933  EntityHandle start_handle;
1934  // The file ids could be empty, for empty partitions
1935  if( !file_ids.empty() )
1936  {
1937  rval = readUtil->create_entity_sets( flags.size(), &flags[0], 0, start_handle );
1938  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1939  rval = insert_in_id_map( file_ids, start_handle );
1940  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1941  }
1942 
1943  // Read contents
1944  if( fileInfo->have_set_contents )
1945  {
1946  long len = 0;
1947  hid_t handle = mhdf_openSetData( filePtr, &len, &status );
1948  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1949 
1950  ReadHDF5Dataset dat( "set contents", handle, nativeParallel, mpiComm, true );
1951  rval = read_set_data( file_ids, start_handle, dat, CONTENT );
1952  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1953  }
1954 
1955  // Read set child lists
1956  if( fileInfo->have_set_children )
1957  {
1958  long len = 0;
1959  hid_t handle = mhdf_openSetChildren( filePtr, &len, &status );
1960  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1961 
1962  ReadHDF5Dataset dat( "set children", handle, nativeParallel, mpiComm, true );
1963  rval = read_set_data( file_ids, start_handle, dat, CHILD );
1964  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1965  }
1966 
1967  // Read set parent lists
1968  if( fileInfo->have_set_parents )
1969  {
1970  long len = 0;
1971  hid_t handle = mhdf_openSetParents( filePtr, &len, &status );
1972  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1973 
1974  ReadHDF5Dataset dat( "set parents", handle, nativeParallel, mpiComm, true );
1975  rval = read_set_data( file_ids, start_handle, dat, PARENT );
1976  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
1977  }
1978 
1979  return MB_SUCCESS;
1980 }
1981 
1982 ErrorCode ReadHDF5::read_all_set_meta()
1983 {
1984  CHECK_OPEN_HANDLES;
1985 
1986  assert( !setMeta );
1987  const long num_sets = fileInfo->sets.count;
1988  if( !num_sets ) return MB_SUCCESS;
1989 
1990  mhdf_Status status;
1991  hid_t handle = mhdf_openSetMetaSimple( filePtr, &status );
1992  if( is_error( status ) )
1993  {
1994  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
1995  }
1996 
1997  // Allocate extra space if we need it for data conversion
1998  hid_t meta_type = H5Dget_type( handle );
1999  size_t size = H5Tget_size( meta_type );
2000  if( size > sizeof( long ) )
2001  setMeta = new long[( num_sets * size + ( sizeof( long ) - 1 ) ) / sizeof( long )][4];
2002  else
2003  setMeta = new long[num_sets][4];
2004 
2005  // Set some parameters based on whether or not each proc reads the
2006  // table or only the root reads it and bcasts it to the others
2007  int rank = 0;
2008  bool bcast = false;
2009  hid_t ioprop = H5P_DEFAULT;
2010 #ifdef MOAB_HAVE_MPI
2011  MPI_Comm comm = 0;
2012  if( nativeParallel )
2013  {
2014  rank = myPcomm->proc_config().proc_rank();
2015  comm = myPcomm->proc_config().proc_comm();
2016  bcast = bcastDuplicateReads;
2017  if( !bcast ) ioprop = collIO;
2018  }
2019 #endif
2020 
2021  if( !bcast || 0 == rank )
2022  {
2023  mhdf_readSetMetaWithOpt( handle, 0, num_sets, meta_type, setMeta, ioprop, &status );
2024  if( is_error( status ) )
2025  {
2026  H5Tclose( meta_type );
2027  mhdf_closeData( filePtr, handle, &status );
2028  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2029  }
2030 
2031  H5Tconvert( meta_type, H5T_NATIVE_LONG, num_sets * 4, setMeta, 0, H5P_DEFAULT );
2032  }
2033  mhdf_closeData( filePtr, handle, &status );
2034  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2035  H5Tclose( meta_type );
2036 
2037  if( bcast )
2038  {
2039 #ifdef MOAB_HAVE_MPI
2040  int ierr = MPI_Bcast( (void*)setMeta, num_sets * 4, MPI_LONG, 0, comm );
2041  if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2042 #else
2043  assert( rank == 0 ); // If not MPI, then only one proc
2044 #endif
2045  }
2046 
2047  return MB_SUCCESS;
2048 }
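// A hedged sketch of the read-once-and-broadcast idiom used above, which
// avoids every rank issuing an identical read of the same table when
// bcastDuplicateReads is enabled:
//
//   if( !bcast || 0 == rank )
//       /* read num_sets * 4 longs into setMeta (root only when bcast) */;
//   #ifdef MOAB_HAVE_MPI
//   if( bcast ) MPI_Bcast( (void*)setMeta, num_sets * 4, MPI_LONG, 0, comm );
//   #endif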
2049 
2050 ErrorCode ReadHDF5::read_set_ids_recursive( Range& sets_in_out, bool contained_sets, bool child_sets )
2051 {
2052  CHECK_OPEN_HANDLES;
2053  mhdf_Status status;
2054 
2055  if( !fileInfo->have_set_children ) child_sets = false;
2056  if( !fileInfo->have_set_contents ) contained_sets = false;
2057  if( !child_sets && !contained_sets ) return MB_SUCCESS;
2058 
2059  // Open data tables
2060  if( fileInfo->sets.count == 0 )
2061  {
2062  assert( sets_in_out.empty() );
2063  return MB_SUCCESS;
2064  }
2065 
2066  if( !contained_sets && !child_sets ) return MB_SUCCESS;
2067 
2068  ReadHDF5Dataset cont( "set contents", false, mpiComm );
2069  ReadHDF5Dataset child( "set children", false, mpiComm );
2070 
2071  if( contained_sets )
2072  {
2073  long content_len = 0;
2074  hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
2075  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2076  try
2077  {
2078  cont.init( content_handle, true );
2079  }
2080  catch( ReadHDF5Dataset::Exception )
2081  {
2082  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2083  }
2084  }
2085 
2086  if( child_sets )
2087  {
2088  long child_len = 0;
2089  hid_t child_handle = mhdf_openSetChildren( filePtr, &child_len, &status );
2090  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2091  try
2092  {
2093  child.init( child_handle, true );
2094  }
2095  catch( ReadHDF5Dataset::Exception )
2096  {
2097  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2098  }
2099  }
2100 
2101  ErrorCode rval = MB_SUCCESS;
2102  Range children, new_children( sets_in_out );
2103  int iteration_count = 0;
2104  do
2105  {
2106  ++iteration_count;
2107  dbgOut.tprintf( 2, "Iteration %d of read_set_ids_recursive\n", iteration_count );
2108  children.clear();
2109  if( child_sets )
2110  {
2111  rval = read_set_data( new_children, 0, child, CHILD, &children );
2112  if( MB_SUCCESS != rval ) break;
2113  }
2114  if( contained_sets )
2115  {
2116  rval = read_set_data( new_children, 0, cont, CONTENT, &children );
2117  // Remove any non-set values
2118  Range::iterator it = children.lower_bound( fileInfo->sets.start_id );
2119  children.erase( children.begin(), it );
2120  it = children.lower_bound( fileInfo->sets.start_id + fileInfo->sets.count );
2121  children.erase( it, children.end() );
2122  if( MB_SUCCESS != rval ) break;
2123  }
2124  new_children = subtract( children, sets_in_out );
2125  dbgOut.print_ints( 2, "Adding additional contained/child sets", new_children );
2126  sets_in_out.merge( new_children );
2127  } while( !new_children.empty() );
2128 
2129  return rval;
2130 }
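// Note: the do/while loop above is a fixed-point (transitive closure)
// computation. Each pass reads the contents/children of the sets found so
// far; 'new_children' keeps only ids not already in 'sets_in_out', so the
// loop terminates as soon as a pass discovers no new set ids.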
2131 
2132 ErrorCode ReadHDF5::find_sets_containing( Range& sets_out, bool read_set_containing_parents )
2133 {
2134  ErrorCode rval;
2135  mhdf_Status status;
2136 
2137  CHECK_OPEN_HANDLES;
2138 
2139  if( !fileInfo->have_set_contents ) return MB_SUCCESS;
2140  assert( fileInfo->sets.count );
2141 
2142  // Open data tables
2143  long content_len = 0;
2144  hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
2145  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2146 
2147  hid_t data_type = H5Dget_type( content_handle );
2148 
2149  rval = find_sets_containing( content_handle, data_type, content_len, read_set_containing_parents, sets_out );
2150 
2151  H5Tclose( data_type );
2152 
2153  mhdf_closeData( filePtr, content_handle, &status );
2154  if( MB_SUCCESS == rval && is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2155 
2156  return rval;
2157 }
2158 
2159 static bool set_map_intersect( bool ranged,
2160  const long* contents,
2161  int content_len,
2162  const RangeMap< long, EntityHandle >& id_map )
2163 {
2164  if( ranged )
2165  {
2166  if( !content_len || id_map.empty() ) return false;
2167 
2168  const long* j = contents;
2169  const long* const end = contents + content_len;
2170  assert( content_len % 2 == 0 );
2171  while( j != end )
2172  {
2173  long start = *( j++ );
2174  long count = *( j++ );
2175  if( id_map.intersects( start, count ) ) return true;
2176  }
2177  }
2178  else
2179  {
2180  const long* const end = contents + content_len;
2181  for( const long* i = contents; i != end; ++i )
2182  if( id_map.exists( *i ) ) return true;
2183  }
2184 
2185  return false;
2186 }
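// Note on the two encodings handled above: a set's content list is either a
// flat id list or, when mhdf_SET_RANGE_BIT is set, a list of (start, count)
// pairs. For example, the ranged encoding { 10, 3, 20, 1 } denotes ids
// 10, 11, 12, 20, which is why a ranged length must be even (see the assert).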
2187 
2188 struct SetContOffComp
2189 {
2190  bool operator()( const long a1[4], const long a2[4] )
2191  {
2192  return a1[ReadHDF5::CONTENT] < a2[0];
2193  }
2194 };
2195 
2196 ErrorCode ReadHDF5::find_sets_containing( hid_t contents_handle,
2197  hid_t content_type,
2198  long contents_len,
2199  bool read_set_containing_parents,
2200  Range& file_ids )
2201 {
2202  CHECK_OPEN_HANDLES;
2203 
2204  // Scan all set contents data
2205 
2206  const size_t content_size = H5Tget_size( content_type );
2207  const long num_sets = fileInfo->sets.count;
2208  dbgOut.printf( 2, "Searching contents of %ld sets\n", num_sets );
2209  mhdf_Status status;
2210 
2211  int rank = 0;
2212  bool bcast = false;
2213 #ifdef MOAB_HAVE_MPI
2214  MPI_Comm comm = 0;
2215  if( nativeParallel )
2216  {
2217  rank = myPcomm->proc_config().proc_rank();
2218  comm = myPcomm->proc_config().proc_comm();
2219  bcast = bcastDuplicateReads;
2220  }
2221 #endif
2222 
2223  // Check offsets so that we don't read past end of table or
2224  // walk off end of array.
2225  long prev = -1;
2226  for( long i = 0; i < num_sets; ++i )
2227  {
2228  if( setMeta[i][CONTENT] < prev )
2229  {
2230  std::cerr << "Invalid data in set contents offsets at position " << i << ": index " << setMeta[i][CONTENT]
2231  << " is less than previous index " << prev << std::endl;
2232  std::cerr.flush();
2233  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2234  }
2235  prev = setMeta[i][CONTENT];
2236  }
2237  if( setMeta[num_sets - 1][CONTENT] >= contents_len )
2238  {
2239  std::cerr << "Maximum set content index " << setMeta[num_sets - 1][CONTENT]
2240  << " exceeds contents table length of " << contents_len << std::endl;
2241  std::cerr.flush();
2242  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2243  }
2244 
2245  // Set up buffer for reading set contents
2246  long* const content_buffer = (long*)dataBuffer;
2247  const long content_len = bufferSize / std::max( content_size, sizeof( long ) );
2248 
2249  // Scan set table
2250  Range::iterator hint = file_ids.begin();
2251  Range tmp_range;
2252  long prev_idx = -1;
2253  int mm = 0;
2254  long sets_offset = 0;
2255  long temp_content[4];
2256  while( sets_offset < num_sets )
2257  {
2258  temp_content[0] = content_len + prev_idx;
2259  long sets_count =
2260  std::lower_bound( setMeta + sets_offset, setMeta + num_sets, temp_content, SetContOffComp() ) - setMeta -
2261  sets_offset;
2262  assert( sets_count >= 0 && sets_offset + sets_count <= num_sets );
2263  if( !sets_count )
2264  { // Contents of single set don't fit in buffer
2265  long content_remaining = setMeta[sets_offset][CONTENT] - prev_idx;
2266  long content_offset = prev_idx + 1;
2267  while( content_remaining )
2268  {
2269  long content_count = content_len < content_remaining ? 2 * ( content_len / 2 ) : content_remaining;
2270  assert_range( content_buffer, content_count );
2271  dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, content_count );
2272  if( !bcast || 0 == rank )
2273  {
2274  if( !bcast )
2275  mhdf_readSetDataWithOpt( contents_handle, content_offset, content_count, content_type,
2276  content_buffer, collIO, &status );
2277  else
2278  mhdf_readSetData( contents_handle, content_offset, content_count, content_type, content_buffer,
2279  &status );
2280  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2281 
2282  H5Tconvert( content_type, H5T_NATIVE_LONG, content_count, content_buffer, 0, H5P_DEFAULT );
2283  }
2284  if( bcast )
2285  {
2286 #ifdef MOAB_HAVE_MPI
2287  int ierr = MPI_Bcast( content_buffer, content_count, MPI_LONG, 0, comm );
2288  if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2289 #else
2290  assert( rank == 0 ); // If not MPI, then only one proc
2291 #endif
2292  }
2293 
2294  if( read_set_containing_parents )
2295  {
2296  tmp_range.clear();
2297  if( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT )
2298  tmp_range.insert( *content_buffer, *( content_buffer + 1 ) );
2299  else
2300  std::copy( content_buffer, content_buffer + content_count, range_inserter( tmp_range ) );
2301  tmp_range = intersect( tmp_range, file_ids );
2302  }
2303 
2304  if( !tmp_range.empty() || set_map_intersect( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT,
2305  content_buffer, content_count, idMap ) )
2306  {
2307  long id = fileInfo->sets.start_id + sets_offset;
2308  hint = file_ids.insert( hint, id, id );
2309  if( !nativeParallel ) // Don't stop if doing READ_PART because we need to read
2310  // collectively
2311  break;
2312  }
2313  content_remaining -= content_count;
2314  content_offset += content_count;
2315  }
2316  prev_idx = setMeta[sets_offset][CONTENT];
2317  sets_count = 1;
2318  }
2319  else if( long read_num = setMeta[sets_offset + sets_count - 1][CONTENT] - prev_idx )
2320  {
2321  assert( sets_count > 0 );
2322  assert_range( content_buffer, read_num );
2323  dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, read_num );
2324  if( !bcast || 0 == rank )
2325  {
2326  if( !bcast )
2327  mhdf_readSetDataWithOpt( contents_handle, prev_idx + 1, read_num, content_type, content_buffer,
2328  collIO, &status );
2329  else
2330  mhdf_readSetData( contents_handle, prev_idx + 1, read_num, content_type, content_buffer, &status );
2331  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2332 
2333  H5Tconvert( content_type, H5T_NATIVE_LONG, read_num, content_buffer, 0, H5P_DEFAULT );
2334  }
2335  if( bcast )
2336  {
2337 #ifdef MOAB_HAVE_MPI
2338  int ierr = MPI_Bcast( content_buffer, read_num, MPI_LONG, 0, comm );
2339  if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2340 #else
2341  assert( rank == 0 ); // If not MPI, then only one proc
2342 #endif
2343  }
2344 
2345  long* buff_iter = content_buffer;
2346  for( long i = 0; i < sets_count; ++i )
2347  {
2348  long set_size = setMeta[i + sets_offset][CONTENT] - prev_idx;
2349  prev_idx += set_size;
2350 
2351  // Check whether contents include set already being loaded
2352  if( read_set_containing_parents )
2353  {
2354  tmp_range.clear();
2355  if( setMeta[sets_offset + i][3] & mhdf_SET_RANGE_BIT )
2356  {
2357  // Put the contents of the set in tmp_range;
2358  // at this point file_ids contains only other sets
2359  const long* j = buff_iter;
2360  const long* const end = buff_iter + set_size;
2361  assert( set_size % 2 == 0 );
2362  while( j != end )
2363  {
2364  long start = *( j++ );
2365  long count = *( j++ );
2366  tmp_range.insert( start, start + count - 1 );
2367  }
2368  }
2369  else
2370  std::copy( buff_iter, buff_iter + set_size, range_inserter( tmp_range ) );
2371  tmp_range = intersect( tmp_range, file_ids );
2372  }
2373 
2374  if( !tmp_range.empty() ||
2375  set_map_intersect( setMeta[sets_offset + i][3] & mhdf_SET_RANGE_BIT, buff_iter, set_size, idMap ) )
2376  {
2377  long id = fileInfo->sets.start_id + sets_offset + i;
2378  hint = file_ids.insert( hint, id, id );
2379  }
2380  buff_iter += set_size;
2381  }
2382  }
2383 
2384  sets_offset += sets_count;
2385  }
2386 
2387  return MB_SUCCESS;
2388 }
2389 
2390 static Range::iterator copy_set_contents( Range::iterator hint,
2391  int ranged,
2392  EntityHandle* contents,
2393  long length,
2394  Range& results )
2395 {
2396  if( ranged )
2397  {
2398  assert( length % 2 == 0 );
2399  for( long i = 0; i < length; i += 2 )
2400  hint = results.insert( hint, contents[i], contents[i] + contents[i + 1] - 1 );
2401  }
2402  else
2403  {
2404  std::sort( contents, contents + length );
2405  for( long i = 0; i < length; ++i )
2406  hint = results.insert( hint, contents[i] );
2407  }
2408  return hint;
2409 }
2410 
2411 ErrorCode ReadHDF5::read_set_data( const Range& set_file_ids,
2412  EntityHandle start_handle,
2413  ReadHDF5Dataset& data,
2414  SetMode mode,
2415  Range* file_ids_out )
2416 {
2417  ErrorCode rval;
2418  Range::const_pair_iterator pi;
2419  Range::iterator out_hint;
2420  if( file_ids_out ) out_hint = file_ids_out->begin();
2421 
2422  // Construct range of offsets into data table at which to read
2423  // Note: all offsets are incremented by TWEAK because Range cannot
2424  // store zeros.
2425  const long TWEAK = 1;
2426  Range data_offsets;
2427  Range::iterator hint = data_offsets.begin();
2428  pi = set_file_ids.const_pair_begin();
2429  if( (long)pi->first == fileInfo->sets.start_id )
2430  {
2431  long second = pi->second - fileInfo->sets.start_id;
2432  if( setMeta[second][mode] >= 0 ) hint = data_offsets.insert( hint, TWEAK, setMeta[second][mode] + TWEAK );
2433  ++pi;
2434  }
2435  for( ; pi != set_file_ids.const_pair_end(); ++pi )
2436  {
2437  long first = pi->first - fileInfo->sets.start_id;
2438  long second = pi->second - fileInfo->sets.start_id;
2439  long idx1 = setMeta[first - 1][mode] + 1;
2440  long idx2 = setMeta[second][mode];
2441  if( idx2 >= idx1 ) hint = data_offsets.insert( hint, idx1 + TWEAK, idx2 + TWEAK );
2442  }
2443  try
2444  {
2445  data.set_file_ids( data_offsets, TWEAK, bufferSize / sizeof( EntityHandle ), handleType );
2446  }
2447  catch( ReadHDF5Dataset::Exception )
2448  {
2449  return MB_FAILURE;
2450  }
2451 
2452  // We need to increment this for each processed set because
2453  // the sets were created in the order of the ids in file_ids.
2454  EntityHandle h = start_handle;
2455 
2456  const long ranged_flag = ( mode == CONTENT ) ? mhdf_SET_RANGE_BIT : 0;
2457 
2458  std::vector< EntityHandle > partial; // For when we read only part of the contents of a set/entity
2459  Range::const_iterator fileid_iter = set_file_ids.begin();
2460  EntityHandle* buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
2461  size_t count, offset;
2462 
2463  int nn = 0;
2464  /*
2465  #ifdef MOAB_HAVE_MPI
2466  if (nativeParallel && mode==CONTENT && myPcomm->proc_config().proc_size()>1 &&
2467  data_offsets.empty())
2468  {
2469  MB_SET_ERR_CONT( "ReadHDF5 Failure: Attempt reading an empty dataset on proc " <<
2470  myPcomm->proc_config().proc_rank());
2471  MPI_Abort(myPcomm->proc_config().proc_comm(), 1);
2472  }
2473  #endif
2474  */
2475  if( ( 1 >= set_file_ids.size() ) && ( data.done() ) && moab::ReadHDF5::CONTENT == mode )
2476  // Do at least one null read; it is needed in parallel
2477  data.null_read();
2478 
2479  while( !data.done() )
2480  {
2481  dbgOut.printf( 3, "Reading chunk %d of %s\n", ++nn, data.get_debug_desc() );
2482  try
2483  {
2484  data.read( buffer, count );
2485  }
2486  catch( ReadHDF5Dataset::Exception )
2487  {
2488  return MB_FAILURE;
2489  }
2490 
2491  // An assert is not appropriate here: this rank may have processed all of its
2492  // file ids while another proc has not; locally count will be zero and nothing
2493  // is stored, but we must still go through the motions for the collective read
2494 
2495  // Handle 'special' case where we read some, but not all
2496  // of the data for an entity during the last iteration.
2497  offset = 0;
2498  if( !partial.empty() )
2499  { // Didn't read all of previous entity
2500  assert( fileid_iter != set_file_ids.end() );
2501  size_t num_prev = partial.size();
2502  size_t idx = *fileid_iter - fileInfo->sets.start_id;
2503  size_t len = idx ? setMeta[idx][mode] - setMeta[idx - 1][mode] : setMeta[idx][mode] + 1;
2504  offset = len - num_prev;
2505  if( offset > count )
2506  { // Still don't have all
2507  partial.insert( partial.end(), buffer, buffer + count );
2508  continue;
2509  }
2510 
2511  partial.insert( partial.end(), buffer, buffer + offset );
2512  if( file_ids_out )
2513  {
2514  out_hint = copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag, &partial[0], partial.size(),
2515  *file_ids_out );
2516  }
2517  else
2518  {
2519  switch( mode )
2520  {
2521  size_t valid;
2522  case CONTENT:
2523  if( setMeta[idx][3] & ranged_flag )
2524  {
2525  if( len % 2 ) MB_CHK_ERR( MB_INDEX_OUT_OF_RANGE );
2526  Range range;
2527  convert_range_to_handle( &partial[0], len / 2, range );
2528  rval = moab()->add_entities( h, range );
2529  }
2530  else
2531  {
2532  convert_id_to_handle( &partial[0], len, valid );
2533  rval = moab()->add_entities( h, &partial[0], valid );
2534  }
2535  break;
2536  case CHILD:
2537  convert_id_to_handle( &partial[0], len, valid );
2538  rval = moab()->add_child_meshsets( h, &partial[0], valid );
2539  break;
2540  case PARENT:
2541  convert_id_to_handle( &partial[0], len, valid );
2542  rval = moab()->add_parent_meshsets( h, &partial[0], valid );
2543  break;
2544  default:
2545  break;
2546  }
2547  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
2548  }
2549 
2550  ++fileid_iter;
2551  ++h;
2552  partial.clear();
2553  }
2554 
2555  // Process contents for all entities for which we
2556  // have read the complete list
2557  while( offset < count )
2558  {
2559  assert( fileid_iter != set_file_ids.end() );
2560  size_t idx = *fileid_iter - fileInfo->sets.start_id;
2561  size_t len = idx ? setMeta[idx][mode] - setMeta[idx - 1][mode] : setMeta[idx][mode] + 1;
2562  // If we did not read all of the final entity,
2563  // store what we did read to be processed in the
2564  // next iteration
2565  if( offset + len > count )
2566  {
2567  partial.insert( partial.end(), buffer + offset, buffer + count );
2568  break;
2569  }
2570 
2571  if( file_ids_out )
2572  {
2573  out_hint =
2574  copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag, buffer + offset, len, *file_ids_out );
2575  }
2576  else
2577  {
2578  switch( mode )
2579  {
2580  size_t valid;
2581  case CONTENT:
2582  if( setMeta[idx][3] & ranged_flag )
2583  {
2584  if( len % 2 ) MB_CHK_ERR( MB_INDEX_OUT_OF_RANGE );
2585  Range range;
2586  convert_range_to_handle( buffer + offset, len / 2, range );
2587  rval = moab()->add_entities( h, range );
2588  }
2589  else
2590  {
2591  convert_id_to_handle( buffer + offset, len, valid );
2592  rval = moab()->add_entities( h, buffer + offset, valid );
2593  }
2594  break;
2595  case CHILD:
2596  convert_id_to_handle( buffer + offset, len, valid );
2597  rval = moab()->add_child_meshsets( h, buffer + offset, valid );
2598  break;
2599  case PARENT:
2600  convert_id_to_handle( buffer + offset, len, valid );
2601  rval = moab()->add_parent_meshsets( h, buffer + offset, valid );
2602  break;
2603  default:
2604  break;
2605  }
2606  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
2607  }
2608 
2609  ++fileid_iter;
2610  ++h;
2611  offset += len;
2612  }
2613  }
2614 
2615  return MB_SUCCESS;
2616 }
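// A sketch of the chunk-straddling logic above, assuming 'len' values belong
// to the current set and 'offset'/'count' index the read buffer:
//
//   if( offset + len > count )  // set continues into the next chunk
//       partial.insert( partial.end(), buffer + offset, buffer + count );
//   else
//       /* consume buffer[offset .. offset + len - 1], advance to next set */;
//
// 'partial' is completed (and cleared) at the start of the next read.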
2617 
2618 ErrorCode ReadHDF5::get_set_contents( const Range& sets, Range& file_ids )
2619 {
2620  CHECK_OPEN_HANDLES;
2621 
2622  if( !fileInfo->have_set_contents ) return MB_SUCCESS;
2623  dbgOut.tprint( 2, "Reading set contained file IDs\n" );
2624  try
2625  {
2626  mhdf_Status status;
2627  long content_len;
2628  hid_t contents = mhdf_openSetData( filePtr, &content_len, &status );
2629  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2630  ReadHDF5Dataset data( "set contents", contents, nativeParallel, mpiComm, true );
2631 
2632  return read_set_data( sets, 0, data, CONTENT, &file_ids );
2633  }
2634  catch( ReadHDF5Dataset::Exception )
2635  {
2636  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2637  }
2638 }
2639 
2640 ErrorCode ReadHDF5::read_adjacencies( hid_t table, long table_len )
2641 {
2642  CHECK_OPEN_HANDLES;
2643 
2644  ErrorCode rval;
2645  mhdf_Status status;
2646 
2647  debug_barrier();
2648 
2649  hid_t read_type = H5Dget_type( table );
2650  if( read_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2651  const bool convert = !H5Tequal( read_type, handleType );
2652  EntityHandle* buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
2652 
2654  size_t chunk_size = bufferSize / H5Tget_size( read_type );
2655  size_t remaining = table_len;
2656  size_t left_over = 0;
2657  size_t offset = 0;
2658  dbgOut.printf( 3, "Reading adjacency list in %lu chunks\n",
2659  (unsigned long)( remaining + chunk_size - 1 ) / chunk_size );
2660  int nn = 0;
2661  while( remaining )
2662  {
2663  dbgOut.printf( 3, "Reading chunk %d of adjacency list\n", ++nn );
2664 
2665  size_t count = std::min( chunk_size, remaining );
2666  count -= left_over;
2667  remaining -= count;
2668 
2669  assert_range( buffer + left_over, count );
2670  mhdf_readAdjacencyWithOpt( table, offset, count, read_type, buffer + left_over, collIO, &status );
2671  if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2672 
2673  if( convert )
2674  {
2675  herr_t err = H5Tconvert( read_type, handleType, count, buffer + left_over, 0, H5P_DEFAULT );
2676  if( err < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2677  }
2678 
2679  EntityHandle* iter = buffer;
2680  EntityHandle* end = buffer + count + left_over;
2681  while( end - iter >= 3 )
2682  {
2683  EntityHandle h = idMap.find( *iter++ );
2684  EntityHandle count2 = *iter++;
2685  if( !h )
2686  {
2687  iter += count2;
2688  continue;
2689  }
2690 
2691  if( count2 < 1 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2692 
2693  if( end < count2 + iter )
2694  {
2695  iter -= 2;
2696  break;
2697  }
2698 
2699  size_t valid;
2700  convert_id_to_handle( iter, count2, valid, idMap );
2701  rval = iFace->add_adjacencies( h, iter, valid, false );
2702  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
2703 
2704  iter += count2;
2705  }
2706 
2707  left_over = end - iter;
2708  assert_range( (char*)buffer, left_over );
2709  assert_range( (char*)iter, left_over );
2710  memmove( buffer, iter, left_over );
2711  }
2712 
2713  assert( !left_over ); // Unexpected truncation of data
2714 
2715  return MB_SUCCESS;
2716 }
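// Note: each adjacency record is laid out as { entity_id, count, adj_id_1,
// ..., adj_id_count }, so the loop needs at least 3 handles to decode a
// record and rewinds by 2 (the record header) when a record straddles the
// buffer end; the leftover is moved to the front of the buffer and finished
// by the next read.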
2717 
2718 ErrorCode ReadHDF5::read_tag( int tag_index )
2719 {
2720  CHECK_OPEN_HANDLES;
2721 
2722  dbgOut.tprintf( 2, "Reading tag \"%s\"\n", fileInfo->tags[tag_index].name );
2723 
2724  debug_barrier();
2725 
2726  ErrorCode rval;
2727  mhdf_Status status;
2728  Tag tag = 0;
2729  hid_t read_type = -1;
2730  bool table_type;
2731  rval = create_tag( fileInfo->tags[tag_index], tag, read_type );
2732  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
2733 
2734  if( fileInfo->tags[tag_index].have_sparse )
2735  {
2736  hid_t handles[3];
2737  long num_ent, num_val;
2738  mhdf_openSparseTagData( filePtr, fileInfo->tags[tag_index].name, &num_ent, &num_val, handles, &status );
2739  if( is_error( status ) )
2740  {
2741  if( read_type ) H5Tclose( read_type );
2742  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2743  }
2744 
2745  table_type = false;
2746  if( read_type == 0 )
2747  {
2748  read_type = H5Dget_type( handles[1] );
2749  if( read_type == 0 )
2750  {
2751  mhdf_closeData( filePtr, handles[0], &status );
2752  mhdf_closeData( filePtr, handles[1], &status );
2753  if( fileInfo->tags[tag_index].size <= 0 ) mhdf_closeData( filePtr, handles[2], &status );
2754  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2755  }
2756  table_type = true;
2757  }
2758 
2759  if( fileInfo->tags[tag_index].size > 0 )
2760  {
2761  dbgOut.printf( 2, "Reading sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
2762  rval = read_sparse_tag( tag, read_type, handles[0], handles[1], num_ent );
2763  }
2764  else
2765  {
2766  dbgOut.printf( 2, "Reading var-len sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
2767  rval = read_var_len_tag( tag, read_type, handles[0], handles[1], handles[2], num_ent, num_val );
2768  }
2769 
2770  if( table_type )
2771  {
2772  H5Tclose( read_type );
2773  read_type = 0;
2774  }
2775 
2776  mhdf_closeData( filePtr, handles[0], &status );
2777  if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
2778  mhdf_closeData( filePtr, handles[1], &status );
2779  if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
2780  if( fileInfo->tags[tag_index].size <= 0 )
2781  {
2782  mhdf_closeData( filePtr, handles[2], &status );
2783  if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
2784  }
2785  if( MB_SUCCESS != rval )
2786  {
2787  if( read_type ) H5Tclose( read_type );
2788  MB_SET_ERR( rval, "ReadHDF5 Failure" );
2789  }
2790  }
2791 
2792  for( int j = 0; j < fileInfo->tags[tag_index].num_dense_indices; ++j )
2793  {
2794  long count;
2795  const char* name = 0;
2796  mhdf_EntDesc* desc;
2797  int elem_idx = fileInfo->tags[tag_index].dense_elem_indices[j];
2798  if( elem_idx == -2 )
2799  {
2800  desc = &fileInfo->sets;
2801  name = mhdf_set_type_handle();
2802  }
2803  else if( elem_idx == -1 )
2804  {
2805  desc = &fileInfo->nodes;
2806  name = mhdf_node_type_handle();
2807  }
2808  else if( elem_idx >= 0 && elem_idx < fileInfo->num_elem_desc )
2809  {
2810  desc = &fileInfo->elems[elem_idx].desc;
2811  name = fileInfo->elems[elem_idx].handle;
2812  }
2813  else
2814  {
2815  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2816  }
2817 
2818  dbgOut.printf( 2, "Read dense data block for tag \"%s\" on \"%s\"\n", fileInfo->tags[tag_index].name, name );
2819 
2820  hid_t handle = mhdf_openDenseTagData( filePtr, fileInfo->tags[tag_index].name, name, &count, &status );
2821  if( is_error( status ) )
2822  {
2823  rval = MB_FAILURE; // rval = error(MB_FAILURE);
2824  break;
2825  }
2826 
2827  if( count > desc->count )
2828  {
2829  mhdf_closeData( filePtr, handle, &status );
2830  MB_SET_ERR( MB_FAILURE,
2831  "Invalid data length for dense tag data: " << name << "/" << fileInfo->tags[tag_index].name );
2832  }
2833 
2834  table_type = false;
2835  if( read_type == 0 )
2836  {
2837  read_type = H5Dget_type( handle );
2838  if( read_type == 0 )
2839  {
2840  mhdf_closeData( filePtr, handle, &status );
2841  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2842  }
2843  table_type = true;
2844  }
2845 
2846  rval = read_dense_tag( tag, name, read_type, handle, desc->start_id, count );
2847 
2848  if( table_type )
2849  {
2850  H5Tclose( read_type );
2851  read_type = 0;
2852  }
2853 
2854  mhdf_closeData( filePtr, handle, &status );
2855  if( MB_SUCCESS != rval ) break;
2856  if( is_error( status ) )
2857  {
2858  rval = MB_FAILURE;
2859  break;
2860  }
2861  }
2862 
2863  if( read_type ) H5Tclose( read_type );
2864  return rval;
2865 }
2866 
2867 ErrorCode ReadHDF5::create_tag( const mhdf_TagDesc& info, Tag& handle, hid_t& hdf_type )
2868 {
2869  CHECK_OPEN_HANDLES;
2870 
2871  ErrorCode rval;
2872  mhdf_Status status;
2873  TagType storage;
2874  DataType mb_type;
2875  bool re_read_default = false;
2876 
2877  switch( info.storage )
2878  {
2879  case mhdf_DENSE_TYPE:
2880  storage = MB_TAG_DENSE;
2881  break;
2882  case mhdf_SPARSE_TYPE:
2883  storage = MB_TAG_SPARSE;
2884  break;
2885  case mhdf_BIT_TYPE:
2886  storage = MB_TAG_BIT;
2887  break;
2888  case mhdf_MESH_TYPE:
2889  storage = MB_TAG_MESH;
2890  break;
2891  default:
2892  MB_SET_ERR( MB_FAILURE, "Invalid storage type for tag '" << info.name << "': " << info.storage );
2893  }
2894 
2895  // Type-specific stuff
2896  if( info.type == mhdf_BITFIELD )
2897  {
2898  if( info.size < 1 || info.size > 8 )
2899  {
2900  MB_SET_ERR( MB_FAILURE, "Invalid bit tag: class is MB_TAG_BIT, num bits = " << info.size );
2901  }
2902  hdf_type = H5Tcopy( H5T_NATIVE_B8 );
2903  mb_type = MB_TYPE_BIT;
2904  if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2905  }
2906  else if( info.type == mhdf_OPAQUE )
2907  {
2908  mb_type = MB_TYPE_OPAQUE;
2909 
2910  // Check for user-provided type
2911  Tag type_handle;
2912  std::string tag_type_name = "__hdf5_tag_type_";
2913  tag_type_name += info.name;
2914  rval = iFace->tag_get_handle( tag_type_name.c_str(), sizeof( hid_t ), MB_TYPE_OPAQUE, type_handle );
2915  if( MB_SUCCESS == rval )
2916  {
2917  EntityHandle root = 0;
2918  rval = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
2919  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
2920  hdf_type = H5Tcopy( hdf_type );
2921  re_read_default = true;
2922  }
2923  else if( MB_TAG_NOT_FOUND == rval )
2924  {
2925  hdf_type = 0;
2926  }
2927  else
2928  MB_SET_ERR( rval, "ReadHDF5 Failure" );
2929 
2930  if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2931  }
2932  else
2933  {
2934  switch( info.type )
2935  {
2936  case mhdf_INTEGER:
2937  hdf_type = H5T_NATIVE_INT;
2938  mb_type = MB_TYPE_INTEGER;
2939  break;
2940  case mhdf_FLOAT:
2941  hdf_type = H5T_NATIVE_DOUBLE;
2942  mb_type = MB_TYPE_DOUBLE;
2943  break;
2944  case mhdf_BOOLEAN:
2945  hdf_type = H5T_NATIVE_UINT;
2946  mb_type = MB_TYPE_INTEGER;
2947  break;
2948  case mhdf_ENTITY_ID:
2949  hdf_type = handleType;
2950  mb_type = MB_TYPE_HANDLE;
2951  break;
2952  default:
2953  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2954  }
2955 
2956  if( info.size > 1 )
2957  { // Array
2958  hsize_t tmpsize = info.size;
2959 #if defined( H5Tarray_create_vers ) && H5Tarray_create_vers > 1
2960  hdf_type = H5Tarray_create2( hdf_type, 1, &tmpsize );
2961 #else
2962  hdf_type = H5Tarray_create( hdf_type, 1, &tmpsize, NULL );
2963 #endif
2964  }
2965  else
2966  {
2967  hdf_type = H5Tcopy( hdf_type );
2968  }
2969  if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
2970  }
2971 
2972  // If default or global/mesh value in file, read it.
2973  if( info.default_value || info.global_value )
2974  {
2975  if( re_read_default )
2976  {
2977  mhdf_getTagValues( filePtr, info.name, hdf_type, info.default_value, info.global_value, &status );
2978  if( mhdf_isError( &status ) )
2979  {
2980  if( hdf_type ) H5Tclose( hdf_type );
2981  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
2982  }
2983  }
2984 
2985  if( MB_TYPE_HANDLE == mb_type )
2986  {
2987  if( info.default_value )
2988  {
2989  rval = convert_id_to_handle( (EntityHandle*)info.default_value, info.default_value_size );
2990  if( MB_SUCCESS != rval )
2991  {
2992  if( hdf_type ) H5Tclose( hdf_type );
2993  MB_SET_ERR( rval, "ReadHDF5 Failure" );
2994  }
2995  }
2996  if( info.global_value )
2997  {
2998  rval = convert_id_to_handle( (EntityHandle*)info.global_value, info.global_value_size );
2999  if( MB_SUCCESS != rval )
3000  {
3001  if( hdf_type ) H5Tclose( hdf_type );
3002  MB_SET_ERR( rval, "ReadHDF5 Failure" );
3003  }
3004  }
3005  }
3006  }
3007 
3008  // Get tag handle, creating if necessary
3009  if( info.size < 0 )
3010  rval = iFace->tag_get_handle( info.name, info.default_value_size, mb_type, handle,
3011  storage | MB_TAG_CREAT | MB_TAG_VARLEN | MB_TAG_DFTOK, info.default_value );
3012  else
3013  rval = iFace->tag_get_handle( info.name, info.size, mb_type, handle, storage | MB_TAG_CREAT | MB_TAG_DFTOK,
3014  info.default_value );
3015  if( MB_SUCCESS != rval )
3016  {
3017  if( hdf_type ) H5Tclose( hdf_type );
3018  MB_SET_ERR( MB_FAILURE, "Tag type in file does not match type in database for \"" << info.name << "\"" );
3019  }
3020 
3021  if( info.global_value )
3022  {
3023  EntityHandle root = 0;
3024  if( info.size > 0 )
3025  { // Fixed-length tag
3026  rval = iFace->tag_set_data( handle, &root, 1, info.global_value );
3027  }
3028  else
3029  {
3030  int tag_size = info.global_value_size;
3031  rval = iFace->tag_set_by_ptr( handle, &root, 1, &info.global_value, &tag_size );
3032  }
3033  if( MB_SUCCESS != rval )
3034  {
3035  if( hdf_type ) H5Tclose( hdf_type );
3036  MB_SET_ERR( rval, "ReadHDF5 Failure" );
3037  }
3038  }
3039 
3040  return MB_SUCCESS;
3041 }
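// Summary of the type mapping implemented above (file type -> MOAB type):
//   mhdf_INTEGER   -> MB_TYPE_INTEGER (H5T_NATIVE_INT)
//   mhdf_FLOAT     -> MB_TYPE_DOUBLE  (H5T_NATIVE_DOUBLE)
//   mhdf_BOOLEAN   -> MB_TYPE_INTEGER (H5T_NATIVE_UINT)
//   mhdf_ENTITY_ID -> MB_TYPE_HANDLE  (handleType)
//   mhdf_BITFIELD  -> MB_TYPE_BIT; mhdf_OPAQUE -> MB_TYPE_OPAQUE
// Tags with info.size > 1 become one-dimensional HDF5 array types.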
3042 
3043 ErrorCode ReadHDF5::read_dense_tag( Tag tag_handle,
3044  const char* ent_name,
3045  hid_t hdf_read_type,
3046  hid_t data,
3047  long start_id,
3048  long num_values )
3049 {
3050  CHECK_OPEN_HANDLES;
3051 
3052  ErrorCode rval;
3053  DataType mb_type;
3054 
3055  rval = iFace->tag_get_data_type( tag_handle, mb_type );
3056  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
3057 
3058  int read_size;
3059  rval = iFace->tag_get_bytes( tag_handle, read_size );
3060  if( MB_SUCCESS != rval ) // Wrong function for variable-length tags
3061  MB_SET_ERR( rval, "ReadHDF5 Failure" );
3062  // if (MB_TYPE_BIT == mb_type)
3063  // read_size = (read_size + 7) / 8; // Convert bits to bytes, plus 7 for ceiling
3064 
3065  if( hdf_read_type )
3066  { // If not opaque
3067  hsize_t hdf_size = H5Tget_size( hdf_read_type );
3068  if( hdf_size != (hsize_t)read_size ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3069  }
3070 
3071  // Get actual entities read from file
3072  Range file_ids, handles;
3073  Range::iterator f_ins = file_ids.begin(), h_ins = handles.begin();
3074  IDMap::iterator l, u;
3075  l = idMap.lower_bound( start_id );
3076  u = idMap.lower_bound( start_id + num_values - 1 );
3077  if( l != idMap.end() && start_id + num_values > l->begin )
3078  {
3079  if( l == u )
3080  {
3081  size_t beg = std::max( start_id, l->begin );
3082  size_t end = std::min( start_id + num_values, u->begin + u->count ) - 1;
3083  f_ins = file_ids.insert( f_ins, beg, end );
3084  h_ins = handles.insert( h_ins, l->value + ( beg - l->begin ), l->value + ( end - l->begin ) );
3085  }
3086  else
3087  {
3088  size_t beg = std::max( start_id, l->begin );
3089  f_ins = file_ids.insert( f_ins, beg, l->begin + l->count - 1 );
3090  h_ins = handles.insert( h_ins, l->value + ( beg - l->begin ), l->value + l->count - 1 );
3091  for( ++l; l != u; ++l )
3092  {
3093  f_ins = file_ids.insert( f_ins, l->begin, l->begin + l->count - 1 );
3094  h_ins = handles.insert( h_ins, l->value, l->value + l->count - 1 );
3095  }
3096  if( u != idMap.end() && u->begin < start_id + num_values )
3097  {
3098  size_t end = std::min( start_id + num_values, u->begin + u->count - 1 );
3099  f_ins = file_ids.insert( f_ins, u->begin, end );
3100  h_ins = handles.insert( h_ins, u->value, u->value + end - u->begin );
3101  }
3102  }
3103  }
3104 
3105  // Given that all of the entities for this dense tag data should
3106  // have been created as a single contiguous block, the resulting
3107  // MOAB handle range should be contiguous.
3108  // THE ABOVE IS NOT NECESSARILY TRUE. SOMETIMES LOWER-DIMENSION
3109  // ENTS ARE READ AND THEN DELETED FOR PARTIAL READS.
3110  // assert(handles.empty() || handles.size() == (handles.back() - handles.front() + 1));
3111 
3112  std::string tn( "<error>" );
3113  iFace->tag_get_name( tag_handle, tn );
3114  tn += " data for ";
3115  tn += ent_name;
3116  try
3117  {
3118  h_ins = handles.begin();
3119  ReadHDF5Dataset reader( tn.c_str(), data, nativeParallel, mpiComm, false );
3120  long buffer_size = bufferSize / read_size;
3121  reader.set_file_ids( file_ids, start_id, buffer_size, hdf_read_type );
3122  dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n", tn.c_str(), ent_name,
3123  reader.get_read_count() );
3124  int nn = 0;
3125  while( !reader.done() )
3126  {
3127  dbgOut.printf( 3, "Reading chunk %d of \"%s\" data\n", ++nn, tn.c_str() );
3128 
3129  size_t count;
3130  reader.read( dataBuffer, count );
3131 
3132  if( MB_TYPE_HANDLE == mb_type )
3133  {
3134  rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * read_size / sizeof( EntityHandle ) );
3135  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
3136  }
3137 
3138  Range ents;
3139  Range::iterator end = h_ins;
3140  end += count;
3141  ents.insert( h_ins, end );
3142  h_ins = end;
3143 
3144  rval = iFace->tag_set_data( tag_handle, ents, dataBuffer );
3145  if( MB_SUCCESS != rval )
3146  {
3147  dbgOut.printf( 1, "Internal error setting data for tag \"%s\"\n", tn.c_str() );
3148  MB_SET_ERR( rval, "ReadHDF5 Failure" );
3149  }
3150  }
3151  }
3152  catch( ReadHDF5Dataset::Exception )
3153  {
3154  dbgOut.printf( 1, "Internal error reading dense data for tag \"%s\"\n", tn.c_str() );
3155  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3156  }
3157 
3158  return MB_SUCCESS;
3159 }
3160 
3161 // Read entire ID table and for those file IDs corresponding
3162 // to entities that we have read from the file add both the
3163 // offset into the offset range and the handle into the handle
3164 // range. If handles are not ordered, switch to using a vector.
3165 ErrorCode ReadHDF5::read_sparse_tag_indices( const char* name,
3166  hid_t id_table,
3167  EntityHandle start_offset, // Can't put zero in a Range
3168  Range& offset_range,
3169  Range& handle_range,
3170  std::vector< EntityHandle >& handle_vect )
3171 {
3172  CHECK_OPEN_HANDLES;
3173 
3174  offset_range.clear();
3175  handle_range.clear();
3176  handle_vect.clear();
3177 
3178  ErrorCode rval;
3179  Range::iterator handle_hint = handle_range.begin();
3180  Range::iterator offset_hint = offset_range.begin();
3181 
3182  EntityHandle* idbuf = reinterpret_cast< EntityHandle* >( dataBuffer );
3183  size_t idbuf_size = bufferSize / sizeof( EntityHandle );
3184 
3185  std::string tn( name );
3186  tn += " indices";
3187 
3188  assert( start_offset > 0 ); // Can't put zero in a Range
3189  try
3190  {
3191  ReadHDF5Dataset id_reader( tn.c_str(), id_table, nativeParallel, mpiComm, false );
3192  id_reader.set_all_file_ids( idbuf_size, handleType );
3193  size_t offset = start_offset;
3194  dbgOut.printf( 3, "Reading file ids for sparse tag \"%s\" in %lu chunks\n", name, id_reader.get_read_count() );
3195  int nn = 0;
3196  while( !id_reader.done() )
3197  {
3198  dbgOut.printf( 3, "Reading chunk %d of \"%s\" IDs\n", ++nn, name );
3199  size_t count;
3200  id_reader.read( idbuf, count );
3201 
3202  rval = convert_id_to_handle( idbuf, count );
3203  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
3204 
3205  // idbuf will now contain zero-valued handles for those
3206  // tag values that correspond to entities we are not reading
3207  // from the file.
3208  for( size_t i = 0; i < count; ++i )
3209  {
3210  if( idbuf[i] )
3211  {
3212  offset_hint = offset_range.insert( offset_hint, offset + i );
3213  if( !handle_vect.empty() )
3214  {
3215  handle_vect.push_back( idbuf[i] );
3216  }
3217  else if( handle_range.empty() || idbuf[i] > handle_range.back() )
3218  {
3219  handle_hint = handle_range.insert( handle_hint, idbuf[i] );
3220  }
3221  else
3222  {
3223  handle_vect.resize( handle_range.size() );
3224  std::copy( handle_range.begin(), handle_range.end(), handle_vect.begin() );
3225  handle_range.clear();
3226  handle_vect.push_back( idbuf[i] );
3227  dbgOut.print( 2, "Switching to unordered list for tag handle list\n" );
3228  }
3229  }
3230  }
3231 
3232  offset += count;
3233  }
3234  }
3235  catch( ReadHDF5Dataset::Exception )
3236  {
3237  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3238  }
3239 
3240  return MB_SUCCESS;
3241 }
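// Note: handles are accumulated in a Range while they arrive in ascending
// order; the first out-of-order handle triggers a one-time spill of the
// Range into handle_vect, after which the unordered std::vector is used.
// This keeps the common (ordered) case compact without forbidding the rare
// unordered case.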
3242 
3243 ErrorCode ReadHDF5::read_sparse_tag( Tag tag_handle,
3244  hid_t hdf_read_type,
3245  hid_t id_table,
3246  hid_t value_table,
3247  long /*num_values*/ )
3248 {
3249  CHECK_OPEN_HANDLES;
3250 
3251  // Read entire ID table and for those file IDs corresponding
3252  // to entities that we have read from the file add both the
3253  // offset into the offset range and the handle into the handle
3254  // range. If handles are not ordered, switch to using a vector.
3255  const EntityHandle base_offset = 1; // Can't put zero in a Range
3256  std::vector< EntityHandle > handle_vect;
3257  Range handle_range, offset_range;
3258  std::string tn( "<error>" );
3259  iFace->tag_get_name( tag_handle, tn );
3260  ErrorCode rval =
3261  read_sparse_tag_indices( tn.c_str(), id_table, base_offset, offset_range, handle_range, handle_vect );
3262  if( MB_SUCCESS != rval ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3263 
3264  DataType mbtype;
3265  rval = iFace->tag_get_data_type( tag_handle, mbtype );
3266  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
3267 
3268  int read_size;
3269  rval = iFace->tag_get_bytes( tag_handle, read_size );
3270  if( MB_SUCCESS != rval ) // Wrong function for variable-length tags
3271  MB_SET_ERR( rval, "ReadHDF5 Failure" );
3272  // if (MB_TYPE_BIT == mbtype)
3273  // read_size = (read_size + 7) / 8; // Convert bits to bytes, plus 7 for ceiling
3274 
3275  if( hdf_read_type )
3276  { // If not opaque
3277  hsize_t hdf_size = H5Tget_size( hdf_read_type );
3278  if( hdf_size != (hsize_t)read_size ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3279  }
3280 
3281  const int handles_per_tag = read_size / sizeof( EntityHandle );
3282 
3283  // Now read data values
3284  size_t chunk_size = bufferSize / read_size;
3285  try
3286  {
3287  ReadHDF5Dataset val_reader( ( tn + " values" ).c_str(), value_table, nativeParallel, mpiComm, false );
3288  val_reader.set_file_ids( offset_range, base_offset, chunk_size, hdf_read_type );
3289  dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tn.c_str(),
3290  val_reader.get_read_count() );
3291  int nn = 0;
3292  size_t offset = 0;
3293  while( !val_reader.done() )
3294  {
3295  dbgOut.printf( 3, "Reading chunk %d of \"%s\" values\n", ++nn, tn.c_str() );
3296  size_t count;
3297  val_reader.read( dataBuffer, count );
3298  if( MB_TYPE_HANDLE == mbtype )
3299  {
3300  rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * handles_per_tag );
3301  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
3302  }
3303 
3304  if( !handle_vect.empty() )
3305  {
3306  rval = iFace->tag_set_data( tag_handle, &handle_vect[offset], count, dataBuffer );
3307  offset += count;
3308  }
3309  else
3310  {
3311  Range r;
3312  r.merge( handle_range.begin(), handle_range.begin() + count );
3313  handle_range.erase( handle_range.begin(), handle_range.begin() + count );
3314  rval = iFace->tag_set_data( tag_handle, r, dataBuffer );
3315  }
3316  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
3317  }
3318  }
3319  catch( ReadHDF5Dataset::Exception )
3320  {
3321  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3322  }
3323 
3324  return MB_SUCCESS;
3325 }
3326 
3327 ErrorCode ReadHDF5::read_var_len_tag( Tag tag_handle,
3328  hid_t hdf_read_type,
3329  hid_t ent_table,
3330  hid_t val_table,
3331  hid_t off_table,
3332  long /*num_entities*/,
3333  long /*num_values*/ )
3334 {
3335  CHECK_OPEN_HANDLES;
3336 
3337  ErrorCode rval;
3338  DataType mbtype;
3339 
3340  rval = iFace->tag_get_data_type( tag_handle, mbtype );
3341  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
3342 
3343  // Can't do variable-length bit tags
3344  if( MB_TYPE_BIT == mbtype ) MB_CHK_ERR( MB_VARIABLE_DATA_LENGTH );
3345 
3346  // If here, MOAB tag must be variable-length
3347  int mbsize;
3348  if( MB_VARIABLE_DATA_LENGTH != iFace->tag_get_bytes( tag_handle, mbsize ) )
3349  {
3350  assert( false );MB_CHK_ERR( MB_VARIABLE_DATA_LENGTH );
3351  }
3352 
3353  int read_size;
3354  if( hdf_read_type )
3355  {
3356  hsize_t hdf_size = H5Tget_size( hdf_read_type );
3357  if( hdf_size < 1 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3358  read_size = hdf_size;
3359  }
3360  else
3361  {
3362  // Opaque
3363  read_size = 1;
3364  }
3365 
3366  std::string tn( "<error>" );
3367  iFace->tag_get_name( tag_handle, tn );
3368 
3369  // Read entire ID table and for those file IDs corresponding
3370  // to entities that we have read from the file add both the
3371  // offset into the offset range and the handle into the handle
3372  // range. If handles are not ordered, switch to using a vector.
3373  const EntityHandle base_offset = 1; // Can't put zero in a Range
3374  std::vector< EntityHandle > handle_vect;
3375  Range handle_range, offset_range;
3376  rval = read_sparse_tag_indices( tn.c_str(), ent_table, base_offset, offset_range, handle_range, handle_vect );
3377 
3378  // This code only works if the id_table is an ordered list.
3379  // This assumption was also true for the previous iteration
3380  // of this code, but wasn't checked. MOAB's file writer
3381  // always writes an ordered list for id_table.
3382  if( !handle_vect.empty() )
3383  {
3384  MB_SET_ERR( MB_FAILURE, "Unordered file ids for variable length tag not supported" );
3385  }
3386 
3387  class VTReader : public ReadHDF5VarLen
3388  {
3389  Tag tagHandle;
3390  bool isHandle;
3391  size_t readSize;
3392  ReadHDF5* readHDF5;
3393 
3394  public:
3395  ErrorCode store_data( EntityHandle file_id, void* data, long count, bool )
3396  {
3397  ErrorCode rval1;
3398  if( isHandle )
3399  {
3400  if( readSize != sizeof( EntityHandle ) ) MB_CHK_SET_ERR( MB_FAILURE, "Invalid read size" );
3401  rval1 = readHDF5->convert_id_to_handle( (EntityHandle*)data, count );MB_CHK_ERR( rval1 );
3402  }
3403  int n = count;
3404  return readHDF5->moab()->tag_set_by_ptr( tagHandle, &file_id, 1, &data, &n );
3405  }
3406  VTReader( DebugOutput& debug_output,
3407  void* buffer,
3408  size_t buffer_size,
3409  Tag tag,
3410  bool is_handle_tag,
3411  size_t read_size1,
3412  ReadHDF5* owner )
3413  : ReadHDF5VarLen( debug_output, buffer, buffer_size ), tagHandle( tag ), isHandle( is_handle_tag ),
3414  readSize( read_size1 ), readHDF5( owner )
3415  {
3416  }
3417  };
3418 
3419  VTReader tool( dbgOut, dataBuffer, bufferSize, tag_handle, MB_TYPE_HANDLE == mbtype, read_size, this );
3420  try
3421  {
3422  // Read offsets into value table.
3423  std::vector< unsigned > counts;
3424  Range offsets;
3425  ReadHDF5Dataset off_reader( ( tn + " offsets" ).c_str(), off_table, nativeParallel, mpiComm, false );
3426  rval = tool.read_offsets( off_reader, offset_range, base_offset, base_offset, offsets, counts );
3427  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
3428 
3429  // Read tag values
3430  Range empty;
3431  ReadHDF5Dataset val_reader( ( tn + " values" ).c_str(), val_table, nativeParallel, mpiComm, false );
3432  rval = tool.read_data( val_reader, offsets, base_offset, hdf_read_type, handle_range, counts, empty );
3433  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
3434  }
3435  catch( ReadHDF5Dataset::Exception )
3436  {
3437  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3438  }
3439 
3440  return MB_SUCCESS;
3441 }
3442 
3443 ErrorCode ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size )
3444 {
3445  convert_id_to_handle( array, size, idMap );
3446  return MB_SUCCESS;
3447 }
3448 
3449 void ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size, const RangeMap< long, EntityHandle >& id_map )
3450 {
3451  for( EntityHandle* const end = array + size; array != end; ++array )
3452  *array = id_map.find( *array );
3453 }
3454 
3455 void ReadHDF5::convert_id_to_handle( EntityHandle* array,
3456  size_t size,
3457  size_t& new_size,
3458  const RangeMap< long, EntityHandle >& id_map )
3459 {
3460  RangeMap< long, EntityHandle >::const_iterator it;
3461  new_size = 0;
3462  for( size_t i = 0; i < size; ++i )
3463  {
3464  it = id_map.lower_bound( array[i] );
3465  if( it != id_map.end() && it->begin <= (long)array[i] )
3466  array[new_size++] = it->value + ( array[i] - it->begin );
3467  }
3468 }
3469 
3470 void ReadHDF5::convert_range_to_handle( const EntityHandle* ranges,
3471  size_t num_ranges,
3472  const RangeMap< long, EntityHandle >& id_map,
3473  Range& merge )
3474 {
3475  RangeMap< long, EntityHandle >::const_iterator it = id_map.begin();
3476  Range::iterator hint = merge.begin();
3477  for( size_t i = 0; i < num_ranges; ++i )
3478  {
3479  long id = ranges[2 * i];
3480  const long end = id + ranges[2 * i + 1];
3481  // We assume that 'ranges' is sorted, but check just in case it isn't.
3482  if( it == id_map.end() || it->begin > id ) it = id_map.begin();
3483  it = id_map.lower_bound( it, id_map.end(), id );
3484  if( it == id_map.end() ) continue;
3485  if( id < it->begin ) id = it->begin;
3486  while( id < end )
3487  {
3488  if( id < it->begin ) id = it->begin;
3489  const long off = id - it->begin;
3490  long count = std::min( it->count - off, end - id );
3491  // It is possible that this new subrange starts after the end of
3492  // the query, which would yield a negative count; in that case we
3493  // are done with this range, so go on to the next one.
3494  if( count <= 0 ) break;
3495  hint = merge.insert( hint, it->value + off, it->value + off + count - 1 );
3496  id += count;
3497  if( id < end )
3498  {
3499  if( ++it == id_map.end() ) break;
3500  if( it->begin > end ) break;
3501  }
3502  }
3503  }
3504 }
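// Illustrative sketch of the pair encoding consumed above: 'ranges' holds
// num_ranges (start, count) pairs, so { 10, 4, 100, 2 } denotes file ids
// 10..13 and 100..101. Expanding that same encoding directly into a Range:
static void example_expand_pairs( Range& out )
{
    const EntityHandle pairs[4] = { 10, 4, 100, 2 };  // two (start, count) pairs
    for( size_t i = 0; i < 2; ++i )
        out.insert( pairs[2 * i], pairs[2 * i] + pairs[2 * i + 1] - 1 );
}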
3505 
3506 ErrorCode ReadHDF5::convert_range_to_handle( const EntityHandle* array, size_t num_ranges, Range& range )
3507 {
3508  convert_range_to_handle( array, num_ranges, idMap, range );
3509  return MB_SUCCESS;
3510 }
3511 
3512 ErrorCode ReadHDF5::insert_in_id_map( const Range& file_ids, EntityHandle start_id )
3513 {
3514  IDMap tmp_map;
3515  bool merge = !idMap.empty() && !file_ids.empty() && idMap.back().begin > (long)file_ids.front();
3516  IDMap& map = merge ? tmp_map : idMap;
3517  Range::const_pair_iterator p;
3518  for( p = file_ids.const_pair_begin(); p != file_ids.const_pair_end(); ++p )
3519  {
3520  size_t count = p->second - p->first + 1;
3521  if( !map.insert( p->first, start_id, count ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3522  start_id += count;
3523  }
3524  if( merge && !idMap.merge( tmp_map ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3525 
3526  return MB_SUCCESS;
3527 }
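// Illustrative sketch of the contiguity contract established above: each run
// of consecutive file ids maps onto consecutive handles beginning at
// start_id, so file ids { 5..7, 20..21 } with a hypothetical start handle 's'
// become 5->s, 6->s+1, 7->s+2, 20->s+3, 21->s+4.
static void example_id_map_layout( RangeMap< long, EntityHandle >& map, EntityHandle s )
{
    map.insert( 5, s, 3 );       // file ids 5..7   -> handles s..s+2
    map.insert( 20, s + 3, 2 );  // file ids 20..21 -> handles s+3..s+4
}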
3528 
3529 ErrorCode ReadHDF5::insert_in_id_map( long file_id, EntityHandle handle )
3530 {
3531  if( !idMap.insert( file_id, handle, 1 ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3532  return MB_SUCCESS;
3533 }
3534 
3535 ErrorCode ReadHDF5::read_qa( EntityHandle )
3536 {
3537  CHECK_OPEN_HANDLES;
3538 
3539  mhdf_Status status;
3540  // std::vector<std::string> qa_list;
3541 
3542  int qa_len;
3543  char** qa = mhdf_readHistory( filePtr, &qa_len, &status );
3544  if( mhdf_isError( &status ) )
3545  {
3546  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
3547  }
3548  // qa_list.resize(qa_len);
3549  for( int i = 0; i < qa_len; i++ )
3550  {
3551  // qa_list[i] = qa[i];
3552  free( qa[i] );
3553  }
3554  free( qa );
3555 
3556  /** FIX ME - how to put QA list on set?? */
3557 
3558  return MB_SUCCESS;
3559 }
3560 
3561 ErrorCode ReadHDF5::store_file_ids( Tag tag )
3562 {
3563  CHECK_OPEN_HANDLES;
3564 
3565  // typedef int tag_type;
3566  typedef long tag_type;
3567  // long (64 bits) rather than int, so that much bigger files can be read
3568 
3569  tag_type* buffer = reinterpret_cast< tag_type* >( dataBuffer );
3570  const long buffer_size = bufferSize / sizeof( tag_type );
3571  for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
3572  {
3573  IDMap::Range range = *i;
3574 
3575  // Make sure the values will fit in the tag type
3576  IDMap::key_type rv = range.begin + ( range.count - 1 );
3577  tag_type tv = (tag_type)rv;
3578  if( (IDMap::key_type)tv != rv )
3579  {
3580  assert( false );
3581  return MB_INDEX_OUT_OF_RANGE;
3582  }
3583 
3584  while( range.count )
3585  {
3586  long count = buffer_size < range.count ? buffer_size : range.count;
3587 
3588  Range handles;
3589  handles.insert( range.value, range.value + count - 1 );
3590  range.value += count;
3591  range.count -= count;
3592  for( long j = 0; j < count; ++j )
3593  buffer[j] = (tag_type)range.begin++;
3594 
3595  ErrorCode rval = iFace->tag_set_data( tag, handles, buffer );
3596  if( MB_SUCCESS != rval ) return rval;
3597  }
3598  }
3599 
3600  return MB_SUCCESS;
3601 }
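// The loop above streams an arbitrarily long run of consecutive ids through
// the fixed-size I/O buffer. A minimal sketch of the same chunking pattern in
// isolation; 'first', 'total', 'buf', and 'cap' are hypothetical:
static void example_chunked_fill( long first, long total, long* buf, long cap )
{
    while( total )
    {
        const long n = total < cap ? total : cap;
        for( long j = 0; j < n; ++j )
            buf[j] = first++;  // a consumer (e.g., tag_set_data) would run here
        total -= n;
    }
}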
3602 
3603 ErrorCode ReadHDF5::store_sets_file_ids()
3604 {
3605  CHECK_OPEN_HANDLES;
3606 
3607  // Create a tag that will not be saved, but will be used by the
3608  // VisIt plugin to match sets to their file ids. It has the same
3609  // type as the file id tag defined in ReadParallel.cpp.
3610  Tag setFileIdTag;
3611  long default_val = 0;
3612  ErrorCode rval = iFace->tag_get_handle( "__FILE_ID_FOR_SETS", sizeof( long ), MB_TYPE_OPAQUE, setFileIdTag,
3613  ( MB_TAG_DENSE | MB_TAG_CREAT ), &default_val );
3614 
3615  if( MB_SUCCESS != rval || 0 == setFileIdTag ) return rval;
3616  // typedef int tag_type;
3617  typedef long tag_type;
3618  // long (64 bits) rather than int, so that much bigger files can be read
3619 
3620  tag_type* buffer = reinterpret_cast< tag_type* >( dataBuffer );
3621  const long buffer_size = bufferSize / sizeof( tag_type );
3622  for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
3623  {
3624  IDMap::Range range = *i;
3625  EntityType htype = iFace->type_from_handle( range.value );
3626  if( MBENTITYSET != htype ) continue;
3627  // work only with entity sets
3628  // Make sure the values will fit in the tag type
3629  IDMap::key_type rv = range.begin + ( range.count - 1 );
3630  tag_type tv = (tag_type)rv;
3631  if( (IDMap::key_type)tv != rv )
3632  {
3633  assert( false );
3634  return MB_INDEX_OUT_OF_RANGE;
3635  }
3636 
3637  while( range.count )
3638  {
3639  long count = buffer_size < range.count ? buffer_size : range.count;
3640 
3641  Range handles;
3642  handles.insert( range.value, range.value + count - 1 );
3643  range.value += count;
3644  range.count -= count;
3645  for( long j = 0; j < count; ++j )
3646  buffer[j] = (tag_type)range.begin++;
3647 
3648  rval = iFace->tag_set_data( setFileIdTag, handles, buffer );
3649  if( MB_SUCCESS != rval ) return rval;
3650  }
3651  }
3652  return MB_SUCCESS;
3653 }
3654 
3655 ErrorCode ReadHDF5::read_tag_values( const char* file_name,
3656  const char* tag_name,
3657  const FileOptions& opts,
3658  std::vector< int >& tag_values_out,
3659  const SubsetList* subset_list )
3660 {
3661  ErrorCode rval;
3662 
3663  rval = set_up_read( file_name, opts );
3664  if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
3665 
3666  int tag_index;
3667  rval = find_int_tag( tag_name, tag_index );
3668  if( MB_SUCCESS != rval )
3669  {
3670  clean_up_read( opts );
3671  MB_SET_ERR( rval, "ReadHDF5 Failure" );
3672  }
3673 
3674  if( subset_list )
3675  {
3676  Range file_ids;
3677  rval = get_subset_ids( subset_list->tag_list, subset_list->tag_list_length, file_ids );
3678  if( MB_SUCCESS != rval )
3679  {
3680  clean_up_read( opts );
3681  MB_SET_ERR( rval, "ReadHDF5 Failure" );
3682  }
3683 
3684  rval = read_tag_values_partial( tag_index, file_ids, tag_values_out );
3685  if( MB_SUCCESS != rval )
3686  {
3687  clean_up_read( opts );
3688  MB_SET_ERR( rval, "ReadHDF5 Failure" );
3689  }
3690  }
3691  else
3692  {
3693  rval = read_tag_values_all( tag_index, tag_values_out );
3694  if( MB_SUCCESS != rval )
3695  {
3696  clean_up_read( opts );
3697  MB_SET_ERR( rval, "ReadHDF5 Failure" );
3698  }
3699  }
3700 
3701  return clean_up_read( opts );
3702 }
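// Hedged usage sketch: enumerating the distinct values of an integer tag
// (here the conventional MATERIAL_SET tag name) without loading the mesh.
// The file name and the already-constructed reader are assumptions; passing
// a null subset list scans the whole file.
static ErrorCode example_enumerate_values( ReadHDF5& reader, std::vector< int >& values )
{
    FileOptions opts( "" );
    return reader.read_tag_values( "mesh.h5m", "MATERIAL_SET", opts, values, 0 );
}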
3703 
3704 ErrorCode ReadHDF5::read_tag_values_partial( int tag_index, const Range& file_ids, std::vector< int >& tag_values )
3705 {
3706  CHECK_OPEN_HANDLES;
3707 
3708  mhdf_Status status;
3709  const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
3710  long num_ent, num_val;
3711  size_t count;
3712  std::string tn( tag.name );
3713 
3714  // Read sparse values
3715  if( tag.have_sparse )
3716  {
3717  hid_t handles[3];
3718  mhdf_openSparseTagData( filePtr, tag.name, &num_ent, &num_val, handles, &status );
3719  if( mhdf_isError( &status ) )
3720  {
3721  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
3722  }
3723 
3724  try
3725  {
3726  // Read all entity handles and fill 'offsets' with ranges of
3727  // offsets into the data table for entities that we want.
3728  Range offsets;
3729  long* buffer = reinterpret_cast< long* >( dataBuffer );
3730  const long buffer_size = bufferSize / sizeof( long );
3731  ReadHDF5Dataset ids( ( tn + " ids" ).c_str(), handles[0], nativeParallel, mpiComm );
3732  ids.set_all_file_ids( buffer_size, H5T_NATIVE_LONG );
3733  size_t offset = 0;
3734  dbgOut.printf( 3, "Reading sparse IDs for tag \"%s\" in %lu chunks\n", tag.name, ids.get_read_count() );
3735  int nn = 0;
3736  while( !ids.done() )
3737  {
3738  dbgOut.printf( 3, "Reading chunk %d of IDs for \"%s\"\n", ++nn, tag.name );
3739  ids.read( buffer, count );
3740 
3741  std::sort( buffer, buffer + count );
3742  Range::iterator ins = offsets.begin();
3743  Range::const_iterator i = file_ids.begin();
3744  for( size_t j = 0; j < count; ++j )
3745  {
3746  while( i != file_ids.end() && (long)*i < buffer[j] )
3747  ++i;
3748  if( i == file_ids.end() ) break;
3749  if( (long)*i == buffer[j] )
3750  {
3751  ins = offsets.insert( ins, j + offset, j + offset );
3752  }
3753  }
3754 
3755  offset += count;
3756  }
3757 
3758  tag_values.clear();
3759  tag_values.reserve( offsets.size() );
3760  const size_t data_buffer_size = bufferSize / sizeof( int );
3761  int* data_buffer = reinterpret_cast< int* >( dataBuffer );
3762  ReadHDF5Dataset vals( ( tn + " sparse vals" ).c_str(), handles[1], nativeParallel, mpiComm );
3763  vals.set_file_ids( offsets, 0, data_buffer_size, H5T_NATIVE_INT );
3764  dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tag.name, vals.get_read_count() );
3765  nn = 0;
3766  // Should normally require only one read call, unless the sparse
3767  // nature of file_ids forced the reader to do something strange
3768  while( !vals.done() )
3769  {
3770  dbgOut.printf( 3, "Reading chunk %d of values for \"%s\"\n", ++nn, tag.name );
3771  vals.read( data_buffer, count );
3772  tag_values.insert( tag_values.end(), data_buffer, data_buffer + count );
3773  }
3774  }
3775  catch( ReadHDF5Dataset::Exception )
3776  {
3777  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3778  }
3779  }
3780 
3781  std::sort( tag_values.begin(), tag_values.end() );
3782  tag_values.erase( std::unique( tag_values.begin(), tag_values.end() ), tag_values.end() );
3783 
3784  // Read dense values
3785  std::vector< int > prev_data, curr_data;
3786  for( int i = 0; i < tag.num_dense_indices; ++i )
3787  {
3788  int grp = tag.dense_elem_indices[i];
3789  const char* gname = 0;
3790  mhdf_EntDesc* desc = 0;
3791  if( grp == -1 )
3792  {
3793  gname = mhdf_node_type_handle();
3794  desc = &fileInfo->nodes;
3795  }
3796  else if( grp == -2 )
3797  {
3798  gname = mhdf_set_type_handle();
3799  desc = &fileInfo->sets;
3800  }
3801  else
3802  {
3803  assert( grp >= 0 && grp < fileInfo->num_elem_desc );
3804  gname = fileInfo->elems[grp].handle;
3805  desc = &fileInfo->elems[grp].desc;
3806  }
3807 
3808  Range::iterator s = file_ids.lower_bound( (EntityHandle)( desc->start_id ) );
3809  Range::iterator e = Range::lower_bound( s, file_ids.end(), (EntityHandle)( desc->start_id ) + desc->count );
3810  Range subset;
3811  subset.merge( s, e );
3812 
3813  hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
3814  if( mhdf_isError( &status ) )
3815  {
3816  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
3817  }
3818 
3819  try
3820  {
3821  curr_data.clear();
3822  tag_values.reserve( subset.size() );
3823  const size_t data_buffer_size = bufferSize / sizeof( int );
3824  int* data_buffer = reinterpret_cast< int* >( dataBuffer );
3825 
3826  ReadHDF5Dataset reader( ( tn + " dense vals" ).c_str(), handle, nativeParallel, mpiComm );
3827  reader.set_file_ids( subset, desc->start_id, data_buffer_size, H5T_NATIVE_INT );
3828  dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n", tag.name,
3829  fileInfo->elems[grp].handle, reader.get_read_count() );
3830  int nn = 0;
3831  // Should normally require only one read call, unless the sparse
3832  // nature of file_ids forced the reader to do something strange
3833  while( !reader.done() )
3834  {
3835  dbgOut.printf( 3, "Reading chunk %d of \"%s\"/\"%s\"\n", ++nn, tag.name, fileInfo->elems[grp].handle );
3836  reader.read( data_buffer, count );
3837  curr_data.insert( curr_data.end(), data_buffer, data_buffer + count );
3838  }
3839  }
3840  catch( ReadHDF5Dataset::Exception )
3841  {
3842  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3843  }
3844 
3845  std::sort( curr_data.begin(), curr_data.end() );
3846  curr_data.erase( std::unique( curr_data.begin(), curr_data.end() ), curr_data.end() );
3847  prev_data.clear();
3848  tag_values.swap( prev_data );
3849  std::set_union( prev_data.begin(), prev_data.end(), curr_data.begin(), curr_data.end(),
3850  std::back_inserter( tag_values ) );
3851  }
3852 
3853  return MB_SUCCESS;
3854 }
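// The dense-group loop above folds each group's values into the running
// result with a sort + unique + set_union pass; the same idiom reappears in
// read_tag_values_all below. In isolation, on hypothetical vectors:
static void example_merge_unique( std::vector< int >& accum, std::vector< int >& incoming )
{
    std::sort( incoming.begin(), incoming.end() );
    incoming.erase( std::unique( incoming.begin(), incoming.end() ), incoming.end() );
    std::vector< int > prev;
    accum.swap( prev );  // 'prev' is already sorted and duplicate-free
    std::set_union( prev.begin(), prev.end(), incoming.begin(), incoming.end(),
                    std::back_inserter( accum ) );
}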
3855 
3856 ErrorCode ReadHDF5::read_tag_values_all( int tag_index, std::vector< int >& tag_values )
3857 {
3858  CHECK_OPEN_HANDLES;
3859 
3860  mhdf_Status status;
3861  const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
3862  long junk, num_val;
3863 
3864  // Read sparse values
3865  if( tag.have_sparse )
3866  {
3867  hid_t handles[3];
3868  mhdf_openSparseTagData( filePtr, tag.name, &junk, &num_val, handles, &status );
3869  if( mhdf_isError( &status ) )
3870  {
3871  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
3872  }
3873 
3874  mhdf_closeData( filePtr, handles[0], &status );
3875  if( mhdf_isError( &status ) )
3876  {
3877  MB_SET_ERR_CONT( mhdf_message( &status ) );
3878  mhdf_closeData( filePtr, handles[1], &status );
3879  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3880  }
3881 
3882  hid_t file_type = H5Dget_type( handles[1] );
3883  tag_values.resize( num_val );
3884  mhdf_readTagValuesWithOpt( handles[1], 0, num_val, file_type, &tag_values[0], collIO, &status );
3885  if( mhdf_isError( &status ) )
3886  {
3887  MB_SET_ERR_CONT( mhdf_message( &status ) );
3888  H5Tclose( file_type );
3889  mhdf_closeData( filePtr, handles[1], &status );
3890  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3891  }
3892  H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &tag_values[0], 0, H5P_DEFAULT );
3893  H5Tclose( file_type );
3894 
3895  mhdf_closeData( filePtr, handles[1], &status );
3896  if( mhdf_isError( &status ) )
3897  {
3898  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
3899  }
3900  }
3901 
3902  std::sort( tag_values.begin(), tag_values.end() );
3903  tag_values.erase( std::unique( tag_values.begin(), tag_values.end() ), tag_values.end() );
3904 
3905  // Read dense values
3906  std::vector< int > prev_data, curr_data;
3907  for( int i = 0; i < tag.num_dense_indices; ++i )
3908  {
3909  int grp = tag.dense_elem_indices[i];
3910  const char* gname = 0;
3911  if( grp == -1 )
3912  gname = mhdf_node_type_handle();
3913  else if( grp == -2 )
3914  gname = mhdf_set_type_handle();
3915  else
3916  gname = fileInfo->elems[grp].handle;
3917  hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
3918  if( mhdf_isError( &status ) )
3919  {
3920  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
3921  }
3922 
3923  hid_t file_type = H5Dget_type( handle );
3924  curr_data.resize( num_val );
3925  mhdf_readTagValuesWithOpt( handle, 0, num_val, file_type, &curr_data[0], collIO, &status );
3926  if( mhdf_isError( &status ) )
3927  {
3928  MB_SET_ERR_CONT( mhdf_message( &status ) );
3929  H5Tclose( file_type );
3930  mhdf_closeData( filePtr, handle, &status );
3931  MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3932  }
3933 
3934  H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &curr_data[0], 0, H5P_DEFAULT );
3935  H5Tclose( file_type );
3936  mhdf_closeData( filePtr, handle, &status );
3937  if( mhdf_isError( &status ) )
3938  {
3939  MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
3940  }
3941 
3942  std::sort( curr_data.begin(), curr_data.end() );
3943  curr_data.erase( std::unique( curr_data.begin(), curr_data.end() ), curr_data.end() );
3944 
3945  prev_data.clear();
3946  tag_values.swap( prev_data );
3947  std::set_union( prev_data.begin(), prev_data.end(), curr_data.begin(), curr_data.end(),
3948  std::back_inserter( tag_values ) );
3949  }
3950 
3951  return MB_SUCCESS;
3952 }
3953 void ReadHDF5::print_times()
3954 {
3955 #ifdef MOAB_HAVE_MPI
3956  if( myPcomm )  // reduce only when a parallel communicator exists
3957  {
3958  double recv[NUM_TIMES];
3959  MPI_Reduce( (void*)_times, recv, NUM_TIMES, MPI_DOUBLE, MPI_MAX, 0, myPcomm->proc_config().proc_comm() );
3960  for( int i = 0; i < NUM_TIMES; i++ )
3961  _times[i] = recv[i]; // just get the max from all of them
3962  }
3963  if( !myPcomm || 0 == myPcomm->proc_config().proc_rank() )
3964  {
3965 #endif
3966 
3967  std::cout << "ReadHDF5: " << _times[TOTAL_TIME] << std::endl
3968  << " get set meta " << _times[SET_META_TIME] << std::endl
3969  << " partial subsets " << _times[SUBSET_IDS_TIME] << std::endl
3970  << " partition time " << _times[GET_PARTITION_TIME] << std::endl
3971  << " get set ids " << _times[GET_SET_IDS_TIME] << std::endl
3972  << " set contents " << _times[GET_SET_CONTENTS_TIME] << std::endl
3973  << " polyhedra " << _times[GET_POLYHEDRA_TIME] << std::endl
3974  << " elements " << _times[GET_ELEMENTS_TIME] << std::endl
3975  << " nodes " << _times[GET_NODES_TIME] << std::endl
3976  << " node adjacency " << _times[GET_NODEADJ_TIME] << std::endl
3977  << " side elements " << _times[GET_SIDEELEM_TIME] << std::endl
3978  << " update connectivity " << _times[UPDATECONN_TIME] << std::endl
3979  << " adjacency " << _times[ADJACENCY_TIME] << std::endl
3980  << " delete non_adj " << _times[DELETE_NON_SIDEELEM_TIME] << std::endl
3981  << " recursive sets " << _times[READ_SET_IDS_RECURS_TIME] << std::endl
3982  << " find contain_sets " << _times[FIND_SETS_CONTAINING_TIME] << std::endl
3983  << " read sets " << _times[READ_SETS_TIME] << std::endl
3984  << " read tags " << _times[READ_TAGS_TIME] << std::endl
3985  << " store file ids " << _times[STORE_FILE_IDS_TIME] << std::endl
3986  << " read qa records " << _times[READ_QA_TIME] << std::endl;
3987 
3988 #ifdef MOAB_HAVE_MPI
3989  }
3990 #endif
3991 }
3992 
3993 } // namespace moab