Mesh Oriented datABase (version 5.5.0)
An array-based unstructured mesh library
WriteHDF5.cpp
1 /**
2  * MOAB, a Mesh-Oriented datABase, is a software component for creating,
3  * storing and accessing finite element mesh data.
4  *
5  * Copyright 2004 Sandia Corporation. Under the terms of Contract
6  * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
7  * retains certain rights in this software.
8  *
9  * This library is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  */
15 
16 //-------------------------------------------------------------------------
17 // Filename : WriteHDF5.cpp
18 //
19 // Purpose : TSTT HDF5 Writer
20 //
21 // Special Notes : WriteSLAC used as template for this
22 //
23 // Creator : Jason Kraftcheck
24 //
25 // Creation Date : 04/01/04
26 //-------------------------------------------------------------------------
27 
28 #include <cassert>
29 #if defined( _MSC_VER )
30 typedef int id_t;
31 #elif defined( __MINGW32__ )
32 #include <sys/time.h>
33 #else
34 #include <ctime>
35 #endif
36 
37 #include <cstdlib>
38 #include <cstring>
39 #include <cstdarg>
40 #include <limits>
41 #include <cstdio>
42 #include <iostream>
43 #include "WriteHDF5.hpp"
44 #include <H5Tpublic.h>
45 #include <H5Ppublic.h>
46 #include <H5Epublic.h>
47 #include "moab/Interface.hpp"
48 #include "Internals.hpp"
49 #include "MBTagConventions.hpp"
50 #include "moab/CN.hpp"
51 #include "moab/FileOptions.hpp"
52 #include "moab/CpuTimer.hpp"
53 #include "IODebugTrack.hpp"
54 #include "mhdf.h"
55 
56 #ifndef MOAB_HAVE_HDF5
57 #error Attempt to compile WriteHDF5 with HDF5 support disabled
58 #endif
59 
60 #undef BLOCKED_COORD_IO
61 
62 #ifdef MOAB_HAVE_VALGRIND
63 #include <valgrind/memcheck.h>
64 
65 template < typename T >
66 inline void VALGRIND_MAKE_VEC_UNDEFINED( std::vector< T >& v )
67 {
68  (void)VALGRIND_MAKE_MEM_UNDEFINED( (T*)&v[0], v.size() * sizeof( T ) );
69 }
70 
71 #else
72 #ifndef VALGRIND_CHECK_MEM_IS_DEFINED
73 #define VALGRIND_CHECK_MEM_IS_DEFINED( a, b ) ( (void)0 )
74 #endif
75 #ifndef VALGRIND_CHECK_MEM_IS_ADDRESSABLE
76 #define VALGRIND_CHECK_MEM_IS_ADDRESSABLE( a, b ) ( (void)0 )
77 #endif
78 #ifndef VALGRIND_MAKE_MEM_UNDEFINED
79 #define VALGRIND_MAKE_MEM_UNDEFINED( a, b ) ( (void)0 )
80 #endif
81 
82 template < typename T >
83 inline void VALGRIND_MAKE_VEC_UNDEFINED( std::vector< T >& )
84 {
85  (void)VALGRIND_MAKE_MEM_UNDEFINED( 0, 0 );
86 }
87 
88 #endif
89 
90 namespace moab
91 {
92 
93 #define WRITE_HDF5_BUFFER_SIZE ( 40 * 1024 * 1024 )
94 
95 static hid_t get_id_type()
96 {
97  if( 8 == sizeof( WriteHDF5::wid_t ) )
98  {
99  if( 8 == sizeof( long ) )
100  return H5T_NATIVE_ULONG;
101  else
102  return H5T_NATIVE_UINT64;
103  }
104  else if( 4 == sizeof( WriteHDF5::wid_t ) )
105  {
106  if( 4 == sizeof( int ) )
107  return H5T_NATIVE_UINT;
108  else
109  return H5T_NATIVE_UINT32;
110  }
111  else
112  {
113  assert( 0 );
114  return (hid_t)-1;
115  }
116 }
117 
118 // This is the HDF5 type used to store file IDs
119 const hid_t WriteHDF5::id_type = get_id_type();
120 
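
The selection above matters because this hid_t is later passed as the memory type to calls such as mhdf_writeConnectivityWithOpt: it must describe a buffer of wid_t values exactly. A minimal standalone sketch of that invariant (illustrative only, not part of this file):

#include <H5Tpublic.h>
#include <cassert>
#include <cstdint>

typedef uint64_t wid_t;  // stands in for WriteHDF5::wid_t on an 8-byte build

int main()
{
    // Same preference order as get_id_type() above.
    hid_t id_type = ( 8 == sizeof( long ) ) ? H5T_NATIVE_ULONG : H5T_NATIVE_UINT64;
    // The chosen HDF5 memory type must match wid_t's width, or HDF5 would
    // read the wrong number of bytes per ID from the write buffer.
    assert( H5Tget_size( id_type ) == sizeof( wid_t ) );
    return 0;
}
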
121 // This function doesn't do anything useful. It's just a convenient
122 // place to set a breakpoint to determine why the writer fails.
123 static inline ErrorCode error( ErrorCode rval )
124 {
125  return rval;
126 }
127 
128 // Call \c error function during HDF5 library errors to make
129 // it easier to trap such errors in the debugger. This function
130 // gets registered with the HDF5 library as a callback. It
131 // works the same as the default (H5Eprint), except that it
132 // also calls the \c error function as a no-op.
133 #if defined( H5E_auto_t_vers ) && H5E_auto_t_vers > 1
134 static herr_t handle_hdf5_error( hid_t stack, void* data )
135 {
136  WriteHDF5::HDF5ErrorHandler* h = reinterpret_cast< WriteHDF5::HDF5ErrorHandler* >( data );
137  herr_t result = 0;
138  if( h->func ) result = ( *h->func )( stack, h->data );
139  error( MB_FAILURE );
140  return result;
141 }
142 #else
143 static herr_t handle_hdf5_error( void* data )
144 {
145  WriteHDF5::HDF5ErrorHandler* h = reinterpret_cast< WriteHDF5::HDF5ErrorHandler* >( data );
146  herr_t result = 0;
147  if( h->func ) result = ( *h->func )( h->data );
148  error( MB_FAILURE );
149  return result;
150 }
151 #endif
152 
153 // Some macros to handle error checking. The
154 // CHK_MHDF_ERR_* macros check the value of an mhdf_Status
155 // object. The CHK_MB_ERR_* macros check the value of an ErrorCode.
156 // The *_0 macros accept no other arguments. The *_1
157 // macros accept a single HDF5 handle to close on error,
158 // and the *_2 and *_3 macros accept an array of two or
159 // three HDF5 handles to close on error. The *_2C macros
160 // accept one HDF5 handle to close on error plus a bool
161 // and a second HDF5 handle, where the latter handle is
162 // closed only if the bool is true. All macros contain a
163 // "return" statement.
164 #define CHK_MHDF_ERR_0( A ) \
165  do \
166  { \
167  if( mhdf_isError( &( A ) ) ) \
168  { \
169  MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
170  assert( 0 ); \
171  return error( MB_FAILURE ); \
172  } \
173  } while( false )
174 
175 #define CHK_MHDF_ERR_1( A, B ) \
176  do \
177  { \
178  if( mhdf_isError( &( A ) ) ) \
179  { \
180  MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
181  assert( 0 ); \
182  mhdf_closeData( filePtr, ( B ), &( A ) ); \
183  return error( MB_FAILURE ); \
184  } \
185  } while( false )
186 
187 #define CHK_MHDF_ERR_2( A, B ) \
188  do \
189  { \
190  if( mhdf_isError( &( A ) ) ) \
191  { \
192  MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
193  assert( 0 ); \
194  mhdf_closeData( filePtr, ( B )[0], &( A ) ); \
195  mhdf_closeData( filePtr, ( B )[1], &( A ) ); \
196  return error( MB_FAILURE ); \
197  } \
198  } while( false )
199 
200 #define CHK_MHDF_ERR_3( A, B ) \
201  do \
202  { \
203  if( mhdf_isError( &( A ) ) ) \
204  { \
205  MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
206  assert( 0 ); \
207  mhdf_closeData( filePtr, ( B )[0], &( A ) ); \
208  mhdf_closeData( filePtr, ( B )[1], &( A ) ); \
209  mhdf_closeData( filePtr, ( B )[2], &( A ) ); \
210  return error( MB_FAILURE ); \
211  } \
212  } while( false )
213 
214 #define CHK_MHDF_ERR_2C( A, B, C, D ) \
215  do \
216  { \
217  if( mhdf_isError( &( A ) ) ) \
218  { \
219  MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
220  assert( 0 ); \
221  mhdf_closeData( filePtr, ( B ), &( A ) ); \
222  if( C ) mhdf_closeData( filePtr, ( D ), &( A ) ); \
223  return error( MB_FAILURE ); \
224  } \
225  } while( false )
226 
227 #define CHK_MB_ERR_0( A ) \
228  do \
229  { \
230  if( MB_SUCCESS != ( A ) ) \
231  { \
232  MB_CHK_ERR_CONT( ( A ) ); \
233  return error( A ); \
234  } \
235  } while( false )
236 
237 #define CHK_MB_ERR_1( A, B, C ) \
238  do \
239  { \
240  if( MB_SUCCESS != ( A ) ) \
241  { \
242  MB_CHK_ERR_CONT( ( A ) ); \
243  mhdf_closeData( filePtr, ( B ), &( C ) ); \
244  assert( 0 ); \
245  return error( A ); \
246  } \
247  } while( false )
248 
249 #define CHK_MB_ERR_2( A, B, C ) \
250  do \
251  { \
252  if( MB_SUCCESS != ( A ) ) \
253  { \
254  MB_CHK_ERR_CONT( ( A ) ); \
255  mhdf_closeData( filePtr, ( B )[0], &( C ) ); \
256  mhdf_closeData( filePtr, ( B )[1], &( C ) ); \
257  write_finished(); \
258  assert( 0 ); \
259  return error( A ); \
260  } \
261  } while( false )
262 
263 #define CHK_MB_ERR_3( A, B, C ) \
264  do \
265  { \
266  if( MB_SUCCESS != ( A ) ) \
267  { \
268  MB_CHK_ERR_CONT( ( A ) ); \
269  mhdf_closeData( filePtr, ( B )[0], &( C ) ); \
270  mhdf_closeData( filePtr, ( B )[1], &( C ) ); \
271  mhdf_closeData( filePtr, ( B )[2], &( C ) ); \
272  write_finished(); \
273  assert( 0 ); \
274  return error( A ); \
275  } \
276  } while( false )
277 
278 #define CHK_MB_ERR_2C( A, B, C, D, E ) \
279  do \
280  { \
281  if( MB_SUCCESS != ( A ) ) \
282  { \
283  MB_CHK_ERR_CONT( ( A ) ); \
284  mhdf_closeData( filePtr, ( B ), &( E ) ); \
285  if( C ) mhdf_closeData( filePtr, ( D ), &( E ) ); \
286  write_finished(); \
287  assert( 0 ); \
288  return error( A ); \
289  } \
290  } while( false )
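
A hedged sketch of the intended usage (a hypothetical helper, not a function in this file): each mhdf call is followed by the macro matching the number of HDF5 handles that must be closed before returning on error. The parameter is deliberately named filePtr so the macro expansions resolve.

// Hypothetical free function illustrating the macro pattern above.
static ErrorCode example_write_set_ids( mhdf_FileHandle filePtr, long offset, long count, const WriteHDF5::wid_t* ids )
{
    mhdf_Status status;
    long table_size;
    hid_t table = mhdf_openSetData( filePtr, &table_size, &status );
    CHK_MHDF_ERR_0( status );  // No handles open yet: nothing to close

    mhdf_writeSetData( table, offset, count, WriteHDF5::id_type, ids, &status );
    CHK_MHDF_ERR_1( status, table );  // On error: close 'table', then return

    mhdf_closeData( filePtr, table, &status );
    CHK_MHDF_ERR_0( status );
    return MB_SUCCESS;
}
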
291 
292 #define debug_barrier() debug_barrier_line( __LINE__ )
293 
294 // RAII helper: reports HDF5 object handles leaked within a scope
295 class CheckOpenWriteHDF5Handles
296 {
297  int fileline;
298  mhdf_FileHandle handle;
299  int enter_count;
300 
301  public:
302  CheckOpenWriteHDF5Handles( mhdf_FileHandle file, int line )
303  : fileline( line ), handle( file ), enter_count( mhdf_countOpenHandles( file ) )
304  {
305  }
306 
307  ~CheckOpenWriteHDF5Handles()
308  {
309  int new_count = mhdf_countOpenHandles( handle );
310  if( new_count != enter_count )
311  {
312  std::cout << "Leaked HDF5 object handle in function at " << __FILE__ << ":" << fileline << std::endl
313  << "Open at entrance: " << enter_count << std::endl
314  << "Open at exit: " << new_count << std::endl;
315  }
316  }
317 };
318 
319 MPEState WriteHDF5::topState;
320 MPEState WriteHDF5::subState;
321 
322 #ifdef NDEBUG
323 #define CHECK_OPEN_HANDLES
324 #else
325 #define CHECK_OPEN_HANDLES CheckOpenWriteHDF5Handles check_open_handles_( filePtr, __LINE__ )
326 #endif
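
Sketch of the RAII check this macro enables (hypothetical function, for illustration): the local CheckOpenWriteHDF5Handles object snapshots mhdf_countOpenHandles() on entry, and its destructor prints a leak report if the count differs at scope exit.

// Hypothetical free function; 'filePtr' is named so CHECK_OPEN_HANDLES expands.
static ErrorCode example_leak_checked_read_meta( mhdf_FileHandle filePtr )
{
    CHECK_OPEN_HANDLES;  // Expands to nothing when NDEBUG is defined

    mhdf_Status status;
    long num_sets, first_id;
    hid_t table = mhdf_openSetMeta( filePtr, &num_sets, &first_id, &status );
    CHK_MHDF_ERR_0( status );

    // Omitting this close would make the CheckOpenWriteHDF5Handles object
    // created by CHECK_OPEN_HANDLES print a leak report at scope exit.
    mhdf_closeData( filePtr, table, &status );
    CHK_MHDF_ERR_0( status );
    return MB_SUCCESS;
}
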
327 
328 bool WriteHDF5::convert_handle_tag( const EntityHandle* source, EntityHandle* dest, size_t count ) const
329 {
330  bool some_valid = false;
331  for( size_t i = 0; i < count; ++i )
332  {
333  if( !source[i] )
334  dest[i] = 0;
335  else
336  {
337  dest[i] = idMap.find( source[i] );
338  if( dest[i] ) some_valid = true;
339  }
340  }
341 
342  return some_valid;
343 }
344 
345 bool WriteHDF5::convert_handle_tag( EntityHandle* data, size_t count ) const
346 {
347  assert( sizeof( EntityHandle ) == sizeof( wid_t ) );
348  return convert_handle_tag( data, data, count );
349 }
350 
351 ErrorCode WriteHDF5::assign_ids( const Range& entities, wid_t id )
352 {
353  Range::const_pair_iterator pi;
354  for( pi = entities.const_pair_begin(); pi != entities.const_pair_end(); ++pi )
355  {
356  const EntityHandle n = pi->second - pi->first + 1;
357  dbgOut.printf( 3, "Assigning %s %lu through %lu to file IDs [%lu,%lu]\n",
358  CN::EntityTypeName( TYPE_FROM_HANDLE( pi->first ) ),
359  (unsigned long)( ID_FROM_HANDLE( pi->first ) ),
360  (unsigned long)( ID_FROM_HANDLE( pi->first ) + n - 1 ), (unsigned long)id,
361  (unsigned long)( id + n - 1 ) );
362  if( TYPE_FROM_HANDLE( pi->first ) == MBPOLYGON || TYPE_FROM_HANDLE( pi->first ) == MBPOLYHEDRON )
363  {
364  int num_vertices = 0;
365  const EntityHandle* conn = 0;
366  iFace->get_connectivity( pi->first, conn, num_vertices );
367  dbgOut.printf( 3, " poly with %d verts/faces \n", num_vertices );
368  }
369  if( !idMap.insert( pi->first, id, n ).second ) return error( MB_FAILURE );
370  id += n;
371  }
372 
373  return MB_SUCCESS;
374 }
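
The idMap populated above is a range map: each insert records a whole run (first handle, first file ID, count), so contiguous entity handles map to contiguous file IDs without one entry per entity. A small self-contained sketch of that compression idea (hypothetical MiniRangeMap, not MOAB's RangeMap API):

#include <cassert>
#include <map>

// Hypothetical run-length handle->ID map mirroring what idMap.insert()
// and idMap.find() do in assign_ids()/convert_handle_tag() above.
struct MiniRangeMap
{
    // key: first handle of a run; value: (first file ID, run length)
    std::map< unsigned long, std::pair< unsigned long, unsigned long > > runs;

    void insert( unsigned long handle, unsigned long id, unsigned long count )
    {
        runs[handle] = std::make_pair( id, count );
    }

    unsigned long find( unsigned long handle ) const  // 0 == not found
    {
        std::map< unsigned long, std::pair< unsigned long, unsigned long > >::const_iterator it =
            runs.upper_bound( handle );
        if( it == runs.begin() ) return 0;
        --it;  // last run starting at or before 'handle'
        if( handle >= it->first + it->second.second ) return 0;
        return it->second.first + ( handle - it->first );
    }
};

int main()
{
    MiniRangeMap map;
    map.insert( 100, 1, 50 );        // handles 100..149 -> file IDs 1..50
    assert( map.find( 100 ) == 1 );
    assert( map.find( 149 ) == 50 );
    assert( map.find( 150 ) == 0 );  // outside any run
    return 0;
}
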
375 
376 const char* WriteHDF5::ExportSet::name() const
377 {
378  static char buffer[128];
379  switch( type )
380  {
381  case MBVERTEX:
382  return mhdf_node_type_handle();
383  case MBENTITYSET:
384  return mhdf_set_type_handle();
385  default:
386  sprintf( buffer, "%s%d", CN::EntityTypeName( type ), num_nodes );
387  return buffer;
388  }
389 }
390 
391 WriterIface* WriteHDF5::factory( Interface* iface )
392 {
393  return new WriteHDF5( iface );
394 }
395 
396 WriteHDF5::WriteHDF5( Interface* iface )
397  : bufferSize( WRITE_HDF5_BUFFER_SIZE ), dataBuffer( 0 ), iFace( iface ), writeUtil( 0 ), filePtr( 0 ),
398  setContentsOffset( 0 ), setChildrenOffset( 0 ), setParentsOffset( 0 ), maxNumSetContents( 0 ),
399  maxNumSetChildren( 0 ), maxNumSetParents( 0 ), writeSets( false ), writeSetContents( false ),
400  writeSetChildren( false ), writeSetParents( false ), parallelWrite( false ), collectiveIO( false ),
401  writeTagDense( false ), writeProp( H5P_DEFAULT ), dbgOut( "H5M", stderr ), debugTrack( false )
402 {
403 }
404 
405 ErrorCode WriteHDF5::init()
406 {
407  ErrorCode rval;
408 
409  if( writeUtil ) // init has already been called
410  return MB_SUCCESS;
411  /*
412  #ifdef DEBUG
413  H5Eset_auto(&hdf_error_handler, writeUtil); // HDF5 callback for errors
414  #endif
415  */
416  // For known tag types, store the corresponding HDF5 in which
417  // the tag data is to be written in the file.
418  // register_known_tag_types(iFace);
419 
420  // Get the util interface
421  rval = iFace->query_interface( writeUtil );
422  CHK_MB_ERR_0( rval );
423 
424  idMap.clear();
425 
426 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
427  herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
428 #else
429  herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
430 #endif
431  if( err < 0 )
432  {
433  errorHandler.func = 0;
434  errorHandler.data = 0;
435  }
436  else
437  {
438 #if defined( H5Eset_auto_vers ) && H5Eset_auto_vers > 1
439  err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
440 #else
441  err = H5Eset_auto( &handle_hdf5_error, &errorHandler );
442 #endif
443  if( err < 0 )
444  {
445  errorHandler.func = 0;
446  errorHandler.data = 0;
447  }
448  }
449 
450  if( !topState.valid() ) topState = MPEState( "WriteHDF5", "yellow" );
451  if( !subState.valid() ) subState = MPEState( "WriteHDF5 subevent", "cyan" );
452 
453  return MB_SUCCESS;
454 }
455 
456 ErrorCode WriteHDF5::write_finished()
457 {
458  // Release memory allocated in lists
459  exportList.clear();
460  nodeSet.range.clear();
461  setSet.range.clear();
462  tagList.clear();
463  idMap.clear();
464 
465  HDF5ErrorHandler handler;
466 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
467  herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
468 #else
469  herr_t err = H5Eget_auto( &handler.func, &handler.data );
470 #endif
471  if( err >= 0 && handler.func == &handle_hdf5_error )
472  {
473  assert( handler.data == &errorHandler );
474 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
475  H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
476 #else
477  H5Eset_auto( errorHandler.func, errorHandler.data );
478 #endif
479  }
480 
481  return MB_SUCCESS;
482 }
483 
484 WriteHDF5::~WriteHDF5()
485 {
486  if( !writeUtil ) // init() failed.
487  return;
488 
489  iFace->release_interface( writeUtil );
490 }
491 
492 ErrorCode WriteHDF5::write_file( const char* filename,
493  bool overwrite,
494  const FileOptions& opts,
495  const EntityHandle* set_array,
496  const int num_sets,
497  const std::vector< std::string >& qa_records,
498  const Tag* tag_list,
499  int num_tags,
500  int user_dimension )
501 {
502  mhdf_Status status;
503 
504  parallelWrite = false;
505  collectiveIO = false;
506 
507  // Enable debug output
508  int tmpval = 0;
509  if( MB_SUCCESS == opts.get_int_option( "DEBUG_IO", 1, tmpval ) ) dbgOut.set_verbosity( tmpval );
510 
511  // writeTagDense = (MB_SUCCESS == opts.get_null_option("DENSE_TAGS"));
512  writeTagDense = true;
513 
514  // Enable some extra checks for writes. Note: amongst other things this
515  // will print errors if the entire file is not written, so if doing a
516  // partial write that is not a parallel write, this should be disabled.
517  debugTrack = ( MB_SUCCESS == opts.get_null_option( "DEBUG_BINIO" ) );
518 
519  bufferSize = WRITE_HDF5_BUFFER_SIZE;
520  int buf_size;
521  ErrorCode rval = opts.get_int_option( "BUFFER_SIZE", buf_size );
522  if( MB_SUCCESS == rval && buf_size >= 24 ) bufferSize = buf_size;
523 
524  // Allocate internal buffer to use when gathering data to write.
525  dataBuffer = (char*)malloc( bufferSize );
526  if( !dataBuffer ) return error( MB_MEMORY_ALLOCATION_FAILED );
527 
528  // Clear filePtr so we know if it is open upon failure
529  filePtr = 0;
530 
531  // Do actual write.
532  writeProp = H5P_DEFAULT;
533  ErrorCode result = write_file_impl( filename, overwrite, opts, set_array, num_sets, qa_records, tag_list, num_tags,
534  user_dimension );
535  // Close writeProp if it was opened
536  if( writeProp != H5P_DEFAULT ) H5Pclose( writeProp );
537 
538  // Free memory buffer
539  free( dataBuffer );
540  dataBuffer = 0;
541 
542  // Close file
543  bool created_file = false;
544  if( filePtr )
545  {
546  created_file = true;
547  mhdf_closeFile( filePtr, &status );
548  filePtr = 0;
549  if( mhdf_isError( &status ) )
550  {
551  MB_SET_ERR_CONT( mhdf_message( &status ) );
552  if( MB_SUCCESS == result ) result = MB_FAILURE;
553  }
554  }
555 
556  // Release other resources
557  if( MB_SUCCESS == result )
558  result = write_finished();
559  else
560  write_finished();
561 
562  // If write failed, remove file unless KEEP option was specified
563  if( MB_SUCCESS != result && created_file && MB_ENTITY_NOT_FOUND == opts.get_null_option( "KEEP" ) )
564  remove( filename );
565 
566  return result;
567 }
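
Caller-side sketch: the options consumed above (DEBUG_IO, BUFFER_SIZE, DEBUG_BINIO, KEEP) arrive through the options string that MOAB applications pass to Interface::write_file. A minimal, hedged example (file name and option values are illustrative):

#include "moab/Core.hpp"

int main()
{
    moab::Core mb;
    // ... build or load a mesh here ...

    // The option string is parsed into the FileOptions object this writer
    // receives: verbosity-3 debug output, a 16 MB internal buffer, and
    // keep the partial file for inspection if the write fails.
    moab::ErrorCode rval =
        mb.write_file( "mesh_out.h5m", 0, "DEBUG_IO=3;BUFFER_SIZE=16777216;KEEP" );
    return ( moab::MB_SUCCESS == rval ) ? 0 : 1;
}
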
568 
569 ErrorCode WriteHDF5::write_file_impl( const char* filename,
570  bool overwrite,
571  const FileOptions& opts,
572  const EntityHandle* set_array,
573  const int num_sets,
574  const std::vector< std::string >& qa_records,
575  const Tag* tag_list,
576  int num_tags,
577  int user_dimension )
578 {
579  ErrorCode result;
580  std::list< TagDesc >::const_iterator t_itor;
581  std::list< ExportSet >::iterator ex_itor;
582  EntityHandle elem_count, max_id;
583  double times[NUM_TIMES] = { 0 };
584 
585  if( MB_SUCCESS != init() ) return error( MB_FAILURE );
586 
587  // See if we need to report times
588  bool cputime = false;
589  result = opts.get_null_option( "CPUTIME" );
590  if( MB_SUCCESS == result ) cputime = true;
591 
592  CpuTimer timer;
593 
594  dbgOut.tprint( 1, "Gathering Mesh\n" );
595  topState.start( "gathering mesh" );
596 
597  // Gather mesh to export
598  exportList.clear();
599  if( 0 == num_sets || ( 1 == num_sets && set_array[0] == 0 ) )
600  {
601  result = gather_all_mesh();
602  topState.end( result );
603  CHK_MB_ERR_0( result );
604  }
605  else
606  {
607  std::vector< EntityHandle > passed_export_list( set_array, set_array + num_sets );
608  result = gather_mesh_info( passed_export_list );
609  topState.end( result );
610  CHK_MB_ERR_0( result );
611  }
612 
613  times[GATHER_TIME] = timer.time_elapsed();
614 
615  // if (nodeSet.range.size() == 0)
616  // return error(MB_ENTITY_NOT_FOUND);
617 
618  dbgOut.tprint( 1, "Checking ID space\n" );
619 
620  // Make sure ID space is sufficient
621  elem_count = nodeSet.range.size() + setSet.range.size();
622  for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
623  elem_count += ex_itor->range.size();
624  max_id = (EntityHandle)1 << ( 8 * sizeof( wid_t ) - 1 );
625  if( elem_count > max_id )
626  {
627  MB_SET_ERR_CONT( "ID space insufficient for mesh size" );
628  return error( MB_FAILURE );
629  }
630 
631  dbgOut.tprint( 1, "Creating File\n" );
632 
633  // Figure out the dimension in which to write the mesh.
634  int mesh_dim;
635  result = iFace->get_dimension( mesh_dim );
636  CHK_MB_ERR_0( result );
637 
638  if( user_dimension < 1 ) user_dimension = mesh_dim;
639  user_dimension = user_dimension > mesh_dim ? mesh_dim : user_dimension;
640 
641  // Create the file layout, including all tables (zero-ed) and
642  // all structure and meta information.
643  const char* optnames[] = { "WRITE_PART", "FORMAT", 0 };
644  int junk;
645  parallelWrite = ( MB_SUCCESS == opts.match_option( "PARALLEL", optnames, junk ) );
646  if( parallelWrite )
647  {
648  // Just store Boolean value based on string option here.
649  // parallel_create_file will set writeProp accordingly.
650  // collectiveIO = (MB_SUCCESS == opts.get_null_option("COLLECTIVE"));
651  // dbgOut.printf(2, "'COLLECTIVE' option = %s\n", collectiveIO ? "YES" : "NO");
652  // Do this all the time, as it appears to be much faster than indep in some cases
653  collectiveIO = true;
654  result =
655  parallel_create_file( filename, overwrite, qa_records, opts, tag_list, num_tags, user_dimension, times );
656  }
657  else
658  {
659  result = serial_create_file( filename, overwrite, qa_records, tag_list, num_tags, user_dimension );
660  }
661  if( MB_SUCCESS != result ) return error( result );
662 
663  times[CREATE_TIME] = timer.time_elapsed();
664 
665  dbgOut.tprint( 1, "Writing Nodes.\n" );
666  // Write node coordinates
667  if( !nodeSet.range.empty() || parallelWrite )
668  {
669  topState.start( "writing coords" );
670  result = write_nodes();
671  topState.end( result );
672  if( MB_SUCCESS != result ) return error( result );
673  }
674 
675  times[COORD_TIME] = timer.time_elapsed();
676 
677  dbgOut.tprint( 1, "Writing connectivity.\n" );
678 
679  // Write element connectivity
680  for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
681  {
682  topState.start( "writing connectivity for ", ex_itor->name() );
683  result = write_elems( *ex_itor );
684  topState.end( result );
685  if( MB_SUCCESS != result ) return error( result );
686  }
687  times[CONN_TIME] = timer.time_elapsed();
688 
689  dbgOut.tprint( 1, "Writing sets.\n" );
690 
691  // Write meshsets
692  result = write_sets( times );
693  if( MB_SUCCESS != result ) return error( result );
694  debug_barrier();
695 
696  times[SET_TIME] = timer.time_elapsed();
697  dbgOut.tprint( 1, "Writing adjacencies.\n" );
698 
699  // Write adjacencies
700  // Tim says don't save node adjacencies!
701 #ifdef MB_H5M_WRITE_NODE_ADJACENCIES
702  result = write_adjacencies( nodeSet );
703  if( MB_SUCCESS != result ) return error( result );
704 #endif
705  for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
706  {
707  topState.start( "writing adjacencies for ", ex_itor->name() );
708  result = write_adjacencies( *ex_itor );
709  topState.end( result );
710  if( MB_SUCCESS != result ) return error( result );
711  }
712  times[ADJ_TIME] = timer.time_elapsed();
713 
714  dbgOut.tprint( 1, "Writing tags.\n" );
715 
716  // Write tags
717  for( t_itor = tagList.begin(); t_itor != tagList.end(); ++t_itor )
718  {
719  std::string name;
720  iFace->tag_get_name( t_itor->tag_id, name );
721  topState.start( "writing tag: ", name.c_str() );
722  result = write_tag( *t_itor, times );
723  topState.end( result );
724  if( MB_SUCCESS != result ) return error( result );
725  }
726  times[TAG_TIME] = timer.time_elapsed();
727 
728  times[TOTAL_TIME] = timer.time_since_birth();
729 
730  if( cputime )
731  {
732  print_times( times );
733  }
734 
735  return MB_SUCCESS;
736 }
737 
738 ErrorCode WriteHDF5::initialize_mesh( const Range ranges[5] )
739 {
740  ErrorCode rval;
741 
742  if( !ranges[0].all_of_type( MBVERTEX ) ) return error( MB_FAILURE );
743  nodeSet.range = ranges[0];
744  nodeSet.type = MBVERTEX;
745  nodeSet.num_nodes = 1;
746  nodeSet.max_num_ents = nodeSet.max_num_adjs = 0;
747 
748  if( !ranges[4].all_of_type( MBENTITYSET ) ) return error( MB_FAILURE );
749  setSet.range = ranges[4];
750  setSet.type = MBENTITYSET;
751  setSet.num_nodes = 0;
752  setSet.max_num_ents = setSet.max_num_adjs = 0;
753  maxNumSetContents = maxNumSetChildren = maxNumSetParents = 0;
754 
755  exportList.clear();
756  std::vector< Range > bins( 1024 ); // Sort entities by connectivity length
757  // Resize is expensive due to Range copy, so start big
758  for( EntityType type = MBEDGE; type < MBENTITYSET; ++type )
759  {
760  ExportSet set;
761  set.max_num_ents = set.max_num_adjs = 0;
762  const int dim = CN::Dimension( type );
763 
764  // Group entities by connectivity length
765  bins.clear();
766  assert( dim >= 0 && dim <= 4 );
767  std::pair< Range::const_iterator, Range::const_iterator > p = ranges[dim].equal_range( type );
768  Range::const_iterator i = p.first;
769  while( i != p.second )
770  {
771  Range::const_iterator first = i;
772  EntityHandle const* conn;
773  int len, firstlen;
774 
775  // Dummy storage vector for structured mesh "get_connectivity" function
776  std::vector< EntityHandle > storage;
777 
778  rval = iFace->get_connectivity( *i, conn, firstlen, false, &storage );
779  if( MB_SUCCESS != rval ) return error( rval );
780 
781  for( ++i; i != p.second; ++i )
782  {
783  rval = iFace->get_connectivity( *i, conn, len, false, &storage );
784  if( MB_SUCCESS != rval ) return error( rval );
785 
786  if( len != firstlen ) break;
787  }
788 
789  if( firstlen >= (int)bins.size() ) bins.resize( firstlen + 1 );
790  bins[firstlen].merge( first, i );
791  }
792  // Create ExportSet for each group
793  for( std::vector< Range >::iterator j = bins.begin(); j != bins.end(); ++j )
794  {
795  if( j->empty() ) continue;
796 
797  set.range.clear();
798  set.type = type;
799  set.num_nodes = j - bins.begin();
800  exportList.push_back( set );
801  exportList.back().range.swap( *j );
802  }
803  }
804 
805  return MB_SUCCESS;
806 }
807 
808 // Gather the mesh to be written from a list of owning meshsets.
809 ErrorCode WriteHDF5::gather_mesh_info( const std::vector< EntityHandle >& export_sets )
810 {
811  ErrorCode rval;
812 
813  int dim;
814  Range range; // Temporary storage
815  Range ranges[5]; // Lists of entities to export, grouped by dimension
816 
817  // Gather list of all related sets
818  std::vector< EntityHandle > stack( export_sets );
819  std::copy( export_sets.begin(), export_sets.end(), stack.begin() );
820  std::vector< EntityHandle > set_children;
821  while( !stack.empty() )
822  {
823  EntityHandle meshset = stack.back();
824  stack.pop_back();
825  ranges[4].insert( meshset );
826 
827  // Get contained sets
828  range.clear();
829  rval = iFace->get_entities_by_type( meshset, MBENTITYSET, range );
830  CHK_MB_ERR_0( rval );
831  for( Range::iterator ritor = range.begin(); ritor != range.end(); ++ritor )
832  {
833  if( ranges[4].find( *ritor ) == ranges[4].end() ) stack.push_back( *ritor );
834  }
835 
836  // Get child sets
837  set_children.clear();
838  rval = iFace->get_child_meshsets( meshset, set_children, 1 );
839  CHK_MB_ERR_0( rval );
840  for( std::vector< EntityHandle >::iterator vitor = set_children.begin(); vitor != set_children.end(); ++vitor )
841  {
842  if( ranges[4].find( *vitor ) == ranges[4].end() ) stack.push_back( *vitor );
843  }
844  }
845 
846  // Gather list of all mesh entities from list of sets,
847  // grouped by dimension.
848  for( Range::iterator setitor = ranges[4].begin(); setitor != ranges[4].end(); ++setitor )
849  {
850  for( dim = 0; dim < 4; ++dim )
851  {
852  range.clear();
853  rval = iFace->get_entities_by_dimension( *setitor, dim, range, false );
854  CHK_MB_ERR_0( rval );
855 
856  ranges[dim].merge( range );
857  }
858  }
859 
860  // For each list of elements, append adjacent children and
861  // nodes to lists.
862  for( dim = 3; dim > 0; --dim )
863  {
864  for( int cdim = 1; cdim < dim; ++cdim )
865  {
866  range.clear();
867  rval = iFace->get_adjacencies( ranges[dim], cdim, false, range );
868  CHK_MB_ERR_0( rval );
869  ranges[cdim].merge( range );
870  }
871  range.clear();
872  rval = writeUtil->gather_nodes_from_elements( ranges[dim], 0, range );
873  CHK_MB_ERR_0( rval );
874  ranges[0].merge( range );
875  }
876 
877  return initialize_mesh( ranges );
878 }
879 
880 // Gather all the mesh and related information to be written.
881 ErrorCode WriteHDF5::gather_all_mesh()
882 {
883  ErrorCode rval;
884  Range ranges[5];
885 
886  rval = iFace->get_entities_by_type( 0, MBVERTEX, ranges[0] );
887  if( MB_SUCCESS != rval ) return error( rval );
888 
889  rval = iFace->get_entities_by_dimension( 0, 1, ranges[1] );
890  if( MB_SUCCESS != rval ) return error( rval );
891 
892  rval = iFace->get_entities_by_dimension( 0, 2, ranges[2] );
893  if( MB_SUCCESS != rval ) return error( rval );
894 
895  rval = iFace->get_entities_by_dimension( 0, 3, ranges[3] );
896  if( MB_SUCCESS != rval ) return error( rval );
897 
898  rval = iFace->get_entities_by_type( 0, MBENTITYSET, ranges[4] );
899  if( MB_SUCCESS != rval ) return error( rval );
900 
901  return initialize_mesh( ranges );
902 }
903 
904 ErrorCode WriteHDF5::write_nodes()
905 {
906  mhdf_Status status;
907  int dim, mesh_dim;
908  ErrorCode rval;
909  hid_t node_table;
910  long first_id, num_nodes;
911 
912  if( !nodeSet.total_num_ents ) return MB_SUCCESS; // No nodes!
913 
914  CHECK_OPEN_HANDLES;
915 
916  rval = iFace->get_dimension( mesh_dim );
917  CHK_MB_ERR_0( rval );
918 
919  debug_barrier();
920  dbgOut.print( 3, "Opening Node Coords\n" );
921  node_table = mhdf_openNodeCoords( filePtr, &num_nodes, &dim, &first_id, &status );
922  CHK_MHDF_ERR_0( status );
923  IODebugTrack track( debugTrack, "nodes", num_nodes );
924 
925  double* buffer = (double*)dataBuffer;
926 #ifdef BLOCKED_COORD_IO
927  int chunk_size = bufferSize / sizeof( double );
928 #else
929  int chunk_size = bufferSize / ( 3 * sizeof( double ) );
930 #endif
931 
932  long remaining = nodeSet.range.size();
933  long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
934  if( nodeSet.max_num_ents )
935  {
936  assert( nodeSet.max_num_ents >= remaining );
937  num_writes = ( nodeSet.max_num_ents + chunk_size - 1 ) / chunk_size;
938  }
939  long remaining_writes = num_writes;
940 
941  long offset = nodeSet.offset;
942  Range::const_iterator iter = nodeSet.range.begin();
943  dbgOut.printf( 3, "Writing %ld nodes in %ld blocks of %d\n", remaining, ( remaining + chunk_size - 1 ) / chunk_size,
944  chunk_size );
945  while( remaining )
946  {
947  VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
948  long count = chunk_size < remaining ? chunk_size : remaining;
949  remaining -= count;
950  Range::const_iterator end = iter;
951  end += count;
952 
953 #ifdef BLOCKED_COORD_IO
954  for( int d = 0; d < dim; d++ )
955  {
956  if( d < mesh_dim )
957  {
958  rval = writeUtil->get_node_coords( d, iter, end, count, buffer );
959  CHK_MB_ERR_1( rval, node_table, status );
960  }
961  else
962  memset( buffer, 0, count * sizeof( double ) );
963 
964  dbgOut.printf( 3, " writing %c node chunk %ld of %ld, %ld values at %ld\n", (char)( 'X' + d ),
965  num_writes - remaining_writes + 1, num_writes, count, offset );
966  mhdf_writeNodeCoordWithOpt( node_table, offset, count, d, buffer, writeProp, &status );
967  CHK_MHDF_ERR_1( status, node_table );
968  }
969 #else
970  rval = writeUtil->get_node_coords( -1, iter, end, 3 * count, buffer );
971  CHK_MB_ERR_1( rval, node_table, status );
972  dbgOut.printf( 3, " writing node chunk %ld of %ld, %ld values at %ld\n", num_writes - remaining_writes + 1,
973  num_writes, count, offset );
974  mhdf_writeNodeCoordsWithOpt( node_table, offset, count, buffer, writeProp, &status );
975  CHK_MHDF_ERR_1( status, node_table );
976 #endif
977  track.record_io( offset, count );
978 
979  iter = end;
980  offset += count;
981  --remaining_writes;
982  }
983 
984  // Do empty writes if necessary for parallel collective IO
985  if( collectiveIO )
986  {
987  while( remaining_writes-- )
988  {
989  assert( writeProp != H5P_DEFAULT );
990 #ifdef BLOCKED_COORD_IO
991  for( int d = 0; d < dim; ++d )
992  {
993  dbgOut.printf( 3, " writing (empty) %c node chunk %ld of %ld.\n", (char)( 'X' + d ),
994  num_writes - remaining_writes, num_writes );
995  mhdf_writeNodeCoordWithOpt( node_table, offset, 0, d, 0, writeProp, &status );
996  CHK_MHDF_ERR_1( status, node_table );
997  }
998 #else
999  dbgOut.printf( 3, " writing (empty) node chunk %ld of %ld.\n", num_writes - remaining_writes, num_writes );
1000  mhdf_writeNodeCoordsWithOpt( node_table, offset, 0, 0, writeProp, &status );
1001  CHK_MHDF_ERR_1( status, node_table );
1002 #endif
1003  }
1004  }
1005 
1006  mhdf_closeData( filePtr, node_table, &status );
1007  CHK_MHDF_ERR_0( status );
1008 
1009  track.all_reduce();
1010  return MB_SUCCESS;
1011 }
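
For reference, the two layouts toggled by BLOCKED_COORD_IO (undefined near the top of this file): blocked I/O writes all X coordinates, then all Y, then all Z via mhdf_writeNodeCoordWithOpt, while the default interleaved path writes xyzxyz... triples via mhdf_writeNodeCoordsWithOpt. A small sketch of how the same three nodes land in each buffer (illustrative values):

#include <cstdio>

int main()
{
    // Three nodes: (x0,y0,z0), (x1,y1,z1), (x2,y2,z2)
    const double interleaved[9] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };  // xyz xyz xyz
    double blocked[9];

    // Blocked layout: one contiguous run per dimension (xxx yyy zzz),
    // matching one mhdf_writeNodeCoordWithOpt call per dimension d.
    for( int d = 0; d < 3; ++d )
        for( int n = 0; n < 3; ++n )
            blocked[3 * d + n] = interleaved[3 * n + d];

    printf( "x block: %g %g %g\n", blocked[0], blocked[1], blocked[2] );
    printf( "y block: %g %g %g\n", blocked[3], blocked[4], blocked[5] );
    return 0;
}
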
1012 
1013 ErrorCode WriteHDF5::write_elems( ExportSet& elems )
1014 {
1015  mhdf_Status status;
1016  ErrorCode rval;
1017  long first_id;
1018  int nodes_per_elem;
1019  long table_size;
1020 
1021  CHECK_OPEN_HANDLES;
1022 
1023  debug_barrier();
1024  dbgOut.printf( 2, "Writing %lu elements of type %s%d\n", (unsigned long)elems.range.size(),
1025  CN::EntityTypeName( elems.type ), elems.num_nodes );
1026  dbgOut.print( 3, "Writing elements", elems.range );
1027 
1028  hid_t elem_table = mhdf_openConnectivity( filePtr, elems.name(), &nodes_per_elem, &table_size, &first_id, &status );
1029  CHK_MHDF_ERR_0( status );
1030  IODebugTrack track( debugTrack, elems.name() && strlen( elems.name() ) ? elems.name() : "<ANONYMOUS ELEM SET?>",
1031  table_size );
1032 
1033  assert( (unsigned long)first_id <= elems.first_id );
1034  assert( (unsigned long)table_size >= elems.offset + elems.range.size() );
1035 
1037  int chunk_size = bufferSize / ( elems.num_nodes * sizeof( wid_t ) );
1038  long offset = elems.offset;
1039  long remaining = elems.range.size();
1040  long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
1041  if( elems.max_num_ents )
1042  {
1043  assert( elems.max_num_ents >= remaining );
1044  num_writes = ( elems.max_num_ents + chunk_size - 1 ) / chunk_size;
1045  }
1046  long remaining_writes = num_writes;
1047  Range::iterator iter = elems.range.begin();
1048 
1049  while( remaining )
1050  {
1051  VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
1052  long count = chunk_size < remaining ? chunk_size : remaining;
1053  remaining -= count;
1054 
1055  Range::iterator next = iter;
1056  next += count;
1057  rval = writeUtil->get_element_connect( iter, next, elems.num_nodes, count * elems.num_nodes, buffer );
1058  CHK_MB_ERR_1( rval, elem_table, status );
1059  iter = next;
1060 
1061  for( long i = 0; i < count * nodes_per_elem; ++i )
1062  {
1063  buffer[i] = idMap.find( buffer[i] );
1064  if( 0 == buffer[i] )
1065  {
1066  MB_SET_ERR_CONT( "Invalid " << elems.name() << " element connectivity. Write Aborted" );
1067  mhdf_closeData( filePtr, elem_table, &status );
1068  return error( MB_FAILURE );
1069  }
1070  }
1071 
1072  dbgOut.printf( 3, " writing node connectivity %ld of %ld, %ld values at %ld\n",
1073  num_writes - remaining_writes + 1, num_writes, count, offset );
1074  track.record_io( offset, count );
1075  mhdf_writeConnectivityWithOpt( elem_table, offset, count, id_type, buffer, writeProp, &status );
1076  CHK_MHDF_ERR_1( status, elem_table );
1077 
1078  offset += count;
1079  --remaining_writes;
1080  }
1081 
1082  // Do empty writes if necessary for parallel collective IO
1083  if( collectiveIO )
1084  {
1085  while( remaining_writes-- )
1086  {
1087  assert( writeProp != H5P_DEFAULT );
1088  dbgOut.printf( 3, " writing (empty) connectivity chunk %ld of %ld.\n", num_writes - remaining_writes + 1,
1089  num_writes );
1090  mhdf_writeConnectivityWithOpt( elem_table, offset, 0, id_type, 0, writeProp, &status );
1091  CHK_MHDF_ERR_1( status, elem_table );
1092  }
1093  }
1094 
1095  mhdf_closeData( filePtr, elem_table, &status );
1096  CHK_MHDF_ERR_0( status );
1097 
1098  track.all_reduce();
1099  return MB_SUCCESS;
1100 }
1101 
1102 ErrorCode WriteHDF5::get_set_info( EntityHandle set,
1103  long& num_entities,
1104  long& num_children,
1105  long& num_parents,
1106  unsigned long& flags )
1107 {
1108  ErrorCode rval;
1109  int i;
1110  unsigned int u;
1111 
1112  rval = iFace->get_number_entities_by_handle( set, i, false );
1113  CHK_MB_ERR_0( rval );
1114  num_entities = i;
1115 
1116  rval = iFace->num_child_meshsets( set, &i );
1117  CHK_MB_ERR_0( rval );
1118  num_children = i;
1119 
1120  rval = iFace->num_parent_meshsets( set, &i );
1121  CHK_MB_ERR_0( rval );
1122  num_parents = i;
1123 
1124  rval = iFace->get_meshset_options( set, u );
1125  CHK_MB_ERR_0( rval );
1126  flags = u;
1127 
1128  return MB_SUCCESS;
1129 }
1130 
1131 ErrorCode WriteHDF5::write_set_data( const WriteUtilIface::EntityListType which_data,
1132  const hid_t handle,
1133  IODebugTrack& track,
1134  Range* ranged,
1135  Range* null_stripped,
1136  std::vector< long >* set_sizes )
1137 {
1138  // ranged must be non-null for CONTENTS and null for anything else
1139  assert( ( which_data == WriteUtilIface::CONTENTS ) == ( 0 != ranged ) );
1140  ErrorCode rval;
1141  mhdf_Status status;
1142 
1143  debug_barrier();
1144 
1145  // Function pointer type used to write set data
1146  void ( *write_func )( hid_t, long, long, hid_t, const void*, hid_t, mhdf_Status* );
1147  long max_vals; // Max over all procs of number of values to write to data set
1148  long offset; // Offset in HDF5 dataset at which to write next block of data
1149  switch( which_data )
1150  {
1151  case WriteUtilIface::CONTENTS:
1152  assert( ranged != 0 && null_stripped != 0 && set_sizes != 0 );
1153  write_func = &mhdf_writeSetDataWithOpt;
1154  max_vals = maxNumSetContents;
1155  offset = setContentsOffset;
1156  dbgOut.print( 2, "Writing set contents\n" );
1157  break;
1158  case WriteUtilIface::CHILDREN:
1159  assert( !ranged && !null_stripped && !set_sizes );
1160  write_func = &mhdf_writeSetParentsChildrenWithOpt;
1161  max_vals = maxNumSetChildren;
1162  offset = setChildrenOffset;
1163  dbgOut.print( 2, "Writing set child lists\n" );
1164  break;
1165  case WriteUtilIface::PARENTS:
1166  assert( !ranged && !null_stripped && !set_sizes );
1167  write_func = &mhdf_writeSetParentsChildrenWithOpt;
1168  max_vals = maxNumSetParents;
1169  offset = setParentsOffset;
1170  dbgOut.print( 2, "Writing set parent lists\n" );
1171  break;
1172  default:
1173  assert( false );
1174  return MB_FAILURE;
1175  }
1176  // assert(max_vals > 0); // Should have skipped this function otherwise
1177 
1178  // buffer to use for IO
1179  wid_t* buffer = reinterpret_cast< wid_t* >( dataBuffer );
1180  // number of handles that will fit in the buffer
1181  const size_t buffer_size = bufferSize / sizeof( EntityHandle );
1182  // the total number of write calls that must be made, including no-ops for collective io
1183  const size_t num_total_writes = ( max_vals + buffer_size - 1 ) / buffer_size;
1184 
1185  std::vector< SpecialSetData >::iterator si = specialSets.begin();
1186 
1187  std::vector< wid_t > remaining; // data left over from prev iteration because it didn't fit in buffer
1188  size_t remaining_offset = 0; // avoid erasing from front of 'remaining'
1189  const EntityHandle* remaining_ptr = 0; // remaining for non-ranged data
1190  size_t remaining_count = 0;
1191  const wid_t* special_rem_ptr = 0;
1192  Range::const_iterator i = setSet.range.begin(), j, rhint, nshint;
1193  if( ranged ) rhint = ranged->begin();
1194  if( null_stripped ) nshint = null_stripped->begin();
1195  for( size_t w = 0; w < num_total_writes; ++w )
1196  {
1197  if( i == setSet.range.end() && !remaining.empty() && !remaining_ptr )
1198  {
1199  // If here, then we've written everything but we need to
1200  // make more write calls because we're doing collective IO
1201  // in parallel
1202  ( *write_func )( handle, 0, 0, id_type, 0, writeProp, &status );
1203  CHK_MHDF_ERR_0( status );
1204  continue;
1205  }
1206 
1207  // If we had some left-over data from a range-compacted set
1208  // from the last iteration, add it to the buffer now
1209  size_t count = 0;
1210  if( !remaining.empty() )
1211  {
1212  count = remaining.size() - remaining_offset;
1213  if( count > buffer_size )
1214  {
1215  memcpy( buffer, &remaining[remaining_offset], buffer_size * sizeof( wid_t ) );
1216  count = buffer_size;
1217  remaining_offset += buffer_size;
1218  }
1219  else
1220  {
1221  memcpy( buffer, &remaining[remaining_offset], count * sizeof( wid_t ) );
1222  remaining_offset = 0;
1223  remaining.clear();
1224  }
1225  }
1226  // If we had some left-over data from a non-range-compacted set
1227  // from the last iteration, add it to the buffer now
1228  else if( remaining_ptr )
1229  {
1230  if( remaining_count > buffer_size )
1231  {
1232  rval = vector_to_id_list( remaining_ptr, buffer, buffer_size );
1233  CHK_MB_ERR_0( rval );
1234  count = buffer_size;
1235  remaining_ptr += count;
1236  remaining_count -= count;
1237  }
1238  else
1239  {
1240  rval = vector_to_id_list( remaining_ptr, buffer, remaining_count );
1241  CHK_MB_ERR_0( rval );
1242  count = remaining_count;
1243  remaining_ptr = 0;
1244  remaining_count = 0;
1245  }
1246  }
1247  // If we had some left-over data from a "special" (i.e. parallel shared)
1248  // set.
1249  else if( special_rem_ptr )
1250  {
1251  if( remaining_count > buffer_size )
1252  {
1253  memcpy( buffer, special_rem_ptr, buffer_size * sizeof( wid_t ) );
1254  count = buffer_size;
1255  special_rem_ptr += count;
1256  remaining_count -= count;
1257  }
1258  else
1259  {
1260  memcpy( buffer, special_rem_ptr, remaining_count * sizeof( wid_t ) );
1261  count = remaining_count;
1262  special_rem_ptr = 0;
1263  remaining_count = 0;
1264  }
1265  }
1266 
1267  // While there is both space remaining in the buffer and
1268  // more sets to write, append more set data to buffer.
1269 
1270  while( count < buffer_size && i != setSet.range.end() )
1271  {
1272  // Special case for "special" (i.e. parallel shared) sets:
1273  // we already have the data in a vector, just copy it.
1274  if( si != specialSets.end() && si->setHandle == *i )
1275  {
1276  std::vector< wid_t >& list = ( which_data == WriteUtilIface::CONTENTS ) ? si->contentIds
1277  : ( which_data == WriteUtilIface::PARENTS ) ? si->parentIds
1278  : si->childIds;
1279  size_t append = list.size();
1280  if( count + list.size() > buffer_size )
1281  {
1282  append = buffer_size - count;
1283  special_rem_ptr = &list[append];
1284  remaining_count = list.size() - append;
1285  }
1286  memcpy( buffer + count, &list[0], append * sizeof( wid_t ) );
1287  ++i;
1288  ++si;
1289  count += append;
1290  continue;
1291  }
1292 
1293  j = i;
1294  ++i;
1295  const EntityHandle* ptr;
1296  int len;
1297  unsigned char flags;
1298  rval = writeUtil->get_entity_list_pointers( j, i, &ptr, which_data, &len, &flags );
1299  if( MB_SUCCESS != rval ) return rval;
1300  if( which_data == WriteUtilIface::CONTENTS && !( flags & MESHSET_ORDERED ) )
1301  {
1302  bool compacted;
1303  remaining.clear();
1304  if( len == 0 )
1305  compacted = false;
1306  else
1307  {
1308  assert( !( len % 2 ) );
1309  rval = range_to_blocked_list( ptr, len / 2, remaining, compacted );
1310  if( MB_SUCCESS != rval ) return rval;
1311  }
1312  if( compacted )
1313  {
1314  rhint = ranged->insert( rhint, *j );
1315  set_sizes->push_back( remaining.size() );
1316  }
1317  else if( remaining.size() != (unsigned)len )
1318  {
1319  nshint = null_stripped->insert( nshint, *j );
1320  set_sizes->push_back( remaining.size() );
1321  }
1322 
1323  if( count + remaining.size() <= buffer_size )
1324  {
1325  if( !remaining.empty() )
1326  memcpy( buffer + count, &remaining[0], sizeof( wid_t ) * remaining.size() );
1327  count += remaining.size();
1328  remaining.clear();
1329  remaining_offset = 0;
1330  }
1331  else
1332  {
1333  remaining_offset = buffer_size - count;
1334  memcpy( buffer + count, &remaining[0], sizeof( wid_t ) * remaining_offset );
1335  count += remaining_offset;
1336  }
1337  }
1338  else
1339  {
1340  if( count + len > buffer_size )
1341  {
1342  size_t append = buffer_size - count;
1343  remaining_ptr = ptr + append;
1344  remaining_count = len - append;
1345  len = append;
1346  }
1347 
1348  rval = vector_to_id_list( ptr, buffer + count, len );
1349  count += len;
1350  }
1351  }
1352 
1353  // Write the buffer.
1354  ( *write_func )( handle, offset, count, id_type, buffer, writeProp, &status );
1355  CHK_MHDF_ERR_0( status );
1356  track.record_io( offset, count );
1357  offset += count;
1358  }
1359 
1360  return MB_SUCCESS;
1361 }
1362 
1363 ErrorCode WriteHDF5::write_sets( double* times )
1364 {
1365  mhdf_Status status;
1366  ErrorCode rval;
1367  long first_id, size;
1368  hid_t table;
1369  CpuTimer timer;
1370 
1372  /* If no sets, just return success */
1373  if( !writeSets ) return MB_SUCCESS;
1374 
1375  debug_barrier();
1376  dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
1377  dbgOut.print( 3, "Non-shared sets", setSet.range );
1378 
1379  /* Write set parents */
1380  if( writeSetParents )
1381  {
1382  topState.start( "writing parent lists for local sets" );
1383  table = mhdf_openSetParents( filePtr, &size, &status );
1384  CHK_MHDF_ERR_0( status );
1385  IODebugTrack track( debugTrack, "SetParents", size );
1386 
1387  rval = write_set_data( WriteUtilIface::PARENTS, table, track );
1388  topState.end( rval );
1389  CHK_MB_ERR_1( rval, table, status );
1390 
1391  mhdf_closeData( filePtr, table, &status );
1392  CHK_MHDF_ERR_0( status );
1393 
1394  times[SET_PARENT] = timer.time_elapsed();
1395  track.all_reduce();
1396  }
1397 
1398  /* Write set children */
1399  if( writeSetChildren )
1400  {
1401  topState.start( "writing child lists for local sets" );
1402  table = mhdf_openSetChildren( filePtr, &size, &status );
1403  CHK_MHDF_ERR_0( status );
1404  IODebugTrack track( debugTrack, "SetChildren", size );
1405 
1406  rval = write_set_data( WriteUtilIface::CHILDREN, table, track );
1407  topState.end( rval );
1408  CHK_MB_ERR_1( rval, table, status );
1409 
1410  mhdf_closeData( filePtr, table, &status );
1411  CHK_MHDF_ERR_0( status );
1412 
1413  times[SET_CHILD] = timer.time_elapsed();
1414  track.all_reduce();
1415  }
1416 
1417  /* Write set contents */
1418  Range ranged_sets, null_stripped_sets;
1419  std::vector< long > set_sizes;
1420  if( writeSetContents )
1421  {
1422  topState.start( "writing content lists for local sets" );
1423  table = mhdf_openSetData( filePtr, &size, &status );
1424  CHK_MHDF_ERR_0( status );
1425  IODebugTrack track( debugTrack, "SetContents", size );
1426 
1427  rval = write_set_data( WriteUtilIface::CONTENTS, table, track, &ranged_sets, &null_stripped_sets, &set_sizes );
1428  topState.end( rval );
1429  CHK_MB_ERR_1( rval, table, status );
1430 
1431  mhdf_closeData( filePtr, table, &status );
1432  CHK_MHDF_ERR_0( status );
1433 
1434  times[SET_CONTENT] = timer.time_elapsed();
1435  track.all_reduce();
1436  }
1437  assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );
1438 
1439  /* Write set description table */
1440 
1441  debug_barrier();
1442  topState.start( "writing descriptions of local sets" );
1443  dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
1444  dbgOut.print( 3, "Non-shared sets", setSet.range );
1445 
1446  /* Open the table */
1447  table = mhdf_openSetMeta( filePtr, &size, &first_id, &status );
1448  CHK_MHDF_ERR_0( status );
1449  IODebugTrack track_meta( debugTrack, "SetMeta", size );
1450 
1451  /* Some debug stuff */
1452  debug_barrier();
1453  dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
1454  dbgOut.print( 3, "Non-shared sets", setSet.range );
1455 
1456  /* Counts and buffers and such */
1457  mhdf_index_t* const buffer = reinterpret_cast< mhdf_index_t* >( dataBuffer );
1458  const size_t buffer_size = bufferSize / ( 4 * sizeof( mhdf_index_t ) );
1459  const size_t num_local_writes = ( setSet.range.size() + buffer_size - 1 ) / buffer_size;
1460  const size_t num_global_writes = ( setSet.max_num_ents + buffer_size - 1 ) / buffer_size;
1461  assert( num_local_writes <= num_global_writes );
1462  assert( num_global_writes > 0 );
1463 
1464  /* data about sets for which number of handles written is
1465  * not the same as the number of handles in the set
1466  * (range-compacted or null handles stripped out)
1467  */
1468  Range::const_iterator i = setSet.range.begin();
1469  Range::const_iterator r = ranged_sets.begin();
1470  Range::const_iterator s = null_stripped_sets.begin();
1471  std::vector< mhdf_index_t >::const_iterator n = set_sizes.begin();
1472  assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );
1473 
1474  /* We write the end index for each list, rather than the count */
1475  mhdf_index_t prev_contents_end = setContentsOffset - 1;
1476  mhdf_index_t prev_children_end = setChildrenOffset - 1;
1477  mhdf_index_t prev_parents_end = setParentsOffset - 1;
1478 
1479  /* While there is more data to write */
1480  size_t offset = setSet.offset;
1481  std::vector< SpecialSetData >::const_iterator si = specialSets.begin();
1482  for( size_t w = 0; w < num_local_writes; ++w )
1483  {
1484  // Get a buffer full of data
1485  size_t count = 0;
1486  while( count < buffer_size && i != setSet.range.end() )
1487  {
1488  // Get set properties
1489  long num_ent, num_child, num_parent;
1490  unsigned long flags;
1491  if( si != specialSets.end() && si->setHandle == *i )
1492  {
1493  flags = si->setFlags;
1494  num_ent = si->contentIds.size();
1495  num_child = si->childIds.size();
1496  num_parent = si->parentIds.size();
1497  ++si;
1498  if( r != ranged_sets.end() && *i == *r )
1499  {
1500  assert( flags & mhdf_SET_RANGE_BIT );
1501  ++r;
1502  ++n;
1503  }
1504  else if( s != null_stripped_sets.end() && *i == *s )
1505  {
1506  ++s;
1507  ++n;
1508  }
1509  }
1510  else
1511  {
1512  assert( si == specialSets.end() || si->setHandle > *i );
1513 
1514  // Get set properties
1515  rval = get_set_info( *i, num_ent, num_child, num_parent, flags );
1516  CHK_MB_ERR_1( rval, table, status );
1517 
1518  // Check if size is something other than num handles in set
1519  if( r != ranged_sets.end() && *i == *r )
1520  {
1521  num_ent = *n;
1522  ++r;
1523  ++n;
1524  flags |= mhdf_SET_RANGE_BIT;
1525  }
1526  else if( s != null_stripped_sets.end() && *i == *s )
1527  {
1528  num_ent = *n;
1529  ++s;
1530  ++n;
1531  }
1532  }
1533 
1534  // Put data in buffer
1535  mhdf_index_t* local = buffer + 4 * count;
1536  prev_contents_end += num_ent;
1537  prev_children_end += num_child;
1538  prev_parents_end += num_parent;
1539  local[0] = prev_contents_end;
1540  local[1] = prev_children_end;
1541  local[2] = prev_parents_end;
1542  local[3] = flags;
1543 
1544  // Iterate
1545  ++count;
1546  ++i;
1547  }
1548 
1549  // Write the data
1550  mhdf_writeSetMetaWithOpt( table, offset, count, MHDF_INDEX_TYPE, buffer, writeProp, &status );
1551  CHK_MHDF_ERR_1( status, table );
1552  track_meta.record_io( offset, count );
1553  offset += count;
1554  }
1555  assert( r == ranged_sets.end() );
1556  assert( s == null_stripped_sets.end() );
1557  assert( n == set_sizes.end() );
1558 
1559  /* If doing parallel write with collective IO, do null write
1560  * calls because other procs aren't done yet and write calls
1561  * are collective */
1562  for( size_t w = num_local_writes; w != num_global_writes; ++w )
1563  {
1564  mhdf_writeSetMetaWithOpt( table, 0, 0, MHDF_INDEX_TYPE, 0, writeProp, &status );
1565  CHK_MHDF_ERR_1( status, table );
1566  }
1567 
1568  topState.end();
1569  mhdf_closeData( filePtr, table, &status );
1570  CHK_MHDF_ERR_0( status );
1571 
1572  times[SET_META] = timer.time_elapsed();
1573  track_meta.all_reduce();
1574 
1575  return MB_SUCCESS;
1576 }
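
A worked illustration of the SetMeta encoding used above (illustrative numbers): because each row stores the end index of its content/child/parent lists rather than counts, a reader recovers count k as end[k] - end[k-1]. For example, three sets with content counts 4, 0, 2 and a content table offset of 10 produce end indices 13, 13, 15:

#include <cassert>

int main()
{
    // Content counts for three sets; table offset comes from setContentsOffset.
    long counts[3] = { 4, 0, 2 };
    long prev_end = 10 - 1;  // offset - 1, as in 'prev_contents_end' above
    long ends[3];
    for( int k = 0; k < 3; ++k )
    {
        prev_end += counts[k];
        ends[k] = prev_end;  // the value written into the SetMeta row
    }
    assert( ends[0] == 13 && ends[1] == 13 && ends[2] == 15 );
    // A reader recovers count k as ends[k] - ( k ? ends[k - 1] : 10 - 1 ).
    return 0;
}
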
1577 
1578 template < class HandleRangeIter >
1579 inline size_t count_num_handles( HandleRangeIter iter, HandleRangeIter end )
1580 {
1581  size_t result = 0;
1582  for( ; iter != end; ++iter )
1583  result += iter->second - iter->first + 1;
1584 
1585  return result;
1586 }
1587 
1588 template < class HandleRangeIter >
1589 inline ErrorCode range_to_id_list_templ( HandleRangeIter begin,
1590  HandleRangeIter end,
1591  const RangeMap< EntityHandle, WriteHDF5::wid_t >& idMap,
1592  WriteHDF5::wid_t* array )
1593 {
1594  ErrorCode rval = MB_SUCCESS;
1595  RangeMap< EntityHandle, WriteHDF5::wid_t >::iterator ri = idMap.begin();
1596  WriteHDF5::wid_t* i = array;
1597  for( HandleRangeIter pi = begin; pi != end; ++pi )
1598  {
1599  EntityHandle h = pi->first;
1600  while( h <= pi->second )
1601  {
1602  ri = idMap.lower_bound( ri, idMap.end(), h );
1603  if( ri == idMap.end() || ri->begin > h )
1604  {
1605  rval = MB_ENTITY_NOT_FOUND;
1606  *i = 0;
1607  ++i;
1608  ++h;
1609  continue;
1610  }
1611 
1612  // compute the last available value of the found target range (ri iterator)
1613  WriteHDF5::wid_t last_valid_input_value_in_current_map_range = ri->begin + ri->count - 1;
1614  // limit the number of steps we do on top of h so we do not overflow the output range
1615  // span
1616  WriteHDF5::wid_t step_until = std::min( last_valid_input_value_in_current_map_range, pi->second );
1617  WriteHDF5::wid_t n = step_until - h + 1;
1618  assert( n > 0 ); // We must at least step 1
1619 
1620  WriteHDF5::wid_t id = ri->value + ( h - ri->begin );
1621  for( WriteHDF5::wid_t j = 0; j < n; ++i, ++j )
1622  *i = id + j;
1623  h += n;
1624  }
1625  }
1626 
1627  assert( i == array + count_num_handles( begin, end ) );
1628  return rval;
1629 }
1630 
1631 template < class HandleRangeIter >
1632 inline ErrorCode range_to_blocked_list_templ( HandleRangeIter begin,
1633  HandleRangeIter end,
1634  const RangeMap< EntityHandle, WriteHDF5::wid_t >& idMap,
1635  std::vector< WriteHDF5::wid_t >& output_id_list,
1636  bool& ranged_list )
1637 {
1638  output_id_list.clear();
1639  if( begin == end )
1640  {
1641  ranged_list = false;
1642  return MB_SUCCESS;
1643  }
1644 
1645  // First try ranged format, but give up if we reach the
1646  // non-range format size.
1647  RangeMap< EntityHandle, WriteHDF5::wid_t >::iterator ri = idMap.begin();
1648 
1649  const size_t num_handles = count_num_handles( begin, end );
1650  // If we end up with more than this many range blocks, then
1651  // we're better off just writing the set as a simple list
1652  size_t pairs_remaining = num_handles / 2;
1653  for( HandleRangeIter pi = begin; pi != end; ++pi )
1654  {
1655  EntityHandle h = pi->first;
1656  WriteHDF5::wid_t local_mapped_from_subrange = 0;
1657  while( h <= pi->second )
1658  {
1659  ri = idMap.lower_bound( ri, idMap.end(), h );
1660  if( ri == idMap.end() || ri->begin > h )
1661  {
1662  ++h;
1663  continue;
1664  }
1665 
1666  WriteHDF5::wid_t n = pi->second - pi->first + 1 - local_mapped_from_subrange;
1667  if( n > ri->count ) n = ri->count;
1668 
1669  WriteHDF5::wid_t id = ri->value + ( h - ri->begin );
1670  // see if we can go to the end of the range
1671  if( id + n > ri->value + ri->count ) // we have to reduce n, because we cannot go over next subrange
1672  {
1673  if( ri->value + ri->count - id > 0 ) n = ri->value + ri->count - id;
1674  }
1675 
1676  // See if we can append it to the previous range
1677  if( !output_id_list.empty() && output_id_list[output_id_list.size() - 2] + output_id_list.back() == id )
1678  {
1679  output_id_list.back() += n;
1680  }
1681 
1682  // If we ran out of space, (or set is empty) just do list format
1683  else if( !pairs_remaining )
1684  {
1685  ranged_list = false;
1686  output_id_list.resize( num_handles );
1687  range_to_id_list_templ( begin, end, idMap, &output_id_list[0] );
1688  output_id_list.erase( std::remove( output_id_list.begin(), output_id_list.end(), 0u ),
1689  output_id_list.end() );
1690  return MB_SUCCESS;
1691  }
1692 
1693  // Otherwise begin a new (start, count) pair
1694  else
1695  {
1696  --pairs_remaining;
1697  output_id_list.push_back( id );
1698  output_id_list.push_back( n );
1699  }
1700  local_mapped_from_subrange += n; // we already mapped so many
1701  h += n;
1702  }
1703  }
1704 
1705  ranged_list = true;
1706  return MB_SUCCESS;
1707 }
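
Worked example of the two set-content encodings chosen above (values illustrative): file IDs {3,4,5,9,10} can be stored ranged as (start,count) pairs [3,3, 9,2], which wins whenever the pair list is shorter than the plain list; otherwise the plain list [3,4,5,9,10] is used and ranged_list is left false. A standalone sketch of that encoding decision (hypothetical helper, not MOAB API):

#include <cstdio>
#include <vector>

// Encode sorted, unique IDs as (start,count) pairs; fall back to a plain
// list when ranging would not be shorter -- the same trade-off made by
// range_to_blocked_list_templ() above.
static bool encode_blocked( const std::vector< unsigned long >& ids, std::vector< unsigned long >& out )
{
    out.clear();
    for( size_t i = 0; i < ids.size(); ++i )
    {
        if( !out.empty() && out[out.size() - 2] + out.back() == ids[i] )
            ++out.back();  // extends the previous run
        else
        {
            out.push_back( ids[i] );  // start of a new run
            out.push_back( 1 );
        }
        if( out.size() > ids.size() )  // ranged form is losing; use plain list
        {
            out = ids;
            return false;
        }
    }
    return true;  // 'out' holds (start,count) pairs
}

int main()
{
    std::vector< unsigned long > ids, out;
    unsigned long v[] = { 3, 4, 5, 9, 10 };
    ids.assign( v, v + 5 );
    bool ranged = encode_blocked( ids, out );
    printf( "ranged=%d, %lu values\n", (int)ranged, (unsigned long)out.size() );  // ranged=1, 4 values
    return 0;
}
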
1708 
1709 ErrorCode WriteHDF5::range_to_blocked_list( const Range& input_range,
1710  std::vector< wid_t >& output_id_list,
1711  bool& ranged_list )
1712 {
1713  return range_to_blocked_list_templ( input_range.const_pair_begin(), input_range.const_pair_end(), idMap,
1714  output_id_list, ranged_list );
1715 }
1716 
1717 ErrorCode WriteHDF5::range_to_blocked_list( const EntityHandle* array,
1718  size_t num_input_ranges,
1719  std::vector< wid_t >& output_id_list,
1720  bool& ranged_list )
1721 {
1722  // We assume this in the cast on the following line
1723  typedef std::pair< EntityHandle, EntityHandle > mtype;
1724  assert( sizeof( mtype ) == 2 * sizeof( EntityHandle ) );
1725  const mtype* arr = reinterpret_cast< const mtype* >( array );
1726  return range_to_blocked_list_templ( arr, arr + num_input_ranges, idMap, output_id_list, ranged_list );
1727 }
1728 
1729 ErrorCode WriteHDF5::range_to_id_list( const Range& range, wid_t* array )
1730 {
1731  return range_to_id_list_templ( range.const_pair_begin(), range.const_pair_end(), idMap, array );
1732 }
1733 
1734 ErrorCode WriteHDF5::vector_to_id_list( const EntityHandle* input,
1735  size_t input_len,
1736  wid_t* output,
1737  size_t& output_len,
1738  bool remove_zeros )
1739 {
1740  const EntityHandle* i_iter = input;
1741  const EntityHandle* i_end = input + input_len;
1742  wid_t* o_iter = output;
1743  for( ; i_iter != i_end; ++i_iter )
1744  {
1745  wid_t id = idMap.find( *i_iter );
1746  if( !remove_zeros || id != 0 )
1747  {
1748  *o_iter = id;
1749  ++o_iter;
1750  }
1751  }
1752  output_len = o_iter - output;
1753 
1754  return MB_SUCCESS;
1755 }
1756 
1757 ErrorCode WriteHDF5::vector_to_id_list( const std::vector< EntityHandle >& input,
1758  std::vector< wid_t >& output,
1759  bool remove_zeros )
1760 {
1761  output.resize( input.size() );
1762  size_t output_size = 0;
1763  ErrorCode rval = vector_to_id_list( &input[0], input.size(), &output[0], output_size, remove_zeros );
1764  output.resize( output_size );
1765  return rval;
1766 }
1767 
1768 ErrorCode WriteHDF5::vector_to_id_list( const EntityHandle* input, wid_t* output, size_t count )
1769 {
1770  size_t output_len;
1771  return vector_to_id_list( input, count, output, output_len, false );
1772 }
1773 
1774 inline ErrorCode WriteHDF5::get_adjacencies( EntityHandle entity, std::vector< wid_t >& adj )
1775 {
1776  const EntityHandle* adj_array;
1777  int num_adj;
1778  ErrorCode rval = writeUtil->get_adjacencies( entity, adj_array, num_adj );
1779  if( MB_SUCCESS != rval ) return error( rval );
1780 
1781  size_t j = 0;
1782  adj.resize( num_adj );
1783  for( int i = 0; i < num_adj; ++i )
1784  if( wid_t id = idMap.find( adj_array[i] ) ) adj[j++] = id;
1785  adj.resize( j );
1786 
1787  return MB_SUCCESS;
1788 }
1789 
1790 ErrorCode WriteHDF5::write_adjacencies( const ExportSet& elements )
1791 {
1792  ErrorCode rval;
1793  mhdf_Status status;
1794  Range::const_iterator iter;
1795  const Range::const_iterator end = elements.range.end();
1796  std::vector< wid_t > adj_list;
1797 
1798  CHECK_OPEN_HANDLES;
1799 
1800  debug_barrier();
1801 
1802  /* Count Adjacencies */
1803  long count = 0;
1804  // for (iter = elements.range.begin(); iter != end; ++iter) {
1805  // adj_list.clear();
1806  // rval = get_adjacencies(*iter, adj_list);CHK_MB_ERR_0(rval);
1807  //
1808  // if (adj_list.size() > 0)
1809  // count += adj_list.size() + 2;
1810  //}
1811 
1812  // if (count == 0)
1813  // return MB_SUCCESS;
1814 
1815  long offset = elements.adj_offset;
1816  if( elements.max_num_adjs == 0 ) return MB_SUCCESS;
1817 
1818  /* Create data list */
1819  hid_t table = mhdf_openAdjacency( filePtr, elements.name(), &count, &status );
1820  CHK_MHDF_ERR_0( status );
1821  IODebugTrack track( debugTrack, "Adjacencies", count );
1822 
1823  /* Write data */
1824  wid_t* buffer = reinterpret_cast< wid_t* >( dataBuffer );
1825  long chunk_size = bufferSize / sizeof( wid_t );
1826  long num_writes = ( elements.max_num_adjs + chunk_size - 1 ) / chunk_size;
1827  VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
1828  count = 0;
1829  for( iter = elements.range.begin(); iter != end; ++iter )
1830  {
1831  adj_list.clear();
1832  rval = get_adjacencies( *iter, adj_list );
1833  CHK_MB_ERR_1( rval, table, status );
1834  if( adj_list.size() == 0 ) continue;
1835 
1836  // If buffer is full, flush it
1837  if( count + adj_list.size() + 2 > (unsigned long)chunk_size )
1838  {
1839  dbgOut.print( 3, " writing adjacency chunk.\n" );
1840  track.record_io( offset, count );
1841  mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
1842  CHK_MHDF_ERR_1( status, table );
1844 
1845  offset += count;
1846  count = 0;
1847  }
1848 
1849  buffer[count++] = idMap.find( *iter );
1850  buffer[count++] = adj_list.size();
1851 
1852  assert( adj_list.size() + 2 < (unsigned long)chunk_size );
1853  memcpy( buffer + count, &adj_list[0], adj_list.size() * sizeof( wid_t ) );
1854  count += adj_list.size();
1855  }
1856 
1857  if( count )
1858  {
1859  dbgOut.print( 2, " writing final adjacency chunk.\n" );
1860  mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
1861  CHK_MHDF_ERR_1( status, table );
1862 
1863  offset += count;
1864  count = 0;
1865  --num_writes;
1866  }
1867 
1868  // Do empty writes if necessary for parallel collective IO
1869  if( collectiveIO )
1870  {
1871  while( num_writes > 0 )
1872  {
1873  --num_writes;
1874  assert( writeProp != H5P_DEFAULT );
1875  dbgOut.print( 2, " writing empty adjacency chunk.\n" );
1876  mhdf_writeAdjacencyWithOpt( table, offset, 0, id_type, 0, writeProp, &status );
1877  CHK_MHDF_ERR_1( status, table );
1878  }
1879  }
1880 
1881  mhdf_closeData( filePtr, table, &status );
1882  CHK_MHDF_ERR_0( status );
1883 
1884  track.all_reduce();
1885  return MB_SUCCESS;
1886 }
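
// Editor's note: each record in the adjacency table written above has the
// layout [ entity_file_id, n, adj_id_1, ..., adj_id_n ], packed back to
// back. A sketch of the matching decode loop over a chunk of 'count'
// valid entries (illustrative only; the reader lives elsewhere):
/*
static void decode_adjacency_chunk( const WriteHDF5::wid_t* buf, long count )
{
    long i = 0;
    while( i < count )
    {
        WriteHDF5::wid_t id = buf[i++];  // File ID of the entity
        WriteHDF5::wid_t n = buf[i++];   // Number of adjacent entities
        std::cout << "entity " << id << ": " << n << " adjacencies\n";
        i += n;                          // Skip the n adjacent file IDs
    }
}
*/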
1887 
1888 ErrorCode WriteHDF5::write_tag( const TagDesc& tag_data, double* times )
1889 {
1890  std::string name;
1891  ErrorCode rval = iFace->tag_get_name( tag_data.tag_id, name );
1892  if( MB_SUCCESS != rval ) return error( rval );
1893 
1895  debug_barrier();
1896  dbgOut.tprintf( 1, "Writing tag: \"%s\"\n", name.c_str() );
1897 
1898  int moab_size, elem_size, array_len;
1899  DataType moab_type;
1900  mhdf_TagDataType mhdf_type;
1901  hid_t hdf5_type;
1902  rval = get_tag_size( tag_data.tag_id, moab_type, moab_size, elem_size, array_len, mhdf_type, hdf5_type );
1903  if( MB_SUCCESS != rval ) return error( rval );
1904 
1905  CpuTimer timer;
1906  if( array_len == MB_VARIABLE_LENGTH && tag_data.write_sparse )
1907  {
1908  dbgOut.printf( 2, "Writing sparse data for var-len tag: \"%s\"\n", name.c_str() );
1909  rval = write_var_len_tag( tag_data, name, moab_type, hdf5_type, elem_size );
1910  times[VARLEN_TAG_TIME] += timer.time_elapsed();
1911  }
1912  else
1913  {
1914  int data_len = elem_size;
1915  if( moab_type != MB_TYPE_BIT ) data_len *= array_len;
1916  if( tag_data.write_sparse )
1917  {
1918  dbgOut.printf( 2, "Writing sparse data for tag: \"%s\"\n", name.c_str() );
1919  rval = write_sparse_tag( tag_data, name, moab_type, hdf5_type, data_len );
1920  times[SPARSE_TAG_TIME] += timer.time_elapsed();
1921  }
1922  for( size_t i = 0; MB_SUCCESS == rval && i < tag_data.dense_list.size(); ++i )
1923  {
1924  const ExportSet* set = find( tag_data.dense_list[i] );
1925  assert( 0 != set );
1926  debug_barrier();
1927  dbgOut.printf( 2, "Writing dense data for tag: \"%s\" on group \"%s\"\n", name.c_str(), set->name() );
1928  subState.start( "writing dense data for tag: ", ( name + ":" + set->name() ).c_str() );
1929  rval = write_dense_tag( tag_data, *set, name, moab_type, hdf5_type, data_len );
1930  subState.end( rval );
1931  }
1932  times[DENSE_TAG_TIME] += timer.time_elapsed();
1933  }
1934 
1935  H5Tclose( hdf5_type );
1936  return MB_SUCCESS == rval ? MB_SUCCESS : error( rval );
1937 }
1938 
1939 ErrorCode WriteHDF5::write_sparse_ids( const TagDesc& tag_data,
1940  const Range& range,
1941  hid_t id_table,
1942  size_t table_size,
1943  const char* name )
1944 {
1945  ErrorCode rval;
1946  mhdf_Status status;
1947 
1949 
1950  std::string tname( name ? name : "<UNKNOWN TAG?>" );
1951  tname += " - Ids";
1952  IODebugTrack track( debugTrack, tname, table_size );
1953 
1954  // Set up data buffer for writing IDs
1955  size_t chunk_size = bufferSize / sizeof( wid_t );
1956  wid_t* id_buffer = (wid_t*)dataBuffer;
1957 
1958  // Write IDs of tagged entities.
1959  long remaining = range.size();
1960  long offset = tag_data.sparse_offset;
1961  long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
1962  if( tag_data.max_num_ents )
1963  {
1964  assert( tag_data.max_num_ents >= (unsigned long)remaining );
1965  num_writes = ( tag_data.max_num_ents + chunk_size - 1 ) / chunk_size;
1966  }
1967  Range::const_iterator iter = range.begin();
1968  while( remaining )
1969  {
1971 
1972  // Write a chunk of at most chunk_size IDs
1973  long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
1974  remaining -= count;
1975  Range::const_iterator stop = iter;
1976  stop += count;
1977  Range tmp;
1979  tmp.merge( iter, stop );
1980  iter = stop;
1981  assert( tmp.size() == (unsigned)count );
1982 
1983  rval = range_to_id_list( tmp, id_buffer );
1984  CHK_MB_ERR_0( rval );
1985 
1986  // Write the data
1987  dbgOut.print( 3, " writing sparse tag entity chunk.\n" );
1988  track.record_io( offset, count );
1989  mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, count, id_type, id_buffer, writeProp, &status );
1990  CHK_MHDF_ERR_0( status );
1991 
1992  offset += count;
1993  --num_writes;
1994  } // while (remaining)
1995 
1996  // Do empty writes if necessary for parallel collective IO
1997  if( collectiveIO )
1998  {
1999  while( num_writes-- )
2000  {
2001  assert( writeProp != H5P_DEFAULT );
2002  dbgOut.print( 3, " writing empty sparse tag entity chunk.\n" );
2003  mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, 0, id_type, 0, writeProp, &status );
2004  CHK_MHDF_ERR_0( status );
2005  }
2006  }
2007 
2008  track.all_reduce();
2009  return MB_SUCCESS;
2010 }
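
// Editor's note: with collective I/O every rank must issue the same number
// of write calls, so num_writes above is derived from the global maximum
// (max_num_ents) rather than the local count, and the loop pads with
// zero-length writes. Sketch of the arithmetic, with hypothetical counts:
/*
static long writes_needed( unsigned long ents, unsigned long chunk )
{
    return ( ents + chunk - 1 ) / chunk;  // Ceiling division
}
// writes_needed( 3500, 2000 ) == 2 and writes_needed( 7000, 2000 ) == 4,
// so a rank holding 3500 IDs adds 4 - 2 = 2 empty collective writes.
*/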
2011 
2012 ErrorCode WriteHDF5::write_sparse_tag( const TagDesc& tag_data,
2013  const std::string& name,
2014  DataType mb_data_type,
2015  hid_t value_type,
2016  int value_type_size )
2017 {
2018  ErrorCode rval;
2019  mhdf_Status status;
2020  hid_t tables[3];
2021  long table_size, data_size;
2022 
2024 
2025  // Get entities for which to write tag values
2026  Range range;
2027  rval = get_sparse_tagged_entities( tag_data, range );
2028 
2029  // Open tables to write info
2030  mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_size, tables, &status );
2031  CHK_MHDF_ERR_0( status );
2032  assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
2033  // Fixed-length tag
2034  assert( table_size == data_size );
2035 
2036  // Write IDs for tagged entities
2037  subState.start( "writing sparse ids for tag: ", name.c_str() );
2038  rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
2039  subState.end( rval );
2040  CHK_MB_ERR_2( rval, tables, status );
2041  mhdf_closeData( filePtr, tables[0], &status );
2042  CHK_MHDF_ERR_1( status, tables[1] );
2043 
2044  // Set up data buffer for writing tag values
2045  IODebugTrack track( debugTrack, name + " Data", data_size );
2046  subState.start( "writing sparse values for tag: ", name.c_str() );
2047  rval = write_tag_values( tag_data.tag_id, tables[1], tag_data.sparse_offset, range, mb_data_type, value_type,
2048  value_type_size, tag_data.max_num_ents, track );
2049  subState.end( rval );
2050  CHK_MB_ERR_0( rval );
2051  mhdf_closeData( filePtr, tables[1], &status );
2052  CHK_MHDF_ERR_0( status );
2053 
2054  track.all_reduce();
2055  return MB_SUCCESS;
2056 }
2057 
2058 ErrorCode WriteHDF5::write_var_len_indices( const TagDesc& tag_data,
2059  const Range& range,
2060  hid_t idx_table,
2061  size_t table_size,
2062  int /*type_size*/,
2063  const char* name )
2064 {
2065  ErrorCode rval;
2066  mhdf_Status status;
2067 
2069 
2070  std::string tname( name ? name : "<UNKNOWN TAG?>" );
2071  tname += " - End Indices";
2072  IODebugTrack track( debugTrack, tname, table_size );
2073 
2074  // Set up data buffer for writing indices
2075  size_t chunk_size = bufferSize / ( std::max( sizeof( void* ), sizeof( long ) ) + sizeof( int ) );
2076  mhdf_index_t* idx_buffer = (mhdf_index_t*)dataBuffer;
2077  const void** junk = (const void**)dataBuffer; // Value pointers from tag_get_by_ptr (contents unused here)
2078  int* size_buffer = (int*)( dataBuffer + chunk_size * std::max( sizeof( void* ), sizeof( mhdf_index_t ) ) );
2079 
2080  // Write end indices for tagged entities.
2081  long data_offset = tag_data.var_data_offset - 1; // Running end index; -1 because stored indices are inclusive
2082  size_t remaining = range.size();
2083  size_t offset = tag_data.sparse_offset;
2084  size_t num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
2085  if( tag_data.max_num_ents )
2086  {
2087  assert( tag_data.max_num_ents >= (unsigned long)remaining );
2088  num_writes = ( tag_data.max_num_ents + chunk_size - 1 ) / chunk_size;
2089  }
2090  Range::const_iterator iter = range.begin();
2091  while( remaining )
2092  {
2094 
2095  // Write a chunk of at most chunk_size entities
2096  size_t count = remaining > chunk_size ? chunk_size : remaining;
2097  remaining -= count;
2098  Range::const_iterator stop = iter;
2099  stop += count;
2100  Range tmp;
2101  tmp.merge( iter, stop );
2102  iter = stop;
2103  assert( tmp.size() == (unsigned)count );
2104 
2105  rval = iFace->tag_get_by_ptr( tag_data.tag_id, tmp, junk, size_buffer );
2106  CHK_MB_ERR_0( rval );
2107 
2108  // Calculate end indices
2109  dbgOut.print( 3, " writing var-len tag offset chunk.\n" );
2110  track.record_io( offset, count );
2111  for( size_t i = 0; i < count; ++i )
2112  {
2113  data_offset += size_buffer[i];
2114  idx_buffer[i] = data_offset;
2115  }
2116 
2117  // Write
2118  mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, count, MHDF_INDEX_TYPE, idx_buffer, writeProp, &status );
2119  CHK_MHDF_ERR_0( status );
2120 
2121  offset += count;
2122  --num_writes;
2123  } // while (remaining)
2124 
2125  // Do empty writes if necessary for parallel collective IO
2126  if( collectiveIO )
2127  {
2128  while( num_writes-- )
2129  {
2130  assert( writeProp != H5P_DEFAULT );
2131  dbgOut.print( 3, " writing empty var-len tag index chunk.\n" );
2132  mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, 0, MHDF_INDEX_TYPE, 0, writeProp, &status );
2133  CHK_MHDF_ERR_0( status );
2134  }
2135  }
2136 
2137  track.all_reduce();
2138  return MB_SUCCESS;
2139 }
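
// Editor's note: the indices written above are *inclusive* end offsets
// into the var-len data table, which is why data_offset is initialized to
// var_data_offset - 1. For value counts {3, 1, 2} and var_data_offset == 0
// the stored indices are {2, 3, 5}. Minimal sketch of the accumulation:
/*
static void end_indices( const int* sizes, size_t n, long start, mhdf_index_t* out )
{
    long end = start - 1;
    for( size_t i = 0; i < n; ++i )
    {
        end += sizes[i];
        out[i] = end;  // Index of this entity's last value
    }
}
*/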
2140 
2141 ErrorCode WriteHDF5::write_var_len_data( const TagDesc& tag_data,
2142  const Range& range,
2143  hid_t table,
2144  size_t table_size,
2145  bool handle_tag,
2146  hid_t hdf_type,
2147  int type_size,
2148  const char* name )
2149 {
2150  ErrorCode rval;
2151  mhdf_Status status;
2152 
2154  assert( !handle_tag || sizeof( EntityHandle ) == type_size );
2155 
2156  std::string tname( name ? name : "<UNKNOWN TAG?>" );
2157  tname += " - Values";
2158  IODebugTrack track( debugTrack, tname, table_size );
2159 
2160  const size_t buffer_size = bufferSize / type_size;
2161 
2162  size_t num_writes = ( table_size + buffer_size - 1 ) / buffer_size;
2163  if( collectiveIO )
2164  {
2165  assert( tag_data.max_num_vals > 0 );
2166  num_writes = ( tag_data.max_num_vals + buffer_size - 1 ) / buffer_size;
2167  }
2168 
2169  unsigned char* buffer = (unsigned char*)dataBuffer;
2170  const void* prev_data = 0; // Data left over from prev iteration
2171  size_t prev_len = 0;
2172  Range::const_iterator iter = range.begin();
2173  long offset = tag_data.var_data_offset;
2174  while( prev_data || iter != range.end() )
2175  {
2176  size_t count = 0;
2177  if( prev_data )
2178  {
2179  size_t len;
2180  const void* ptr = prev_data;
2181  if( prev_len <= buffer_size )
2182  {
2183  len = prev_len;
2184  prev_data = 0;
2185  prev_len = 0;
2186  }
2187  else
2188  {
2189  len = buffer_size;
2190  prev_data = ( (const char*)prev_data ) + buffer_size * type_size;
2191  prev_len -= buffer_size;
2192  }
2193 
2194  if( handle_tag )
2195  convert_handle_tag( (const EntityHandle*)ptr, (EntityHandle*)buffer, len );
2196  else
2197  memcpy( buffer, ptr, len * type_size );
2198  count = len;
2199  }
2200 
2201  for( ; count < buffer_size && iter != range.end(); ++iter )
2202  {
2203  int len;
2204  const void* ptr;
2205  rval = iFace->tag_get_by_ptr( tag_data.tag_id, &*iter, 1, &ptr, &len );
2206  CHK_MB_ERR_0( rval );
2207  if( len + count > buffer_size )
2208  {
2209  prev_len = len + count - buffer_size;
2210  len = buffer_size - count;
2211  prev_data = ( (const char*)ptr ) + len * type_size;
2212  }
2213 
2214  if( handle_tag )
2215  convert_handle_tag( (const EntityHandle*)ptr, ( (EntityHandle*)buffer ) + count, len );
2216  else
2217  memcpy( buffer + count * type_size, ptr, len * type_size );
2218  count += len;
2219  }
2220 
2221  track.record_io( offset, count );
2222  mhdf_writeTagValuesWithOpt( table, offset, count, hdf_type, buffer, writeProp, &status );
2223  offset += count;
2224  CHK_MHDF_ERR_0( status );
2225  --num_writes;
2226  }
2227 
2228  // Do empty writes if necessary for parallel collective IO
2229  if( collectiveIO )
2230  {
2231  while( num_writes-- )
2232  {
2233  assert( writeProp != H5P_DEFAULT );
2234  dbgOut.print( 3, " writing empty var-len tag data chunk.\n" );
2235  mhdf_writeTagValuesWithOpt( table, 0, 0, hdf_type, 0, writeProp, &status );
2236  CHK_MHDF_ERR_0( status );
2237  }
2238  }
2239 
2240  track.all_reduce();
2241  return MB_SUCCESS;
2242 }
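
// Editor's note: a single entity's variable-length value can exceed the
// space left in the buffer, so the tail is carried into the next chunk
// through prev_data / prev_len. The split arithmetic above, with
// hypothetical numbers:
/*
size_t count = 900, buffer_size = 1000;       // 100 slots left in this chunk
int len = 300;                                // Incoming value holds 300 entries
size_t prev_len = len + count - buffer_size;  // 200 entries deferred
len = buffer_size - count;                    // 100 entries written now
*/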
2243 
2244 ErrorCode WriteHDF5::write_var_len_tag( const TagDesc& tag_data,
2245  const std::string& name,
2246  DataType mb_data_type,
2247  hid_t hdf_type,
2248  int type_size )
2249 {
2250  ErrorCode rval;
2251  mhdf_Status status;
2252  hid_t tables[3];
2253  long table_size;
2254  long data_table_size;
2255 
2257 
2258  // Get entities for which to write tag values
2259  Range range;
2260  rval = get_sparse_tagged_entities( tag_data, range );
2261 
2262  // Open tables to write info
2263  mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_table_size, tables, &status );
2264  CHK_MHDF_ERR_0( status );
2265  assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
2266 
2267  // Write IDs for tagged entities
2268  subState.start( "writing ids for var-len tag: ", name.c_str() );
2269  rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
2270  subState.end( rval );
2271  CHK_MB_ERR_2( rval, tables, status );
2272  mhdf_closeData( filePtr, tables[0], &status );
2273  CHK_MHDF_ERR_2( status, tables + 1 );
2274 
2275  // Write offsets for tagged entities
2276  subState.start( "writing indices for var-len tag: ", name.c_str() );
2277  rval = write_var_len_indices( tag_data, range, tables[2], table_size, type_size, name.c_str() );
2278  subState.end( rval );
2279  CHK_MB_ERR_1( rval, tables[1], status );
2280  mhdf_closeData( filePtr, tables[2], &status );
2281  CHK_MHDF_ERR_1( status, tables[1] );
2282 
2283  // Write the actual tag data
2284  subState.start( "writing values for var-len tag: ", name.c_str() );
2285  rval = write_var_len_data( tag_data, range, tables[1], data_table_size, mb_data_type == MB_TYPE_HANDLE, hdf_type,
2286  type_size, name.c_str() );
2287  subState.end( rval );
2288  CHK_MB_ERR_0( rval );
2289  mhdf_closeData( filePtr, tables[1], &status );
2290  CHK_MHDF_ERR_0( status );
2291 
2292  return MB_SUCCESS;
2293 }
2294 
2295 ErrorCode WriteHDF5::write_dense_tag( const TagDesc& tag_data,
2296  const ExportSet& elem_data,
2297  const std::string& name,
2298  DataType mb_data_type,
2299  hid_t value_type,
2300  int value_type_size )
2301 {
2303 
2304  // Open tables to write info
2305  mhdf_Status status;
2306  long table_size;
2307  hid_t table = mhdf_openDenseTagData( filePtr, name.c_str(), elem_data.name(), &table_size, &status );
2308  CHK_MHDF_ERR_0( status );
2309  assert( elem_data.range.size() + elem_data.offset <= (unsigned long)table_size );
2310 
2311  IODebugTrack track( debugTrack, name + " " + elem_data.name() + " Data", table_size );
2312  ErrorCode rval = write_tag_values( tag_data.tag_id, table, elem_data.offset, elem_data.range, mb_data_type,
2313  value_type, value_type_size, elem_data.max_num_ents, track );
2314  CHK_MB_ERR_0( rval );
2315  mhdf_closeData( filePtr, table, &status );
2316  CHK_MHDF_ERR_0( status );
2317 
2318  return MB_SUCCESS;
2319 }
2320 
2321 ErrorCode WriteHDF5::write_tag_values( Tag tag_id,
2322  hid_t data_table,
2323  unsigned long offset_in,
2324  const Range& range_in,
2325  DataType mb_data_type,
2326  hid_t value_type,
2327  int value_type_size,
2328  unsigned long max_num_ents,
2329  IODebugTrack& track )
2330 {
2331  mhdf_Status status;
2332 
2334 
2335  // Set up data buffer for writing tag values
2336  size_t chunk_size = bufferSize / value_type_size;
2337  assert( chunk_size > 0 );
2338  char* tag_buffer = (char*)dataBuffer;
2339 
2340  // Write the tag values
2341  size_t remaining = range_in.size();
2342  size_t offset = offset_in;
2343  Range::const_iterator iter = range_in.begin();
2344  long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
2345  if( max_num_ents )
2346  {
2347  assert( max_num_ents >= remaining );
2348  num_writes = ( max_num_ents + chunk_size - 1 ) / chunk_size;
2349  }
2350  while( remaining )
2351  {
2353 
2354  // Write a chunk of at most chunk_size values
2355  long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
2356  remaining -= count;
2357  memset( tag_buffer, 0, count * value_type_size );
2358  Range::const_iterator stop = iter;
2359  stop += count;
2360  Range range;
2361  range.merge( iter, stop );
2362  iter = stop;
2363  assert( range.size() == (unsigned)count );
2364 
2365  ErrorCode rval = iFace->tag_get_data( tag_id, range, tag_buffer );
2366  CHK_MB_ERR_0( rval );
2367 
2368  // Convert EntityHandles to file ids
2369  if( mb_data_type == MB_TYPE_HANDLE )
2370  convert_handle_tag( reinterpret_cast< EntityHandle* >( tag_buffer ),
2371  count * value_type_size / sizeof( EntityHandle ) );
2372 
2373  // Write the data
2374  dbgOut.print( 2, " writing tag value chunk.\n" );
2375  track.record_io( offset, count );
2376  assert( value_type > 0 );
2377  mhdf_writeTagValuesWithOpt( data_table, offset, count, value_type, tag_buffer, writeProp, &status );
2378  CHK_MHDF_ERR_0( status );
2379 
2380  offset += count;
2381  --num_writes;
2382  } // while (remaining)
2383 
2384  // Do empty writes if necessary for parallel collective IO
2385  if( collectiveIO )
2386  {
2387  while( num_writes-- )
2388  {
2389  assert( writeProp != H5P_DEFAULT );
2390  dbgOut.print( 2, " writing empty tag value chunk.\n" );
2391  assert( value_type > 0 );
2392  mhdf_writeTagValuesWithOpt( data_table, offset, 0, value_type, 0, writeProp, &status );
2393  CHK_MHDF_ERR_0( status );
2394  }
2395  }
2396 
2397  track.all_reduce();
2398  return MB_SUCCESS;
2399 }
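
// Editor's note: convert_handle_tag() rewrites each EntityHandle in the
// buffer to its file ID through idMap (handles that are not exported
// become 0), so MB_TYPE_HANDLE tag values stored in the file reference
// file IDs rather than in-memory handles. Minimal sketch of the
// per-element conversion (illustrative only):
/*
static void handles_to_file_ids( const RangeMap< EntityHandle, WriteHDF5::wid_t >& map,
                                 EntityHandle* data, size_t n )
{
    for( size_t i = 0; i < n; ++i )
        data[i] = map.find( data[i] );  // 0 when the handle is not exported
}
*/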
2400 
2401 ErrorCode WriteHDF5::write_qa( const std::vector< std::string >& list )
2402 {
2403  const char* app = "MOAB";
2404  const char* vers = MOAB_VERSION;
2405  char date_str[64];
2406  char time_str[64];
2407 
2409 
2410  std::vector< const char* > strs( list.size() ? list.size() : 4 );
2411  if( list.size() == 0 )
2412  {
2413  time_t t = time( NULL );
2414  tm* lt = localtime( &t );
2415 #ifdef WIN32
2416  strftime( date_str, sizeof( date_str ), "%m/%d/%y", lt ); // VS 2008 does not support %D
2417  strftime( time_str, sizeof( time_str ), "%H:%M:%S", lt ); // VS 2008 does not support %T
2418 #else
2419  strftime( date_str, sizeof( date_str ), "%D", lt );
2420  strftime( time_str, sizeof( time_str ), "%T", lt );
2421 #endif
2422 
2423  strs[0] = app;
2424  strs[1] = vers;
2425  strs[2] = date_str;
2426  strs[3] = time_str;
2427  }
2428  else
2429  {
2430  for( unsigned int i = 0; i < list.size(); ++i )
2431  strs[i] = list[i].c_str();
2432  }
2433 
2434  mhdf_Status status;
2435  dbgOut.print( 2, " writing QA history.\n" );
2436  mhdf_writeHistory( filePtr, &strs[0], strs.size(), &status );
2437  CHK_MHDF_ERR_0( status );
2438 
2439  return MB_SUCCESS;
2440 }
2441 
2442 /*
2443 ErrorCode WriteHDF5::register_known_tag_types(Interface* iface)
2444 {
2445  hid_t int4, double16;
2446  hsize_t dim[1];
2447  int error = 0;
2448  ErrorCode rval;
2449 
2450  dim[0] = 4;
2451  int4 = H5Tarray_create(H5T_NATIVE_INT, 1, dim, NULL);
2452 
2453  dim[0] = 16;
2454  double16 = H5Tarray_create(H5T_NATIVE_DOUBLE, 1, dim, NULL);
2455 
2456  if (int4 < 0 || double16 < 0)
2457  error = 1;
2458 
2459  struct { const char* name; hid_t type; } list[] = {
2460  { GLOBAL_ID_TAG_NAME, H5T_NATIVE_INT } ,
2461  { MATERIAL_SET_TAG_NAME, H5T_NATIVE_INT },
2462  { DIRICHLET_SET_TAG_NAME, H5T_NATIVE_INT },
2463  { NEUMANN_SET_TAG_NAME, H5T_NATIVE_INT },
2464  { HAS_MID_NODES_TAG_NAME, int4 },
2465  { GEOM_DIMENSION_TAG_NAME, H5T_NATIVE_INT },
2466  { MESH_TRANSFORM_TAG_NAME, double16 },
2467  { 0, 0 } };
2468 
2469  for (int i = 0; list[i].name; ++i) {
2470  if (list[i].type < 1) {
2471  ++error;
2472  continue;
2473  }
2474 
2475  Tag handle;
2476 
2477  std::string name("__hdf5_tag_type_");
2478  name += list[i].name;
2479 
2480  rval = iface->tag_get_handle(name.c_str(), handle);
2481  if (MB_TAG_NOT_FOUND == rval) {
2482  rval = iface->tag_create(name.c_str(), sizeof(hid_t), MB_TAG_SPARSE, handle, NULL);
2483  if (MB_SUCCESS != rval) {
2484  ++error;
2485  continue;
2486  }
2487 
2488  hid_t copy_id = H5Tcopy(list[i].type);
2489  const EntityHandle mesh = 0;
2490  rval = iface->tag_set_data(handle, &mesh, 1, &copy_id);
2491  if (MB_SUCCESS != rval) {
2492  ++error;
2493  continue;
2494  }
2495  }
2496  }
2497 
2498  H5Tclose(int4);
2499  H5Tclose(double16);
2500  return error ? MB_FAILURE : MB_SUCCESS;
2501 }
2502 */
2503 
2504 ErrorCode WriteHDF5::gather_tags( const Tag* user_tag_list, int num_tags )
2505 {
2506  ErrorCode result;
2507  std::vector< Tag > tag_list;
2508  std::vector< Tag >::iterator t_itor;
2509  Range range;
2510 
2511  // Get list of Tags to write
2512  result = writeUtil->get_tag_list( tag_list, user_tag_list, num_tags );
2513  CHK_MB_ERR_0( result );
2514 
2515  // Build the tag export list with default (as yet unassigned) offsets
2516  for( t_itor = tag_list.begin(); t_itor != tag_list.end(); ++t_itor )
2517  {
2518  // Add tag to export list
2519  TagDesc tag_data;
2520  tag_data.write_sparse = false;
2521  tag_data.tag_id = *t_itor;
2522  tag_data.sparse_offset = 0;
2523  tag_data.var_data_offset = 0;
2524  tag_data.max_num_ents = 0;
2525  tag_data.max_num_vals = 0;
2526  tagList.push_back( tag_data );
2527  }
2528 
2529  return MB_SUCCESS;
2530 }
2531 
2532 // If we support parallel, then this function will have been
2533 // overridden with an alternate version in WriteHDF5Parallel
2534 // that supports parallel I/O. If we're here
2535 // then MOAB was not built with support for parallel HDF5 I/O.
2536 ErrorCode WriteHDF5::parallel_create_file( const char* /* filename */,
2537  bool /* overwrite */,
2538  const std::vector< std::string >& /* qa_records */,
2539  const FileOptions& /* opts */,
2540  const Tag* /* tag_list */,
2541  int /* num_tags */,
2542  int /* dimension */,
2543  double* /* times */ )
2544 {
2545  MB_SET_ERR( MB_NOT_IMPLEMENTED, "WriteHDF5 does not support parallel writing" );
2546 }
2547 
2548 ErrorCode WriteHDF5::serial_create_file( const char* filename,
2549  bool overwrite,
2550  const std::vector< std::string >& qa_records,
2551  const Tag* user_tag_list,
2552  int num_user_tags,
2553  int dimension )
2554 {
2555  long first_id;
2556  mhdf_Status status;
2557  hid_t handle;
2558  std::list< ExportSet >::iterator ex_itor;
2559  ErrorCode rval;
2560 
2561  topState.start( "creating file" );
2562 
2563  const char* type_names[MBMAXTYPE];
2564  memset( type_names, 0, MBMAXTYPE * sizeof( char* ) );
2565  for( EntityType i = MBEDGE; i < MBENTITYSET; ++i )
2566  type_names[i] = CN::EntityTypeName( i );
2567 
2568  // Create the file
2569  filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status );
2570  CHK_MHDF_ERR_0( status );
2571  assert( !!filePtr );
2572 
2573  rval = write_qa( qa_records );
2574  CHK_MB_ERR_0( rval );
2575 
2576  // Create node table
2577  if( nodeSet.range.size() )
2578  {
2579  nodeSet.total_num_ents = nodeSet.range.size();
2580  handle = mhdf_createNodeCoords( filePtr, dimension, nodeSet.total_num_ents, &first_id, &status );
2581  CHK_MHDF_ERR_0( status );
2582  mhdf_closeData( filePtr, handle, &status );
2583  CHK_MHDF_ERR_0( status );
2584  nodeSet.first_id = (wid_t)first_id;
2585  rval = assign_ids( nodeSet.range, nodeSet.first_id );
2586  CHK_MB_ERR_0( rval );
2587  }
2588  else
2589  {
2590  nodeSet.first_id = std::numeric_limits< wid_t >::max();
2591  }
2592  nodeSet.offset = 0;
2593 
2594  // Create element tables
2595  for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
2596  {
2597  ex_itor->total_num_ents = ex_itor->range.size();
2598  rval = create_elem_table( *ex_itor, ex_itor->total_num_ents, first_id );
2599  CHK_MB_ERR_0( rval );
2600 
2601  ex_itor->first_id = (wid_t)first_id;
2602  ex_itor->offset = 0;
2603  rval = assign_ids( ex_itor->range, ex_itor->first_id );
2604  CHK_MB_ERR_0( rval );
2605  }
2606  // Create set tables
2607  writeSets = !setSet.range.empty();
2608  if( writeSets )
2609  {
2610  long contents_len, children_len, parents_len;
2611 
2612  setSet.total_num_ents = setSet.range.size();
2613  setSet.max_num_ents = setSet.total_num_ents;
2614  rval = create_set_meta( setSet.total_num_ents, first_id );
2615  CHK_MB_ERR_0( rval );
2616 
2617  setSet.first_id = (wid_t)first_id;
2618  rval = assign_ids( setSet.range, setSet.first_id );
2619  CHK_MB_ERR_0( rval );
2620 
2621  rval = count_set_size( setSet.range, contents_len, children_len, parents_len );
2622  CHK_MB_ERR_0( rval );
2623 
2624  rval = create_set_tables( contents_len, children_len, parents_len );
2625  CHK_MB_ERR_0( rval );
2626 
2627  setSet.offset = 0;
2628  setContentsOffset = 0;
2629  setChildrenOffset = 0;
2630  setParentsOffset = 0;
2631  writeSetContents = !!contents_len;
2632  writeSetChildren = !!children_len;
2633  writeSetParents = !!parents_len;
2634 
2635  maxNumSetContents = contents_len;
2636  maxNumSetChildren = children_len;
2637  maxNumSetParents = parents_len;
2638  } // if (!setSet.range.empty())
2639 
2640  // Create the adjacency tables after the set tables: sets do not have IDs
2641  // until this point, and some entities may be adjacent to sets (Exodus?)
2642  // Create node adjacency table
2643  wid_t num_adjacencies;
2644 #ifdef MB_H5M_WRITE_NODE_ADJACENCIES
2645  rval = count_adjacencies( nodeSet.range, num_adjacencies );
2646  CHK_MB_ERR_0( rval );
2647  nodeSet.adj_offset = 0;
2648  nodeSet.max_num_adjs = num_adjacencies;
2649  if( num_adjacencies > 0 )
2650  {
2651  handle = mhdf_createAdjacency( filePtr, mhdf_node_type_handle(), num_adjacencies, &status );
2652  CHK_MHDF_ERR_0( status );
2653  mhdf_closeData( filePtr, handle, &status );
2654  }
2655 #endif
2656 
2657  // Create element adjacency tables
2658  for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
2659  {
2660  rval = count_adjacencies( ex_itor->range, num_adjacencies );
2661  CHK_MB_ERR_0( rval );
2662 
2663  ex_itor->adj_offset = 0;
2664  ex_itor->max_num_adjs = num_adjacencies;
2665  if( num_adjacencies > 0 )
2666  {
2667  handle = mhdf_createAdjacency( filePtr, ex_itor->name(), num_adjacencies, &status );
2668  CHK_MHDF_ERR_0( status );
2669  mhdf_closeData( filePtr, handle, &status );
2670  }
2671  }
2672 
2673  dbgOut.tprint( 1, "Gathering Tags\n" );
2674 
2675  rval = gather_tags( user_tag_list, num_user_tags );
2676  CHK_MB_ERR_0( rval );
2677 
2678  // Create the tags and tag data tables
2679  std::list< TagDesc >::iterator tag_iter = tagList.begin();
2680  for( ; tag_iter != tagList.end(); ++tag_iter )
2681  {
2682  // As we haven't yet added any ExportSets for which to write
2683  // dense tag data to the TagDesc struct pointed to by
2684  // tag_iter, this call will initially return all tagged entities
2685  // in the set of entities to be written.
2686  Range range;
2687  rval = get_sparse_tagged_entities( *tag_iter, range );
2688  CHK_MB_ERR_0( rval );
2689 
2690  int s;
2691  bool var_len = ( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) );
2692 
2693  // Determine which ExportSets we want to write dense
2694  // data for. We never write dense data for variable-length
2695  // tag data.
2696  if( !var_len && writeTagDense )
2697  {
2698  // Check if we want to write this tag in dense format even if not
2699  // all of the entities have a tag value. The criteria are that the
2700  // tag be dense, have a default value, and have at least 2/3 of the
2701  // entities tagged.
2702  bool prefer_dense = false;
2703  TagType type;
2704  rval = iFace->tag_get_type( tag_iter->tag_id, type );
2705  CHK_MB_ERR_0( rval );
2706  if( MB_TAG_DENSE == type )
2707  {
2708  const void* defval = 0;
2709  rval = iFace->tag_get_default_value( tag_iter->tag_id, defval, s );
2710  if( MB_SUCCESS == rval ) prefer_dense = true;
2711  }
2712 
2713  if( check_dense_format_tag( nodeSet, range, prefer_dense ) )
2714  {
2715  range -= nodeSet.range;
2716  tag_iter->dense_list.push_back( nodeSet );
2717  }
2718 
2719  std::list< ExportSet >::const_iterator ex = exportList.begin();
2720  for( ; ex != exportList.end(); ++ex )
2721  {
2722  if( check_dense_format_tag( *ex, range, prefer_dense ) )
2723  {
2724  range -= ex->range;
2725  tag_iter->dense_list.push_back( *ex );
2726  }
2727  }
2728 
2729  if( check_dense_format_tag( setSet, range, prefer_dense ) )
2730  {
2731  range -= setSet.range;
2732  tag_iter->dense_list.push_back( setSet );
2733  }
2734  }
2735 
2736  tag_iter->write_sparse = !range.empty();
2737 
2738  unsigned long var_len_total = 0;
2739  if( var_len )
2740  {
2741  rval = get_tag_data_length( *tag_iter, range, var_len_total );
2742  CHK_MB_ERR_0( rval );
2743  }
2744 
2745  rval = create_tag( *tag_iter, range.size(), var_len_total );
2746  CHK_MB_ERR_0( rval );
2747  } // for (tags)
2748 
2749  topState.end();
2750  return MB_SUCCESS;
2751 }
2752 
2753 bool WriteHDF5::check_dense_format_tag( const ExportSet& ents, const Range& all_tagged, bool prefer_dense )
2754 {
2755  // If there are no tagged entities, then don't write anything
2756  if( ents.range.empty() ) return false;
2757 
2758  // If all of the entities are tagged, then write in dense format
2759  if( all_tagged.contains( ents.range ) ) return true;
2760 
2761  // Unless asked for a more lenient choice of dense format, return false
2762  if( !prefer_dense ) return false;
2763 
2764  // If we're being lenient about choosing dense format, then
2765  // return true if at least 2/3 of the entities are tagged.
2766  Range xsect = intersect( ents.range, all_tagged );
2767  if( 3 * xsect.size() >= 2 * ents.range.size() ) return true;
2768 
2769  return false;
2770 }
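
// Editor's note: 3 * xsect.size() >= 2 * ents.range.size() is the integer
// form of "at least 2/3 tagged": 700 of 1000 tagged gives 2100 >= 2000
// (dense wins), while 600 of 1000 gives 1800 < 2000 (sparse). Sketch:
/*
static bool two_thirds_tagged( size_t tagged, size_t total )
{
    return 3 * tagged >= 2 * total;  // Integer test, no floating point
}
*/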
2771 
2772 ErrorCode WriteHDF5::count_adjacencies( const Range& set, wid_t& result )
2773 {
2774  ErrorCode rval;
2775  std::vector< wid_t > adj_list;
2776  Range::const_iterator iter = set.begin();
2777  const Range::const_iterator end = set.end();
2778  result = 0;
2779  for( ; iter != end; ++iter )
2780  {
2781  adj_list.clear();
2782  rval = get_adjacencies( *iter, adj_list );
2783  CHK_MB_ERR_0( rval );
2784 
2785  if( adj_list.size() > 0 ) result += 2 + adj_list.size();
2786  }
2787 
2788  return MB_SUCCESS;
2789 }
2790 
2791 ErrorCode WriteHDF5::create_elem_table( const ExportSet& block, long num_entities, long& first_id_out )
2792 {
2793  mhdf_Status status;
2794  hid_t handle;
2795 
2797 
2798  mhdf_addElement( filePtr, block.name(), block.type, &status );
2799  CHK_MHDF_ERR_0( status );
2800 
2801  handle = mhdf_createConnectivity( filePtr, block.name(), block.num_nodes, num_entities, &first_id_out, &status );
2802  CHK_MHDF_ERR_0( status );
2803  mhdf_closeData( filePtr, handle, &status );
2804  CHK_MHDF_ERR_0( status );
2805 
2806  return MB_SUCCESS;
2807 }
2808 
2809 ErrorCode WriteHDF5::count_set_size( const Range& sets,
2810  long& contents_length_out,
2811  long& children_length_out,
2812  long& parents_length_out )
2813 {
2814  ErrorCode rval;
2815  Range set_contents;
2816  long contents_length_set, children_length_set, parents_length_set;
2817  unsigned long flags;
2818  std::vector< wid_t > set_contents_ids;
2819  std::vector< SpecialSetData >::const_iterator si = specialSets.begin();
2820 
2821  contents_length_out = 0;
2822  children_length_out = 0;
2823  parents_length_out = 0;
2824 
2825  for( Range::const_iterator iter = sets.begin(); iter != sets.end(); ++iter )
2826  {
2827  while( si != specialSets.end() && si->setHandle < *iter )
2828  ++si;
2829 
2830  if( si != specialSets.end() && si->setHandle == *iter )
2831  {
2832  contents_length_out += si->contentIds.size();
2833  children_length_out += si->childIds.size();
2834  parents_length_out += si->parentIds.size();
2835  ++si;
2836  continue;
2837  }
2838 
2839  rval = get_set_info( *iter, contents_length_set, children_length_set, parents_length_set, flags );
2840  CHK_MB_ERR_0( rval );
2841 
2842  // Check whether the set contents can and should be compressed as ranges
2843  if( !( flags & MESHSET_ORDERED ) && contents_length_set )
2844  {
2845  set_contents.clear();
2846  rval = iFace->get_entities_by_handle( *iter, set_contents, false );
2847  CHK_MB_ERR_0( rval );
2848 
2849  bool blocked_list;
2850  rval = range_to_blocked_list( set_contents, set_contents_ids, blocked_list );
2851  CHK_MB_ERR_0( rval );
2852 
2853  if( blocked_list )
2854  {
2855  assert( set_contents_ids.size() % 2 == 0 );
2856  contents_length_set = set_contents_ids.size();
2857  }
2858  }
2859 
2860  contents_length_out += contents_length_set;
2861  children_length_out += children_length_set;
2862  parents_length_out += parents_length_set;
2863  }
2864 
2865  return MB_SUCCESS;
2866 }
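
// Editor's note: blocked storage costs two entries per contiguous run,
// flat storage one entry per contained ID; range_to_blocked_list falls
// back to the flat form when blocking would not be smaller, which the
// blocked_list flag above reports. Sketch of the trade-off:
/*
static size_t blocked_length( size_t num_runs ) { return 2 * num_runs; }
// IDs 1..100 form one run:  blocked_length( 1 ) == 2    vs. 100 flat entries
// 50 isolated IDs:          blocked_length( 50 ) == 100 vs. 50 flat entries
*/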
2867 
2868 ErrorCode WriteHDF5::create_set_meta( long num_sets, long& first_id_out )
2869 {
2870  hid_t handle;
2871  mhdf_Status status;
2872 
2874 
2875  handle = mhdf_createSetMeta( filePtr, num_sets, &first_id_out, &status );
2876  CHK_MHDF_ERR_0( status );
2877  mhdf_closeData( filePtr, handle, &status );
2878 
2879  return MB_SUCCESS;
2880 }
2881 
2882 WriteHDF5::SpecialSetData* WriteHDF5::find_set_data( EntityHandle h )
2883 {
2884  SpecialSetData tmp;
2885  tmp.setHandle = h;
2886  std::vector< SpecialSetData >::iterator i;
2887  i = std::lower_bound( specialSets.begin(), specialSets.end(), tmp, SpecSetLess() );
2888  return ( i == specialSets.end() || i->setHandle != h ) ? 0 : &*i;
2889 }
2890 
2891 ErrorCode WriteHDF5::create_set_tables( long num_set_contents, long num_set_children, long num_set_parents )
2892 {
2893  hid_t handle;
2894  mhdf_Status status;
2895 
2897 
2898  if( num_set_contents > 0 )
2899  {
2900  handle = mhdf_createSetData( filePtr, num_set_contents, &status );
2901  CHK_MHDF_ERR_0( status );
2902  mhdf_closeData( filePtr, handle, &status );
2903  }
2904 
2905  if( num_set_children > 0 )
2906  {
2907  handle = mhdf_createSetChildren( filePtr, num_set_children, &status );
2908  CHK_MHDF_ERR_0( status );
2909  mhdf_closeData( filePtr, handle, &status );
2910  }
2911 
2912  if( num_set_parents > 0 )
2913  {
2914  handle = mhdf_createSetParents( filePtr, num_set_parents, &status );
2915  CHK_MHDF_ERR_0( status );
2916  mhdf_closeData( filePtr, handle, &status );
2917  }
2918 
2919  return MB_SUCCESS;
2920 }
2921 
2922 ErrorCode WriteHDF5::get_tag_size( Tag tag,
2923  DataType& moab_type,
2924  int& num_bytes,
2925  int& type_size,
2926  int& array_length,
2927  mhdf_TagDataType& file_type,
2928  hid_t& hdf_type )
2929 {
2930  ErrorCode rval;
2931  Tag type_handle;
2932  std::string tag_name, tag_type_name;
2933 
2935 
2936  // We return NULL for hdf_type if it can be determined from
2937  // the file_type. The only case where it is non-zero is
2938  // if the user specified a specific type via a mesh tag.
2939  hdf_type = (hid_t)0;
2940  bool close_hdf_type = false;
2941 
2942  rval = iFace->tag_get_data_type( tag, moab_type );
2943  CHK_MB_ERR_0( rval );
2944  rval = iFace->tag_get_length( tag, array_length );
2945  if( MB_VARIABLE_DATA_LENGTH == rval )
2946  {
2947  array_length = MB_VARIABLE_LENGTH;
2948  }
2949  else if( MB_SUCCESS != rval )
2950  return error( rval );
2951  rval = iFace->tag_get_bytes( tag, num_bytes );
2952  if( MB_VARIABLE_DATA_LENGTH == rval )
2953  num_bytes = MB_VARIABLE_LENGTH;
2954  else if( MB_SUCCESS != rval )
2955  return error( rval );
2956 
2957  switch( moab_type )
2958  {
2959  case MB_TYPE_INTEGER:
2960  type_size = sizeof( int );
2961  file_type = mhdf_INTEGER;
2962  hdf_type = H5T_NATIVE_INT;
2963  close_hdf_type = false;
2964  break;
2965  case MB_TYPE_DOUBLE:
2966  type_size = sizeof( double );
2967  file_type = mhdf_FLOAT;
2968  hdf_type = H5T_NATIVE_DOUBLE;
2969  close_hdf_type = false;
2970  break;
2971  case MB_TYPE_BIT:
2972  type_size = sizeof( bool );
2973  file_type = mhdf_BITFIELD;
2974  assert( array_length <= 8 );
2975  hdf_type = H5Tcopy( H5T_NATIVE_B8 );
2976  H5Tset_precision( hdf_type, array_length );
2977  close_hdf_type = true;
2978  break;
2979  case MB_TYPE_HANDLE:
2980  type_size = sizeof( EntityHandle );
2981  file_type = mhdf_ENTITY_ID;
2982  hdf_type = id_type;
2983  close_hdf_type = false;
2984  break;
2985  case MB_TYPE_OPAQUE:
2986  file_type = mhdf_OPAQUE;
2987  rval = iFace->tag_get_name( tag, tag_name );
2988  CHK_MB_ERR_0( rval );
2989  tag_type_name = "__hdf5_tag_type_";
2990  tag_type_name += tag_name;
2991  rval = iFace->tag_get_handle( tag_type_name.c_str(), 0, MB_TYPE_OPAQUE, type_handle, MB_TAG_ANY );
2992  if( MB_TAG_NOT_FOUND == rval )
2993  {
2994  if( num_bytes == MB_VARIABLE_LENGTH )
2995  type_size = 1;
2996  else
2997  type_size = num_bytes;
2998  hdf_type = H5Tcreate( H5T_OPAQUE, type_size );
2999  close_hdf_type = true;
3000  }
3001  else if( MB_SUCCESS == rval )
3002  {
3003  int hsize;
3004  rval = iFace->tag_get_bytes( type_handle, hsize );
3005  if( hsize != sizeof( hid_t ) ) return error( MB_FAILURE );
3006 
3007  const EntityHandle root = 0;
3008  rval = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
3009  if( rval != MB_SUCCESS ) return error( rval );
3010 
3011  type_size = H5Tget_size( hdf_type );
3012  if( type_size != num_bytes ) return error( MB_FAILURE );
3013 
3014  close_hdf_type = false;
3015  }
3016  else
3017  return error( rval );
3018  num_bytes = array_length;
3019  array_length = ( num_bytes == MB_VARIABLE_LENGTH ) ? MB_VARIABLE_LENGTH : 1;
3020  break;
3021  default:
3022  break;
3023  }
3024 
3025  assert( num_bytes == MB_VARIABLE_LENGTH || ( moab_type == MB_TYPE_BIT && num_bytes == 1 ) ||
3026  array_length * type_size == num_bytes );
3027 
3028  if( num_bytes == MB_VARIABLE_LENGTH )
3029  {
3030  array_length = MB_VARIABLE_LENGTH;
3031  if( !close_hdf_type )
3032  {
3033  hdf_type = H5Tcopy( hdf_type );
3034  // close_hdf_type = true;
3035  }
3036  }
3037  else if( array_length > 1 && moab_type != MB_TYPE_BIT )
3038  {
3039  hsize_t len = array_length;
3040 #if defined( H5Tarray_create_vers ) && ( H5Tarray_create_vers > 1 )
3041  hid_t temp_id = H5Tarray_create2( hdf_type, 1, &len );
3042 #else
3043  hid_t temp_id = H5Tarray_create( hdf_type, 1, &len, NULL );
3044 #endif
3045  if( close_hdf_type ) H5Tclose( hdf_type );
3046  hdf_type = temp_id;
3047  }
3048  else if( !close_hdf_type )
3049  {
3050  hdf_type = H5Tcopy( hdf_type );
3051  // close_hdf_type = true;
3052  }
3053 
3054  return MB_SUCCESS;
3055 }
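
// Editor's note: for a fixed-length array tag the element type is wrapped
// in an HDF5 array type as done above. Minimal sketch for a hypothetical
// tag holding 3 doubles, on HDF5 >= 1.8:
/*
hsize_t len = 3;
hid_t vec3 = H5Tarray_create2( H5T_NATIVE_DOUBLE, 1, &len );
// ... use vec3 when creating the tag and writing its values, then:
H5Tclose( vec3 );
*/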
3056 
3057 ErrorCode WriteHDF5::get_tag_data_length( const TagDesc& tag_info, const Range& range, unsigned long& result )
3058 {
3059  ErrorCode rval;
3060  result = 0;
3061 
3062  // Split buffer into two pieces, one for pointers and one for sizes
3063  size_t step, remaining;
3064  step = bufferSize / ( sizeof( int ) + sizeof( void* ) );
3065  const void** ptr_buffer = reinterpret_cast< const void** >( dataBuffer );
3066  int* size_buffer = reinterpret_cast< int* >( ptr_buffer + step );
3067  Range subrange;
3068  Range::const_iterator iter = range.begin();
3069  for( remaining = range.size(); remaining >= step; remaining -= step )
3070  {
3071  // Get subset of range containing 'count' entities
3072  Range::const_iterator end = iter;
3073  end += step;
3074  subrange.clear();
3075  subrange.merge( iter, end );
3076  iter = end;
3077  // Get tag sizes for entities
3078  rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
3079  if( MB_SUCCESS != rval ) return error( rval );
3080  // Sum lengths
3081  for( size_t i = 0; i < step; ++i )
3082  result += size_buffer[i];
3083  }
3084  // Process remaining
3085  subrange.clear();
3086  subrange.merge( iter, range.end() );
3087  assert( subrange.size() == remaining );
3088  rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
3089  if( MB_SUCCESS != rval ) return error( rval );
3090  for( size_t i = 0; i < remaining; ++i )
3091  result += size_buffer[i];
3092 
3093  return MB_SUCCESS;
3094 }
3095 
3096 ErrorCode WriteHDF5::create_tag( const TagDesc& tag_data,
3097  unsigned long num_sparse_entities,
3098  unsigned long data_table_size )
3099 {
3100  TagType mb_storage;
3101  DataType mb_type;
3102  mhdf_TagDataType mhdf_type;
3103  int tag_bytes, type_size, num_vals, storage;
3104  hid_t hdf_type = (hid_t)0;
3105  hid_t handles[3];
3106  std::string tag_name;
3107  ErrorCode rval;
3108  mhdf_Status status;
3109 
3111 
3112  // Get tag properties
3113  rval = iFace->tag_get_type( tag_data.tag_id, mb_storage );
3114  CHK_MB_ERR_0( rval );
3115  switch( mb_storage )
3116  {
3117  case MB_TAG_DENSE:
3118  storage = mhdf_DENSE_TYPE;
3119  break;
3120  case MB_TAG_SPARSE:
3121  storage = mhdf_SPARSE_TYPE;
3122  break;
3123  case MB_TAG_BIT:
3124  storage = mhdf_BIT_TYPE;
3125  break;
3126  case MB_TAG_MESH:
3127  storage = mhdf_MESH_TYPE;
3128  break;
3129  default:
3130  return error( MB_FAILURE );
3131  }
3132  rval = iFace->tag_get_name( tag_data.tag_id, tag_name );
3133  CHK_MB_ERR_0( rval );
3134  rval = get_tag_size( tag_data.tag_id, mb_type, tag_bytes, type_size, num_vals, mhdf_type, hdf_type );
3135  CHK_MB_ERR_0( rval );
3136 
3137  // Get default value
3138  const void *def_value, *mesh_value;
3139  int def_val_len, mesh_val_len;
3140  rval = iFace->tag_get_default_value( tag_data.tag_id, def_value, def_val_len );
3141  if( MB_ENTITY_NOT_FOUND == rval )
3142  {
3143  def_value = 0;
3144  def_val_len = 0;
3145  }
3146  else if( MB_SUCCESS != rval )
3147  {
3148  H5Tclose( hdf_type );
3149  return error( rval );
3150  }
3151 
3152  // Get mesh value
3153  unsigned char byte;
3154  const EntityHandle root = 0;
3155  if( mb_storage == MB_TAG_BIT )
3156  {
3157  rval = iFace->tag_get_data( tag_data.tag_id, &root, 1, &byte );
3158  mesh_value = &byte;
3159  mesh_val_len = 1;
3160  }
3161  else
3162  {
3163  rval = iFace->tag_get_by_ptr( tag_data.tag_id, &root, 1, &mesh_value, &mesh_val_len );
3164  }
3165  if( MB_TAG_NOT_FOUND == rval )
3166  {
3167  mesh_value = 0;
3168  mesh_val_len = 0;
3169  }
3170  else if( MB_SUCCESS != rval )
3171  {
3172  H5Tclose( hdf_type );
3173  return error( rval );
3174  }
3175 
3176  // For handle-type tags, need to convert from handles to file ids
3177  if( MB_TYPE_HANDLE == mb_type )
3178  {
3179  // Make sure there's room in the buffer for both
3180  assert( ( def_val_len + mesh_val_len ) * sizeof( long ) < (size_t)bufferSize );
3181 
3182  // Convert default value
3183  if( def_value )
3184  {
3185  memcpy( dataBuffer, def_value, def_val_len * sizeof( EntityHandle ) );
3186  convert_handle_tag( reinterpret_cast< EntityHandle* >( dataBuffer ), def_val_len );
3187  def_value = dataBuffer;
3188  }
3189 
3190  // Convert mesh value
3191  if( mesh_value )
3192  {
3193  EntityHandle* ptr = reinterpret_cast< EntityHandle* >( dataBuffer ) + def_val_len;
3194  memcpy( ptr, mesh_value, mesh_val_len * sizeof( EntityHandle ) );
3195  if( convert_handle_tag( ptr, mesh_val_len ) )
3196  mesh_value = ptr;
3197  else
3198  mesh_value = 0;
3199  }
3200  }
3201 
3202  if( MB_VARIABLE_LENGTH != tag_bytes )
3203  {
3204  // Write the tag description to the file
3205  mhdf_createTag( filePtr, tag_name.c_str(), mhdf_type, num_vals, storage, def_value, mesh_value, hdf_type,
3206  mb_type == MB_TYPE_HANDLE ? id_type : 0, &status );
3207  CHK_MHDF_ERR_0( status );
3208  H5Tclose( hdf_type );
3209 
3210  // Create empty table for tag data
3211  if( num_sparse_entities )
3212  {
3213  mhdf_createSparseTagData( filePtr, tag_name.c_str(), num_sparse_entities, handles, &status );
3214  CHK_MHDF_ERR_0( status );
3215  mhdf_closeData( filePtr, handles[0], &status );
3216  mhdf_closeData( filePtr, handles[1], &status );
3217  }
3218 
3219  for( size_t i = 0; i < tag_data.dense_list.size(); ++i )
3220  {
3221  const ExportSet* ex = find( tag_data.dense_list[i] );
3222  assert( 0 != ex );
3223  handles[0] = mhdf_createDenseTagData( filePtr, tag_name.c_str(), ex->name(), ex->total_num_ents, &status );
3224  CHK_MHDF_ERR_0( status );
3225  mhdf_closeData( filePtr, handles[0], &status );
3226  }
3227  }
3228  else
3229  {
3230  mhdf_createVarLenTag( filePtr, tag_name.c_str(), mhdf_type, storage, def_value, def_val_len, mesh_value,
3231  mesh_val_len, hdf_type, mb_type == MB_TYPE_HANDLE ? id_type : 0, &status );
3232  CHK_MHDF_ERR_0( status );
3233  H5Tclose( hdf_type );
3234 
3235  // Create empty table for tag data
3236  if( num_sparse_entities )
3237  {
3238  mhdf_createVarLenTagData( filePtr, tag_name.c_str(), num_sparse_entities, data_table_size, handles,
3239  &status );
3240  CHK_MHDF_ERR_0( status );
3241  mhdf_closeData( filePtr, handles[0], &status );
3242  mhdf_closeData( filePtr, handles[1], &status );
3243  mhdf_closeData( filePtr, handles[2], &status );
3244  }
3245  }
3246 
3247  return MB_SUCCESS;
3248 }
3249 
3250 ErrorCode WriteHDF5::get_num_sparse_tagged_entities( const TagDesc& tag, size_t& count )
3251 {
3252  Range tmp;
3253  ErrorCode rval = get_sparse_tagged_entities( tag, tmp );
3254  count = tmp.size();
3255  return rval;
3256 }
3257 
3258 ErrorCode WriteHDF5::get_sparse_tagged_entities( const TagDesc& tag, Range& results )
3259 {
3260  results.clear();
3261  if( !tag.have_dense( setSet ) ) results.merge( setSet.range );
3262  std::list< ExportSet >::reverse_iterator e;
3263  for( e = exportList.rbegin(); e != exportList.rend(); ++e )
3264  {
3265  if( !tag.have_dense( *e ) ) results.merge( e->range );
3266  }
3267  if( !tag.have_dense( nodeSet ) ) results.merge( nodeSet.range );
3268  if( results.empty() ) return MB_SUCCESS;
3269 
3270  return iFace->get_entities_by_type_and_tag( 0, MBMAXTYPE, &tag.tag_id, 0, 1, results, Interface::INTERSECT );
3271 }
3272 
3273 void WriteHDF5::get_write_entities( Range& range )
3274 {
3275  range.clear();
3276  range.merge( setSet.range );
3277  std::list< ExportSet >::reverse_iterator e;
3278  for( e = exportList.rbegin(); e != exportList.rend(); ++e )
3279  range.merge( e->range );
3280  range.merge( nodeSet.range );
3281 }
3282 
3283 void WriteHDF5::print_id_map() const
3284 {
3285  print_id_map( std::cout, "" );
3286 }
3287 
3288 void WriteHDF5::print_id_map( std::ostream& s, const char* pfx ) const
3289 {
3290  RangeMap< EntityHandle, wid_t >::const_iterator i;
3291  for( i = idMap.begin(); i != idMap.end(); ++i )
3292  {
3293  const char* n1 = CN::EntityTypeName( TYPE_FROM_HANDLE( i->begin ) );
3294  EntityID id = ID_FROM_HANDLE( i->begin );
3295  if( 1 == i->count )
3296  {
3297  s << pfx << n1 << " " << id << " -> " << i->value << std::endl;
3298  }
3299  else
3300  {
3301  const char* n2 = CN::EntityTypeName( TYPE_FROM_HANDLE( i->begin + i->count - 1 ) );
3302  if( n1 == n2 )
3303  {
3304  s << pfx << n1 << " " << id << "-" << id + i->count - 1 << " -> " << i->value << "-"
3305  << i->value + i->count - 1 << std::endl;
3306  }
3307  else
3308  {
3309  s << pfx << n1 << " " << id << "-" << n1 << " " << ID_FROM_HANDLE( i->begin + i->count - 1 ) << " -> "
3310  << i->value << "-" << i->value + i->count - 1 << std::endl;
3311  }
3312  }
3313  }
3314 }
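
// Editor's note: sample of the resulting output, with hypothetical
// handles and file IDs:
//
//   Vertex 1-1000 -> 1-1000
//   Hex 1-500 -> 1001-1500
//   Edge 12 -> 1501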
3315 
3316 void WriteHDF5::print_times( const double* t ) const
3317 {
3318  std::cout << "WriteHDF5: " << t[TOTAL_TIME] << std::endl
3319  << " gather mesh: " << t[GATHER_TIME] << std::endl
3320  << " create file: " << t[CREATE_TIME] << std::endl
3321  << " create nodes: " << t[CREATE_NODE_TIME] << std::endl
3322  << " negotiate types: " << t[NEGOTIATE_TYPES_TIME] << std::endl
3323  << " create elem: " << t[CREATE_ELEM_TIME] << std::endl
3324  << " file id exch: " << t[FILEID_EXCHANGE_TIME] << std::endl
3325  << " create adj: " << t[CREATE_ADJ_TIME] << std::endl
3326  << " create set: " << t[CREATE_SET_TIME] << std::endl
3327  << " shared ids: " << t[SHARED_SET_IDS] << std::endl
3328  << " shared data: " << t[SHARED_SET_CONTENTS] << std::endl
3329  << " set offsets: " << t[SET_OFFSET_TIME] << std::endl
3330  << " create tags: " << t[CREATE_TAG_TIME] << std::endl
3331  << " coordinates: " << t[COORD_TIME] << std::endl
3332  << " connectivity: " << t[CONN_TIME] << std::endl
3333  << " sets: " << t[SET_TIME] << std::endl
3334  << " set descrip: " << t[SET_META] << std::endl
3335  << " set content: " << t[SET_CONTENT] << std::endl
3336  << " set parent: " << t[SET_PARENT] << std::endl
3337  << " set child: " << t[SET_CHILD] << std::endl
3338  << " adjacencies: " << t[ADJ_TIME] << std::endl
3339  << " tags: " << t[TAG_TIME] << std::endl
3340  << " dense data: " << t[DENSE_TAG_TIME] << std::endl
3341  << " sparse data: " << t[SPARSE_TAG_TIME] << std::endl
3342  << " var-len data: " << t[VARLEN_TAG_TIME] << std::endl;
3343 }
3344 
3345 } // namespace moab