MOAB: Mesh Oriented datABase  (version 5.5.0)
parallel_hdf5_test.cpp
1 #include "moab/Range.hpp"
2 #include "TestUtil.hpp"
3 
4 #include "moab/Core.hpp"
5 #include "moab/ParallelComm.hpp"
6 #include "moab/Skinner.hpp"
7 #include "MBTagConventions.hpp"
8 #include "moab/CN.hpp"
10 
11 #include <iostream>
12 #include <sstream>
13 #include <algorithm>
14 #include "moab_mpi.h"
15 #include <unistd.h>
16 #include <cfloat>
17 #include <cstdio>
18 #include <ctime>
19 
20 using namespace moab;

#ifdef MESHDIR
const char* InputFile = STRINGIFY( MESHDIR ) "/ptest.cub";
const char* InputMix = STRINGIFY( MESHDIR ) "/io/mix.h5m";
const char* InputOneSide = STRINGIFY( MESHDIR ) "/io/oneside.h5m";
#else
#error Specify MESHDIR to compile test
#endif

void load_and_partition( Interface& moab, const char* filename, bool print_debug = false );

void save_and_load_on_root( Interface& moab, const char* tmp_filename );

void check_identical_mesh( Interface& moab1, Interface& moab2 );

void test_write_elements();
void test_write_shared_sets();
void test_var_length_parallel();

void test_read_elements_common( bool by_rank, int intervals, bool print_time, const char* extra_opts = 0 );

int ReadIntervals = 0;
void test_read_elements()
{
    test_read_elements_common( false, ReadIntervals, false );
}
void test_read_elements_by_rank()
{
    test_read_elements_common( true, ReadIntervals, false );
}
void test_bcast_summary()
{
    test_read_elements_common( false, ReadIntervals, false, "BCAST_SUMMARY=yes" );
}
void test_read_summary()
{
    test_read_elements_common( false, ReadIntervals, false, "BCAST_SUMMARY=no" );
}
void test_read_time();

void test_read_tags();
void test_read_global_tags();
void test_read_sets_common( const char* extra_opts = 0 );
void test_read_sets()
{
    test_read_sets_common();
}
void test_read_sets_bcast_dups()
{
    test_read_sets_common( "BCAST_DUPLICATE_READS=yes" );
}
void test_read_sets_read_dups()
{
    test_read_sets_common( "BCAST_DUPLICATE_READS=no" );
}
void test_read_bc_sets();

void test_write_different_element_types();
void test_write_different_tags();
void test_write_polygons();
void test_write_unbalanced();
void test_write_dense_tags();
void test_read_non_adjs_side();

const char PARTITION_TAG[] = "PARTITION";

bool KeepTmpFiles = false;
bool PauseOnStart = false;
bool HyperslabAppend = false;
const int DefaultReadIntervals = 2;
int ReadDebugLevel = 0;
int WriteDebugLevel = 0;
int ReadBlocks = 1;

enum Mode
{
    READ_PART,
    READ_DELETE,
    BCAST_DELETE
};
const Mode DEFAULT_MODE = READ_PART;
const bool DEFAULT_BY_RANK = false;

std::string get_read_options( bool by_rank = DEFAULT_BY_RANK, Mode mode = DEFAULT_MODE, const char* extra_opts = 0 );
std::string get_read_options( const char* extra_opts )
{
    return get_read_options( DEFAULT_BY_RANK, DEFAULT_MODE, extra_opts );
}
std::string get_read_options( bool by_rank, const char* extra_opts )
{
    return get_read_options( by_rank, DEFAULT_MODE, extra_opts );
}

std::string get_read_options( bool by_rank, Mode mode, const char* extra_opts )
{
    int numproc;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );

    // do parallel read unless only one processor
    std::ostringstream str;
    if( numproc > 1 )
    {
        str << "PARALLEL=";
        switch( mode )
        {
            case READ_PART:
                str << "READ_PART";
                break;
            case READ_DELETE:
                str << "READ_DELETE";
                break;
            case BCAST_DELETE:
                str << "BCAST_DELETE";
                break;
        }
        str << ";PARTITION=" << PARTITION_TAG << ";";
        if( by_rank ) str << "PARTITION_BY_RANK;";
    }

    if( extra_opts ) str << extra_opts << ";";

    if( ReadDebugLevel ) str << "DEBUG_IO=" << ReadDebugLevel << ";";

    if( HyperslabAppend ) str << "HYPERSLAB_APPEND;HYPERSLAB_SELECT_LIMIT=2147483647";

    return str.str();
}
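// Example (assuming DEFAULT_MODE is READ_PART): on more than one process,
// with default arguments and no extra options, this builds the string
// "PARALLEL=READ_PART;PARTITION=PARTITION;". The DEBUG_IO and
// HYPERSLAB_APPEND clauses are appended only when the corresponding
// command-line flags were given.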

int main( int argc, char* argv[] )
{
    int err = MPI_Init( &argc, &argv );
    CHECK( !err );

    for( int i = 1; i < argc; ++i )
    {
        if( !strcmp( argv[i], "-k" ) )
            KeepTmpFiles = true;
        else if( !strcmp( argv[i], "-p" ) )
            PauseOnStart = true;
        else if( !strcmp( argv[i], "-R" ) )
        {
            ++i;
            CHECK( i < argc );
            ReadIntervals = atoi( argv[i] );
            CHECK( ReadIntervals > 0 );
        }
        else if( !strcmp( argv[i], "-b" ) )
        {
            ++i;
            CHECK( i < argc );
            ReadBlocks = atoi( argv[i] );
            CHECK( ReadBlocks > 0 );
        }
        else if( !strcmp( argv[i], "-r" ) )
        {
            ++i;
            CHECK( i < argc );
            ReadDebugLevel = atoi( argv[i] );
            CHECK( ReadDebugLevel > 0 );
        }
        else if( !strcmp( argv[i], "-w" ) )
        {
            ++i;
            CHECK( i < argc );
            WriteDebugLevel = atoi( argv[i] );
            CHECK( WriteDebugLevel > 0 );
        }
        else if( !strcmp( argv[i], "-A" ) )
            HyperslabAppend = true;
        else
        {
            std::cerr << "Usage: " << argv[0]
                      << " [-k] [-p] [-R <intervals>] [-r <level>] [-w <level>] [-b <blocks>] [-A]" << std::endl;
            return 1;
        }
    }

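    // With -p, each rank prints its PID and then sleeps, giving the user a
    // window to attach a debugger to each process before the tests start.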
    if( PauseOnStart )
    {
        int rank;
        MPI_Comm_rank( MPI_COMM_WORLD, &rank );
        printf( "Rank %2d PID %lu\n", rank, (unsigned long)getpid() );
        sleep( 30 );
    }

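    // With -R, only the timed read test runs; otherwise run the full suite,
    // with a barrier after each test so one test's file I/O and output cannot
    // interleave with the next.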
    int result = 0;
    if( ReadIntervals )
    {
        result = RUN_TEST( test_read_time );
    }
    else
    {
        ReadIntervals = DefaultReadIntervals;
        result += RUN_TEST( test_write_elements );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_shared_sets );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_var_length_parallel );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_elements );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_elements_by_rank );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_global_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_sets );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_sets_bcast_dups );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_sets_read_dups );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_bc_sets );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_different_element_types );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_different_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_polygons );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_unbalanced );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_dense_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_non_adjs_side );
        MPI_Barrier( MPI_COMM_WORLD );
    }

    MPI_Finalize();
    return result;
}

/* Assume geometric topology sets correspond to interface sets
 * in that the entities contained inclusively in a geometric
 * topology set must be shared by the same set of processors.
 * As we partition based on geometric volumes, this assumption
 * should always be true. Print for each geometric topology set
 * the list of processors it is shared with.
 */
void print_partitioned_entities( Interface& moab, bool list_non_shared = false )
{
    ErrorCode rval;
    int size, rank;
    std::vector< int > ent_procs( MAX_SHARING_PROCS ), tmp_ent_procs( MAX_SHARING_PROCS );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    // expect shared entities to correspond to geometric sets

    // get tags for parallel data
    Tag sharedp_tag, sharedps_tag, sharedh_tag, sharedhs_tag, pstatus_tag;
    rval = moab.tag_get_handle( PARALLEL_SHARED_PROC_TAG_NAME, 1, MB_TYPE_INTEGER, sharedp_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_SHARED_PROCS_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_INTEGER, sharedps_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLE_TAG_NAME, 1, MB_TYPE_HANDLE, sharedh_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLES_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_HANDLE, sharedhs_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_STATUS_TAG_NAME, 1, MB_TYPE_OPAQUE, pstatus_tag );CHECK_ERR( rval );

    // for each geometric entity, check which processor we are sharing
    // entities with
    Tag geom_tag, id_tag;
    rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom_tag );CHECK_ERR( rval );
    id_tag = moab.globalId_tag();
    const char* topo_names_s[] = { "Vertex", "Curve", "Surface", "Volume" };
    // const char* topo_names_p[] = { "Vertices", "Curves", "Surfaces", "Volumes" };
    std::ostringstream buffer;  // buffer output in an attempt to prevent lines from different
                                // processors being mixed up.
    for( int t = 0; t < 4; ++t )
    {
        Range geom;
        int dim = t;
        const void* ptr = &dim;
        rval = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, &geom_tag, &ptr, 1, geom );CHECK_ERR( rval );

        // for each geometric entity of dimension 't'
        for( Range::const_iterator i = geom.begin(); i != geom.end(); ++i )
        {
            EntityHandle set = *i;
            int id;
            rval = moab.tag_get_data( id_tag, &set, 1, &id );CHECK_ERR( rval );

            buffer.clear();

            // get entities contained in this set but not its children
            Range entities, tmp_entities, children, diff;
            rval = moab.get_entities_by_handle( set, entities );CHECK_ERR( rval );
            rval = moab.get_child_meshsets( set, children );CHECK_ERR( rval );
            for( Range::const_iterator j = children.begin(); j != children.end(); ++j )
            {
                tmp_entities.clear();
                rval = moab.get_entities_by_handle( *j, tmp_entities );CHECK_ERR( rval );
                diff = subtract( entities, tmp_entities );
                entities.swap( diff );
            }

            // for each entity, check owning processors
            std::vector< char > status_flags( entities.size(), 0 );
            std::vector< int > shared_procs( entities.size(), 0 );
            rval = moab.tag_get_data( pstatus_tag, entities, &status_flags[0] );
            // if the tag is set nowhere, keep the default values of zero (not shared)
            if( MB_TAG_NOT_FOUND != rval ) CHECK_ERR( rval );
            unsigned num_shared = 0, num_owned = 0;
            for( size_t j = 0; j < status_flags.size(); ++j )
            {
                num_shared += !!( status_flags[j] & PSTATUS_SHARED );
                num_owned += !( status_flags[j] & PSTATUS_NOT_OWNED );
            }

            if( !num_shared )
            {
                if( list_non_shared )
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "not shared" << std::endl;
            }
            else if( num_shared != entities.size() )
            {
                buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                       << "ERROR: " << num_shared << " of " << entities.size() << " entities marked as 'shared'"
                       << std::endl;
            }
            else if( num_owned && num_owned != entities.size() )
            {
                buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                       << "ERROR: " << num_owned << " of " << entities.size() << " entities owned by this processor"
                       << std::endl;
            }
            else
            {
                rval = moab.tag_get_data( sharedp_tag, entities, &shared_procs[0] );CHECK_ERR( rval );
                int proc = shared_procs[0];
                bool all_match = true;
                for( size_t j = 1; j < shared_procs.size(); ++j )
                    if( shared_procs[j] != proc ) all_match = false;
                if( !all_match )
                {
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "ERROR: processor IDs do not match!" << std::endl;
                }
                else if( proc != -1 )
                {
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "shared with processor " << proc;
                    if( num_owned ) buffer << " (owned by this processor)";
                    buffer << std::endl;
                }
                else if( entities.empty() )
                {
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "ERROR: no entities!" << std::endl;
                }
                else
                {
                    Range::const_iterator j = entities.begin();
                    rval = moab.tag_get_data( sharedps_tag, &*j, 1, &ent_procs[0] );CHECK_ERR( rval );
                    for( ++j; j != entities.end(); ++j )
                    {
                        rval = moab.tag_get_data( sharedps_tag, &*j, 1, &tmp_ent_procs[0] );CHECK_ERR( rval );
                        if( ent_procs != tmp_ent_procs ) all_match = false;
                    }
                    if( !all_match )
                    {
                        buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                               << "ERROR: processor IDs do not match!" << std::endl;
                    }
                    else
                    {
                        buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                               << "processors ";
                        for( int k = 0; k < MAX_SHARING_PROCS; ++k )
                            if( ent_procs[k] != -1 ) buffer << ent_procs[k] << ", ";
                        if( num_owned ) buffer << " (owned by this processor)";
                        buffer << std::endl;
                    }
                }
            }
        }
    }
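    // Emit each rank's buffered report in rank order: every iteration starts
    // with a barrier, and only the matching rank writes, so at most one
    // process prints at a time.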
    for( int i = 0; i < size; ++i )
    {
        MPI_Barrier( MPI_COMM_WORLD );
        if( i == rank )
        {
            std::cout << buffer.str();
            std::cout.flush();
        }
    }
}

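// Read the file in parallel, treating each geometric volume set
// (GEOM_DIMENSION == 3) as a partition, distributing the volumes across
// processors, and resolving entities shared along part boundaries.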
void load_and_partition( Interface& moab, const char* filename, bool print )
{
    ErrorCode rval;

    rval = moab.load_file( filename, 0,
                           "PARALLEL=READ_DELETE;"
                           "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
                           "PARTITION_DISTRIBUTE;"
                           "PARALLEL_RESOLVE_SHARED_ENTS" );

    if( print ) print_partitioned_entities( moab );

    CHECK_ERR( rval );
}

void save_and_load_on_root( Interface& moab, const char* tmp_filename )
{
    ErrorCode rval;
    int procnum;
    MPI_Comm_rank( MPI_COMM_WORLD, &procnum );

    const char* opt = "PARALLEL=WRITE_PART";
    std::string str;
    if( WriteDebugLevel )
    {
        std::ostringstream s;
        s << opt << ";DEBUG_IO=" << WriteDebugLevel;
        str = s.str();
        opt = str.c_str();
    }
    rval = moab.write_file( tmp_filename, 0, opt );
    if( MB_SUCCESS != rval )
    {
        std::cerr << "Parallel write failed on processor " << procnum << std::endl;
        if( procnum == 0 && !KeepTmpFiles ) remove( tmp_filename );
        CHECK_ERR( rval );
    }

    if( procnum == 0 && KeepTmpFiles ) std::cout << "Wrote file: \"" << tmp_filename << "\"\n";

    // All created pcomm objects should be retrieved (with the pcomm tag) and
    // deleted at this time. Otherwise, the memory used by them will be leaked
    // as the pcomm tag will be deleted by moab.delete_mesh() below.
    std::vector< ParallelComm* > pc_list;
    ParallelComm::get_all_pcomm( &moab, pc_list );
    for( std::vector< ParallelComm* >::iterator vit = pc_list.begin(); vit != pc_list.end(); ++vit )
        delete *vit;

    moab.delete_mesh();
    std::vector< Tag > tags;
    rval = moab.tag_get_tags( tags );CHECK_ERR( rval );
    for( size_t i = 0; i < tags.size(); ++i )
    {
        rval = moab.tag_delete( tags[i] );CHECK_ERR( rval );
    }

    if( procnum == 0 )
    {
        rval = moab.load_file( tmp_filename );
        if( !KeepTmpFiles ) remove( tmp_filename );
        CHECK_ERR( rval );
    }
}

void count_owned_entities( Interface& moab, int counts[MBENTITYSET] )
{
    ErrorCode rval;
    ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
    CHECK( 0 != pcomm );
    std::fill( counts, counts + MBENTITYSET, 0u );

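    // filter_pstatus with PSTATUS_NOT_OWNED and PSTATUS_NOT keeps only the
    // entities this rank owns, so each shared entity is counted exactly once
    // across all ranks.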
    for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
    {
        Range range;
        rval = moab.get_entities_by_type( 0, t, range );CHECK_ERR( rval );
        if( !range.empty() ) rval = pcomm->filter_pstatus( range, PSTATUS_NOT_OWNED, PSTATUS_NOT );CHECK_ERR( rval );
        counts[t] = range.size();
    }
}

void check_identical_mesh( Interface& mb1, Interface& mb2 )
{
    ErrorCode rval;
    std::map< EntityHandle, EntityHandle > entmap;

    // match vertices by coordinate
    Range r1, r2;
    Range::iterator i1, i2;
    rval = mb1.get_entities_by_type( 0, MBVERTEX, r1 );CHECK_ERR( rval );
    rval = mb2.get_entities_by_type( 0, MBVERTEX, r2 );CHECK_ERR( rval );
    CHECK_EQUAL( r1.size(), r2.size() );
    for( i1 = r1.begin(); i1 != r1.end(); ++i1 )
    {
        double coords1[3];
        rval = mb1.get_coords( &*i1, 1, coords1 );CHECK_ERR( rval );
        for( i2 = r2.begin(); i2 != r2.end(); ++i2 )
        {
            double coords2[3];
            rval = mb2.get_coords( &*i2, 1, coords2 );CHECK_ERR( rval );
            coords2[0] -= coords1[0];
            coords2[1] -= coords1[1];
            coords2[2] -= coords1[2];
            double lensqr = coords2[0] * coords2[0] + coords2[1] * coords2[1] + coords2[2] * coords2[2];
            if( lensqr < 1e-12 ) break;
        }
        CHECK( i2 != r2.end() );
        entmap[*i2] = *i1;
        r2.erase( i2 );
    }

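    // For each element in mb1, search mb2 for an element whose connectivity,
    // mapped through entmap into mb1 vertex handles, matches exactly; matched
    // elements are erased from r2 so duplicates cannot match twice.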
    // match element connectivity
    std::vector< EntityHandle > conn1, conn2;
    for( EntityType t = MBEDGE; t < MBENTITYSET; ++t )
    {
        r1.clear();
        rval = mb1.get_entities_by_type( 0, t, r1 );CHECK_ERR( rval );
        r2.clear();
        rval = mb2.get_entities_by_type( 0, t, r2 );CHECK_ERR( rval );
        CHECK_EQUAL( r1.size(), r2.size() );

        for( i1 = r1.begin(); i1 != r1.end(); ++i1 )
        {
            conn1.clear();
            rval = mb1.get_connectivity( &*i1, 1, conn1 );CHECK_ERR( rval );
            for( i2 = r2.begin(); i2 != r2.end(); ++i2 )
            {
                conn2.clear();
                rval = mb2.get_connectivity( &*i2, 1, conn2 );CHECK_ERR( rval );
                if( conn1.size() != conn2.size() ) continue;
                for( std::vector< EntityHandle >::iterator j = conn2.begin(); j != conn2.end(); ++j )
                    *j = entmap[*j];
                if( conn1 == conn2 ) break;
            }

            CHECK( i2 != r2.end() );
            entmap[*i2] = *i1;
            r2.erase( i2 );
        }
    }
}

void test_write_elements()
{
    int proc_counts[MBENTITYSET], all_counts[MBENTITYSET], file_counts[MBENTITYSET];
    int err, rank;
    ErrorCode rval;
    err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    CHECK( !err );

    // load and partition a .cub file
    Core moab_instance;
    Interface& moab = moab_instance;
    load_and_partition( moab, InputFile, false );

    // count number of owned entities of each type and sum over all procs
    count_owned_entities( moab, proc_counts );
    std::fill( all_counts, all_counts + MBENTITYSET, 0u );
    err = MPI_Allreduce( proc_counts, all_counts, MBENTITYSET, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    CHECK( !err );

    // do parallel write and on root proc do serial read of written file
    save_and_load_on_root( moab, "test_write_elements.h5m" );
    if( rank == 0 )
    {
        for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
        {
            rval = moab.get_number_entities_by_type( 0, t, file_counts[t] );CHECK_ERR( rval );
        }
    }

    // check that sum of owned entities equals number of entities from serial read

    err = MPI_Bcast( file_counts, MBENTITYSET, MPI_INT, 0, MPI_COMM_WORLD );
    CHECK( !err );

    bool all_equal = true;
    for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
        if( file_counts[t] != all_counts[t] ) all_equal = false;

    if( rank == 0 && !all_equal )
    {
        std::cerr << "Type\tPartnd\tWritten" << std::endl;
        for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
            std::cerr << CN::EntityTypeName( t ) << '\t' << all_counts[t] << '\t' << file_counts[t] << std::endl;
    }

    CHECK( all_equal );

    // on root processor, do serial read of original .cub file and compare

    if( rank == 0 )
    {
        Core moab2;
        rval = moab2.load_file( InputFile );CHECK_ERR( rval );
        check_identical_mesh( moab, moab2 );
    }
}

bool check_sets_sizes( Interface& mb1, EntityHandle set1, Interface& mb2, EntityHandle set2 )
{
    ErrorCode rval;
    bool result = true;
    for( EntityType t = MBVERTEX; t < MBMAXTYPE; ++t )
    {
        int count1, count2;
        rval = mb1.get_number_entities_by_type( set1, t, count1 );CHECK_ERR( rval );
        rval = mb2.get_number_entities_by_type( set2, t, count2 );CHECK_ERR( rval );
        if( count1 != count2 )
        {
            std::cerr << "Sets differ in number of " << CN::EntityTypeName( t ) << " : " << count1 << " vs. "
                      << count2 << std::endl;
            result = false;
        }
    }
    return result;
}

void test_write_shared_sets()
{
    int err, rank, size;
    ErrorCode rval;
    err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    CHECK( !err );
    err = MPI_Comm_size( MPI_COMM_WORLD, &size );
    CHECK( !err );

    Core moab_instance;
    Interface& moab = moab_instance;
    load_and_partition( moab, InputFile );
    save_and_load_on_root( moab, "test_write_shared_sets.h5m" );

    if( rank != 0 ) return;

    Core moab2_instance;
    Interface& moab2 = moab2_instance;
    rval = moab2.load_file( InputFile );CHECK_ERR( rval );

    Tag mattag1, mattag2;
    rval = moab.tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag1 );CHECK_ERR( rval );
    rval = moab2.tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag2 );CHECK_ERR( rval );

    Range matsets;
    rval = moab2.get_entities_by_type_and_tag( 0, MBENTITYSET, &mattag2, 0, 1, matsets );CHECK_ERR( rval );
    for( Range::iterator i = matsets.begin(); i != matsets.end(); ++i )
    {
        int block_id;
        rval = moab2.tag_get_data( mattag2, &*i, 1, &block_id );CHECK_ERR( rval );

        Range tmpents;
        void* tagdata[] = { &block_id };
        rval = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, &mattag1, tagdata, 1, tmpents );
        if( tmpents.size() != 1 ) std::cerr << tmpents.size() << " sets with material set id " << block_id << std::endl;
        CHECK_EQUAL( (int)tmpents.size(), 1 );

        CHECK( check_sets_sizes( moab2, *i, moab, tmpents.front() ) );
    }
}

void test_var_length_parallel()
{
    Range::const_iterator i;
    ErrorCode rval;
    Core moab;
    Interface& mb = moab;
    Range verts;
    Tag vartag;
    const char* filename = "var-len-para.h5m";
    const char* tagname = "ParVar";

    // If this tag doesn't exist, writer will fail
    Tag junk_tag;
    rval = mb.tag_get_handle( PARALLEL_GID_TAG_NAME, 1, MB_TYPE_INTEGER, junk_tag, MB_TAG_DENSE | MB_TAG_EXCL );CHECK_ERR( rval );

    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    // Create N+1 vertices on each processor, where N is the rank
    std::vector< double > coords( 3 * rank + 3, (double)rank );
    rval = mb.create_vertices( &coords[0], rank + 1, verts );CHECK_ERR( rval );

    // Create a var-len tag
    rval = mb.tag_get_handle( tagname, 0, MB_TYPE_INTEGER, vartag, MB_TAG_VARLEN | MB_TAG_DENSE | MB_TAG_EXCL );CHECK_ERR( rval );

    // Write data on each vertex:
    // { n, rank, rank+1, ..., rank+n-1 } where n >= 1
    std::vector< int > data;
    rval = MB_SUCCESS;
    for( i = verts.begin(); i != verts.end(); ++i )
    {
        EntityHandle h = *i;
        const int n = h % 7 + 1;
        data.resize( n + 1 );
        data[0] = n;
        for( int j = 0; j < n; ++j )
            data[j + 1] = rank + j;
        const int s = ( n + 1 );
        const void* ptrarr[] = { &data[0] };
        ErrorCode tmperr = mb.tag_set_by_ptr( vartag, &h, 1, ptrarr, &s );
        if( MB_SUCCESS != tmperr ) rval = tmperr;
    }
    CHECK_ERR( rval );

    // Write file
    const char* opt = "PARALLEL=WRITE_PART";
    std::string str;
    if( WriteDebugLevel )
    {
        std::ostringstream s;
        s << opt << ";DEBUG_IO=" << WriteDebugLevel;
        str = s.str();
        opt = str.c_str();
    }
    rval = mb.write_file( filename, "MOAB", opt );CHECK_ERR( rval );

    // Read file. We only reset and re-read the file on the
    // root processor. All other processors keep the pre-write
    // mesh. This allows many of the tests to be run on all
    // processors. Running the tests on the pre-write mesh on
    // non-root processors allows us to verify that any problems
    // are due to the file API rather than some other bug.
    ErrorCode rval2 = rval = MB_SUCCESS;
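    // On the root, destroy the Core in place and rebuild it with placement
    // new so the file is re-read into a genuinely empty instance, while the
    // other ranks keep their pre-write mesh for comparison.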
    if( !rank )
    {
        moab.~Core();
        new( &moab ) Core;
        rval = mb.load_mesh( filename );
        if( !KeepTmpFiles ) remove( filename );
        rval2 = mb.tag_get_handle( tagname, 0, MB_TYPE_INTEGER, vartag );
    }
    CHECK_ERR( rval );CHECK_ERR( rval2 );

    // Check that tag is correct
    int tag_size;
    rval = mb.tag_get_length( vartag, tag_size );
    CHECK_EQUAL( MB_VARIABLE_DATA_LENGTH, rval );
    TagType storage;
    rval = mb.tag_get_type( vartag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_DENSE, storage );
    DataType type;
    rval = mb.tag_get_data_type( vartag, type );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TYPE_INTEGER, type );

    // get vertices
    verts.clear();
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );

    // Check consistency of tag data on each vertex
    // and count the number of vertices for each rank.
    std::vector< int > vtx_counts( numproc, 0 );
    for( i = verts.begin(); i != verts.end(); ++i )
    {
        EntityHandle h = *i;
        int size = -1;
        const void* ptrarr[1] = { 0 };
        rval = mb.tag_get_by_ptr( vartag, &h, 1, ptrarr, &size );CHECK_ERR( rval );
        const int* tag_data = reinterpret_cast< const int* >( ptrarr[0] );
        CHECK( size >= 2 );
        CHECK( NULL != tag_data );
        CHECK_EQUAL( size - 1, tag_data[0] );
        CHECK( tag_data[1] >= 0 && tag_data[1] < numproc );
        ++vtx_counts[tag_data[1]];
        for( int j = 1; j < size - 1; ++j )
            CHECK_EQUAL( tag_data[1] + j, tag_data[1 + j] );
    }

    // Check number of vertices for each rank
    for( int j = 0; j < numproc; ++j )
    {
        // Only root should have data for other processors.
        if( rank == 0 || rank == j )
            CHECK_EQUAL( j + 1, vtx_counts[j] );
        else
            CHECK_EQUAL( 0, vtx_counts[j] );
    }
}

// create a row of cube-shaped blocks of hex mesh
void create_input_file( const char* file_name,
                        int intervals,
                        int num_cpu,
                        int blocks_per_cpu = 1,
                        const char* ijk_vert_tag_name = 0,
                        const char* ij_set_tag_name = 0,
                        const char* global_tag_name = 0,
                        const int* global_mesh_value = 0,
                        const int* global_default_value = 0,
                        bool create_bcsets = false )
{
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    Tag ijk_vert_tag = 0, ij_set_tag = 0, global_tag = 0;
    if( ijk_vert_tag_name )
    {
        rval = mb.tag_get_handle( ijk_vert_tag_name, 3, MB_TYPE_INTEGER, ijk_vert_tag, MB_TAG_EXCL | MB_TAG_DENSE );CHECK_ERR( rval );
    }
    if( ij_set_tag_name )
    {
        rval = mb.tag_get_handle( ij_set_tag_name, 2, MB_TYPE_INTEGER, ij_set_tag, MB_TAG_SPARSE | MB_TAG_EXCL );CHECK_ERR( rval );
    }
    if( global_tag_name )
    {
        rval = mb.tag_get_handle( global_tag_name, 1, MB_TYPE_INTEGER, global_tag, MB_TAG_DENSE | MB_TAG_EXCL,
                                  global_default_value );CHECK_ERR( rval );
        if( global_mesh_value )
        {
            EntityHandle root = 0;
            rval = mb.tag_set_data( global_tag, &root, 1, global_mesh_value );CHECK_ERR( rval );
        }
    }

    const int num_blk = num_cpu * blocks_per_cpu;
    int iv = intervals + 1, ii = num_blk * intervals + 1;
    std::vector< EntityHandle > verts( iv * iv * ii );
    int idx = 0;
    for( int i = 0; i < ii; ++i )
    {
        for( int j = 0; j < iv; ++j )
        {
            int start = idx;
            for( int k = 0; k < iv; ++k )
            {
                const double coords[3] = { static_cast< double >( i ), static_cast< double >( j ),
                                           static_cast< double >( k ) };
                rval = mb.create_vertex( coords, verts[idx] );CHECK_ERR( rval );
                if( ijk_vert_tag )
                {
                    int vals[] = { i, j, k };
                    rval = mb.tag_set_data( ijk_vert_tag, &verts[idx], 1, vals );CHECK_ERR( rval );
                }
                ++idx;
            }

            if( ij_set_tag )
            {
                EntityHandle set;
                rval = mb.create_meshset( MESHSET_SET, set );CHECK_ERR( rval );
                rval = mb.add_entities( set, &verts[start], idx - start );CHECK_ERR( rval );
                int vals[] = { i, j };
                rval = mb.tag_set_data( ij_set_tag, &set, 1, vals );CHECK_ERR( rval );
            }
        }
    }

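    // Vertices were created with k varying fastest, so vertex (i,j,k) is
    // verts[iv*(iv*i + j) + k]; each hex below lists its four corners on the
    // k face followed by the same four corners on the k+1 face.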
    const int eb = intervals * intervals * intervals;
    std::vector< EntityHandle > elems( num_blk * eb );
    idx = 0;
    for( int c = 0; c < num_blk; ++c )
    {
        for( int i = c * intervals; i < ( c + 1 ) * intervals; ++i )
        {
            for( int j = 0; j < intervals; ++j )
            {
                for( int k = 0; k < intervals; ++k )
                {
                    EntityHandle conn[8] = { verts[iv * ( iv * i + j ) + k],
                                             verts[iv * ( iv * ( i + 1 ) + j ) + k],
                                             verts[iv * ( iv * ( i + 1 ) + j + 1 ) + k],
                                             verts[iv * ( iv * i + j + 1 ) + k],
                                             verts[iv * ( iv * i + j ) + k + 1],
                                             verts[iv * ( iv * ( i + 1 ) + j ) + k + 1],
                                             verts[iv * ( iv * ( i + 1 ) + j + 1 ) + k + 1],
                                             verts[iv * ( iv * i + j + 1 ) + k + 1] };

                    rval = mb.create_element( MBHEX, conn, 8, elems[idx++] );CHECK_ERR( rval );
                }
            }
        }
    }

    Tag part_tag;
    rval = mb.tag_get_handle( PARTITION_TAG, 1, MB_TYPE_INTEGER, part_tag, MB_TAG_SPARSE | MB_TAG_EXCL );CHECK_ERR( rval );

    std::vector< EntityHandle > parts( num_cpu );
    for( int i = 0; i < num_cpu; ++i )
    {
        rval = mb.create_meshset( MESHSET_SET, parts[i] );CHECK_ERR( rval );
        for( int j = 0; j < blocks_per_cpu; ++j )
        {
            rval = mb.add_entities( parts[i], &elems[( num_cpu * j + i ) * eb], eb );CHECK_ERR( rval );
        }
        rval = mb.tag_set_data( part_tag, &parts[i], 1, &i );CHECK_ERR( rval );
    }

    if( create_bcsets )
    {
        // neumann set
        Range skin_ents;
        rval = Skinner( &mb ).find_skin( 0, &elems[0], elems.size(), false, skin_ents );CHECK_ERR( rval );
        EntityHandle bcset;
        rval = mb.create_meshset( MESHSET_SET, bcset );CHECK_ERR( rval );
        Tag bcset_tag;
        rval = mb.tag_get_handle( NEUMANN_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bcset_tag, MB_TAG_SPARSE | MB_TAG_CREAT );CHECK_ERR( rval );
        int dum = 100;
        rval = mb.tag_set_data( bcset_tag, &bcset, 1, &dum );CHECK_ERR( rval );
        rval = mb.add_entities( bcset, skin_ents );CHECK_ERR( rval );

        // dirichlet set
        rval = mb.create_meshset( MESHSET_SET, bcset );CHECK_ERR( rval );
        rval = mb.tag_get_handle( DIRICHLET_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bcset_tag, MB_TAG_SPARSE | MB_TAG_CREAT );CHECK_ERR( rval );
        dum = 200;
        rval = mb.tag_set_data( bcset_tag, &bcset, 1, &dum );CHECK_ERR( rval );
        Range nodes;
        rval = mb.get_adjacencies( skin_ents, 0, false, nodes, Interface::UNION );CHECK_ERR( rval );
        rval = mb.add_entities( bcset, nodes );CHECK_ERR( rval );

        // material set
        rval = mb.create_meshset( MESHSET_SET, bcset );CHECK_ERR( rval );
        rval = mb.tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bcset_tag, MB_TAG_SPARSE | MB_TAG_CREAT );CHECK_ERR( rval );
        dum = 300;
        rval = mb.tag_set_data( bcset_tag, &bcset, 1, &dum );CHECK_ERR( rval );
        rval = mb.add_entities( bcset, &elems[0], elems.size() );CHECK_ERR( rval );
    }

    rval = mb.write_file( file_name, "MOAB" );CHECK_ERR( rval );
}

void test_read_elements_common( bool by_rank, int intervals, bool /* print_time */, const char* extra_opts )
{
    const char* file_name = by_rank ? "test_read_rank.h5m" : "test_read.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, intervals, numproc );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options( by_rank, extra_opts );
    rval = mb.load_file( file_name, 0, opt.c_str() );

    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );
    CHECK_ERR( rval );

    Tag part_tag;
    rval = mb.tag_get_handle( PARTITION_TAG, 1, MB_TYPE_INTEGER, part_tag );CHECK_ERR( rval );

    Range parts;
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &part_tag, 0, 1, parts );CHECK_ERR( rval );
    CHECK_EQUAL( 1, (int)parts.size() );
    EntityHandle part = parts.front();
    int id;
    rval = mb.tag_get_data( part_tag, &part, 1, &id );CHECK_ERR( rval );
    if( by_rank )
    {
        CHECK_EQUAL( rank, id );
    }

    // check that all of the elements in the mesh are in the part
    int npart, nall;
    rval = mb.get_number_entities_by_dimension( part, 3, npart );CHECK_ERR( rval );
    rval = mb.get_number_entities_by_dimension( 0, 3, nall );CHECK_ERR( rval );
    CHECK_EQUAL( npart, nall );

    // check that we have the correct vertices
    const double x_min = intervals * rank;
    const double x_max = intervals * ( rank + 1 );
    Range verts;
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );
    std::vector< double > coords( verts.size() );
    rval = mb.get_coords( verts, &coords[0], 0, 0 );CHECK_ERR( rval );
    const double act_x_min = *std::min_element( coords.begin(), coords.end() );
    const double act_x_max = *std::max_element( coords.begin(), coords.end() );
    CHECK_REAL_EQUAL( x_min, act_x_min, DBL_EPSILON );
    CHECK_REAL_EQUAL( x_max, act_x_max, DBL_EPSILON );
}

void test_read_time()
{
    const char file_name[] = "read_time.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, ReadIntervals, numproc, ReadBlocks );
    MPI_Barrier( MPI_COMM_WORLD );

    // Wall and CPU time for true parallel read, for read-and-delete,
    // and for broadcast-and-delete
    double times[6];
    clock_t tmp_t;

    // Time true parallel read
    Core moab;
    Interface& mb = moab;
    times[0] = MPI_Wtime();
    tmp_t = clock();
    std::string opt = get_read_options( true, READ_PART );
    rval = mb.load_file( file_name, 0, opt.c_str() );CHECK_ERR( rval );
    times[0] = MPI_Wtime() - times[0];
    times[1] = double( clock() - tmp_t ) / CLOCKS_PER_SEC;
    mb.delete_mesh();

    // Time read and delete
    Core moab2;
    Interface& mb2 = moab2;
    std::string opt2 = get_read_options( true, READ_DELETE );
    times[2] = MPI_Wtime();
    tmp_t = clock();
    rval = mb2.load_file( file_name, 0, opt2.c_str() );CHECK_ERR( rval );
    times[2] = MPI_Wtime() - times[2];
    times[3] = double( clock() - tmp_t ) / CLOCKS_PER_SEC;
    mb2.delete_mesh();

    // Time broadcast and delete
    Core moab3;
    Interface& mb3 = moab3;
    std::string opt3 = get_read_options( true, BCAST_DELETE );
    times[4] = MPI_Wtime();
    tmp_t = clock();
    rval = mb3.load_file( file_name, 0, opt3.c_str() );CHECK_ERR( rval );
    times[4] = MPI_Wtime() - times[4];
    times[5] = double( clock() - tmp_t ) / CLOCKS_PER_SEC;
    mb3.delete_mesh();

    double max_times[6] = { 0, 0, 0, 0, 0, 0 }, sum_times[6] = { 0, 0, 0, 0, 0, 0 };
    MPI_Reduce( times, max_times, 6, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );
    MPI_Reduce( times, sum_times, 6, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );
    MPI_Barrier( MPI_COMM_WORLD );
    if( 0 == rank )
    {
        printf( "%12s %12s %12s %12s\n", "", "READ_PART", "READ_DELETE", "BCAST_DELETE" );
        printf( "%12s %12g %12g %12g\n", "Max Wall", max_times[0], max_times[2], max_times[4] );
        printf( "%12s %12g %12g %12g\n", "Total Wall", sum_times[0], sum_times[2], sum_times[4] );
        printf( "%12s %12g %12g %12g\n", "Max CPU", max_times[1], max_times[3], max_times[5] );
        printf( "%12s %12g %12g %12g\n", "Total CPU", sum_times[1], sum_times[3], sum_times[5] );
    }

    MPI_Barrier( MPI_COMM_WORLD );
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );
}

void test_read_tags()
{
    const char tag_name[] = "test_tag_xx";
    const char file_name[] = "test_read_tags.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, DefaultReadIntervals, numproc, 1, tag_name );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options();
    rval = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );
    CHECK_ERR( rval );

    Tag tag;
    rval = mb.tag_get_handle( tag_name, 3, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );

    TagType storage;
    rval = mb.tag_get_type( tag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_DENSE, storage );

    Range verts, tagged;
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );
    rval = mb.get_entities_by_type_and_tag( 0, MBVERTEX, &tag, 0, 1, tagged );CHECK_ERR( rval );
    CHECK_EQUAL( verts, tagged );

    for( Range::iterator i = verts.begin(); i != verts.end(); ++i )
    {
        double coords[3];
        rval = mb.get_coords( &*i, 1, coords );CHECK_ERR( rval );
        int ijk[3];
        rval = mb.tag_get_data( tag, &*i, 1, ijk );CHECK_ERR( rval );

        CHECK( ijk[0] >= DefaultReadIntervals * rank );
        CHECK( ijk[0] <= DefaultReadIntervals * ( rank + 1 ) );
        CHECK( ijk[1] >= 0 );
        CHECK( ijk[1] <= DefaultReadIntervals );
        CHECK( ijk[2] >= 0 );
        CHECK( ijk[2] <= DefaultReadIntervals );

        CHECK_REAL_EQUAL( coords[0], (double)ijk[0], 1e-100 );
        CHECK_REAL_EQUAL( coords[1], (double)ijk[1], 1e-100 );
        CHECK_REAL_EQUAL( coords[2], (double)ijk[2], 1e-100 );
    }
}

void test_read_global_tags()
{
    const char tag_name[] = "test_tag_g";
    const char file_name[] = "test_read_global_tags.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;
    const int def_val = 0xdeadcad;
    const int global_val = -11;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, 1, numproc, 1, 0, 0, tag_name, &global_val, &def_val );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options();
    rval = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );
    CHECK_ERR( rval );

    Tag tag;
    rval = mb.tag_get_handle( tag_name, 1, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );

    TagType storage;
    rval = mb.tag_get_type( tag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_DENSE, storage );

    int mesh_def_val, mesh_gbl_val;
    rval = mb.tag_get_default_value( tag, &mesh_def_val );CHECK_ERR( rval );
    CHECK_EQUAL( def_val, mesh_def_val );
    EntityHandle root = 0;
    rval = mb.tag_get_data( tag, &root, 1, &mesh_gbl_val );CHECK_ERR( rval );
    CHECK_EQUAL( global_val, mesh_gbl_val );
}

void test_read_sets_common( const char* extra_opts )
{
    const char tag_name[] = "test_tag_s";
    const char file_name[] = "test_read_sets.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, DefaultReadIntervals, numproc, 1, 0, tag_name );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options( extra_opts );
    rval = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );
    CHECK_ERR( rval );

    Tag tag;
    rval = mb.tag_get_handle( tag_name, 2, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );

    TagType storage;
    rval = mb.tag_get_type( tag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_SPARSE, storage );

    const int iv = DefaultReadIntervals + 1;
    Range sets;
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, sets );CHECK_ERR( rval );
    CHECK_EQUAL( ( iv * iv ), (int)sets.size() );

    for( Range::iterator i = sets.begin(); i != sets.end(); ++i )
    {
        int ij[2];
        rval = mb.tag_get_data( tag, &*i, 1, &ij );CHECK_ERR( rval );

        CHECK( ij[0] >= DefaultReadIntervals * rank );
        CHECK( ij[0] <= DefaultReadIntervals * ( rank + 1 ) );
        CHECK( ij[1] >= 0 );
        CHECK( ij[1] <= DefaultReadIntervals );

        Range contents;
        rval = mb.get_entities_by_handle( *i, contents );CHECK_ERR( rval );
        CHECK( contents.all_of_type( MBVERTEX ) );
        CHECK_EQUAL( iv, (int)contents.size() );

        for( Range::iterator v = contents.begin(); v != contents.end(); ++v )
        {
            double coords[3];
            rval = mb.get_coords( &*v, 1, coords );CHECK_ERR( rval );
            CHECK_REAL_EQUAL( coords[0], (double)ij[0], 1e-100 );
            CHECK_REAL_EQUAL( coords[1], (double)ij[1], 1e-100 );
        }
    }
}

void test_read_bc_sets()
{
    // const char tag_name[] = "test_tag_s";
    const char file_name[] = "test_read_sets.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank )
        create_input_file( file_name, DefaultReadIntervals, numproc, 1, NULL, NULL, NULL, NULL, NULL, true );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options();
    rval = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );
    CHECK_ERR( rval );

    Tag tag;
    int num_ents[3], global_num_ents[3] = { 0, 0, 0 };
    Range sets, contents;
    const char* names[] = { NEUMANN_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME, MATERIAL_SET_TAG_NAME };
    int vints = DefaultReadIntervals + 1;
    int expected_num_ents[] = { ( numproc * 4 + 2 ) * DefaultReadIntervals * DefaultReadIntervals,
                                ( ( numproc * 4 + 2 ) * ( vints - 2 ) * ( vints - 2 ) + 12 * numproc * ( vints - 2 ) +
                                  8 * numproc ),
                                numproc * DefaultReadIntervals * DefaultReadIntervals * DefaultReadIntervals };

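    // The expected totals correspond to the skin quads of the whole bar of
    // hex blocks, the skin vertices (with vertices on part interfaces counted
    // once per rank that reads them, since per-rank counts are summed below),
    // and every hex in the material set.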
    for( int i = 0; i < 3; i++ )
    {
        rval = mb.tag_get_handle( names[i], 1, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );
        rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, sets );CHECK_ERR( rval );
        CHECK_EQUAL( 1, (int)sets.size() );
        rval = mb.get_entities_by_handle( *sets.begin(), contents, true );CHECK_ERR( rval );
        num_ents[i] = contents.size();
        sets.clear();
        contents.clear();
    }

    MPI_Reduce( num_ents, global_num_ents, 3, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
    if( 0 == rank )
    {
        // std::cout << "Global:" << global_num_ents[0] << " " << global_num_ents[1] << " "
        //           << global_num_ents[2] << std::endl;
        // std::cout << "Expected:" << expected_num_ents[0] << " " << expected_num_ents[1] << " "
        //           << expected_num_ents[2] << std::endl;

        for( int i = 0; i < 3; i++ )
        {
            CHECK_EQUAL( global_num_ents[i], expected_num_ents[i] );
        }
    }
}

void test_write_different_element_types()
{
    const char file_name[] = "test_write_different_element_types.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    const EntityType topos[] = { MBEDGE, MBEDGE, MBQUAD, MBTRI, MBQUAD, MBTRI,
                                 MBTET, MBHEX, MBPRISM, MBPYRAMID, MBHEX, MBHEX };
    const int verts[] = { 2, 3, 4, 3, 9, 6, 4, 8, 6, 5, 20, 27 };
    const int ntypes = sizeof( topos ) / sizeof( topos[0] );

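    // Each rank creates one element, cycling through these type/vertex-count
    // pairs (linear and higher-order variants, e.g. a 9-node quad or a
    // 27-node hex), so the serial re-read on the root should see numproc
    // elements distributed across the list.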
    const EntityType mtype = topos[rank % ntypes];
    const int nvert = verts[rank % ntypes];
    std::vector< EntityHandle > conn( nvert );
    for( int i = 0; i < nvert; ++i )
    {
        const double coords[] = { static_cast< double >( rank ), static_cast< double >( i ), 0 };
        rval = mb.create_vertex( coords, conn[i] );CHECK_ERR( rval );
    }
    EntityHandle handle;
    rval = mb.create_element( mtype, &conn[0], nvert, handle );CHECK_ERR( rval );

    save_and_load_on_root( mb, file_name );

    if( rank ) return;

    for( int i = 0; i < ntypes; ++i )
    {
        const int num_exp = numproc / ntypes + ( i < ( numproc % ntypes ) ? 1 : 0 );
        Range ents;
        rval = mb.get_entities_by_type( 0, topos[i], ents );CHECK_ERR( rval );
        int num = 0;
        for( Range::iterator e = ents.begin(); e != ents.end(); ++e )
        {
            const EntityHandle* junk;
            int len;
            rval = mb.get_connectivity( *e, junk, len, false );
            if( len == verts[i] ) ++num;
        }

        CHECK_EQUAL( num_exp, num );
    }
}

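// Construct a tag whose name, data type, storage class, length,
// variable-length flag, and default value are all derived deterministically
// from 'rank', so writer ranks and the root reader can create and verify
// identical tags without communicating.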
Tag get_tag( Interface& mb, int rank, bool create )
{
    DataType type = (DataType)( rank % ( MB_MAX_DATA_TYPE + 1 ) );
    TagType storage = ( type == MB_TYPE_BIT ) ? MB_TAG_BIT : ( rank % 2 ) ? MB_TAG_DENSE : MB_TAG_SPARSE;
    int len = rank % 3 + 1;
    TagType cbit = create ? MB_TAG_EXCL : (TagType)0;
    TagType vbit = rank % 4 == 1 && storage != MB_TAG_BIT ? MB_TAG_VARLEN : (TagType)0;
    std::ostringstream name;
    name << "TestTag" << rank;
    const void* defval = 0;
    const int defint[] = { static_cast< int >( rank ), static_cast< int >( rank / 2 ), static_cast< int >( rank + 1 ),
                           static_cast< int >( rank - 1 ) };
    const double defreal[] = { 0.1 * rank, 1.0 / rank, static_cast< double >( -rank ), static_cast< double >( rank ) };
    const int defhandle[] = { 0, 0, 0, 0 };
    const unsigned char defbit = 0x1;
    const char defopq[] = "Jason";
    if( rank % 4 < 2 )
    {
        switch( type )
        {
            case MB_TYPE_BIT:
                defval = &defbit;
                break;
            case MB_TYPE_INTEGER:
                defval = defint;
                break;
            case MB_TYPE_DOUBLE:
                defval = defreal;
                break;
            case MB_TYPE_HANDLE:
                defval = defhandle;
                break;
            case MB_TYPE_OPAQUE:
                defval = defopq;
                break;
        }
    }

    Tag result;
    ErrorCode rval = mb.tag_get_handle( name.str().c_str(), len, type, result, storage | cbit | vbit, defval );CHECK_ERR( rval );
    return result;
}

void test_write_different_tags()
{
    const char file_name[] = "test_write_different_tags.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;

    const int min_tags = 8;
    get_tag( mb, rank, true );
    for( int idx = rank + numproc; idx < min_tags; idx += numproc )
        get_tag( mb, idx, true );

    save_and_load_on_root( mb, file_name );

    if( 0 == rank )
    {
        for( int i = 0; i < std::max( min_tags, numproc ); ++i )
            get_tag( mb, i, false );
    }
}

void test_write_polygons()
{
    const char file_name[] = "test_write_polygons.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // create a polygon on each process
    const double r = 0.70710678118654757;
    const double points[8][3] = {
        { 1, 0, static_cast< double >( rank ) },
        { static_cast< double >( r ), static_cast< double >( r ), static_cast< double >( rank ) },
        { 0, 1, static_cast< double >( rank ) },
        { static_cast< double >( -r ), static_cast< double >( r ), static_cast< double >( rank ) },
        { -1, 0, static_cast< double >( rank ) },
        { static_cast< double >( -r ), static_cast< double >( -r ), static_cast< double >( rank ) },
        { 0, -1, static_cast< double >( rank ) },
        { static_cast< double >( r ), static_cast< double >( -r ), static_cast< double >( rank ) } };
    const int nvtx = rank % 4 + 5;
    std::vector< EntityHandle > conn( nvtx );
    for( int i = 0; i < nvtx; ++i )
    {
        rval = mb.create_vertex( points[i], conn[i] );CHECK_ERR( rval );
    }

    EntityHandle h;
    rval = mb.create_element( MBPOLYGON, &conn[0], nvtx, h );CHECK_ERR( rval );

    save_and_load_on_root( mb, file_name );

    if( rank != 0 ) return;

    // check results on root process

    // determine which polygon was created by which proc by
    // looking at the z-coordinate of the vertices
    Range range;
    rval = mb.get_entities_by_type( 0, MBPOLYGON, range );CHECK_ERR( rval );
    std::vector< EntityHandle > poly( numproc, 0 );
    CHECK_EQUAL( numproc, (int)range.size() );
    for( Range::iterator it = range.begin(); it != range.end(); ++it )
    {
        const EntityHandle* conn_arr;
        int len;
        rval = mb.get_connectivity( *it, conn_arr, len );CHECK_ERR( rval );
        double coords[3];
        rval = mb.get_coords( conn_arr, 1, coords );CHECK_ERR( rval );
        int proc = (int)( coords[2] );
        CHECK_EQUAL( (EntityHandle)0, poly[proc] );
        poly[proc] = *it;
    }

    // check that each poly has the expected number of vertices
    for( int i = 0; i < numproc; ++i )
    {
        const EntityHandle* conn_arr;
        int len;
        rval = mb.get_connectivity( poly[i], conn_arr, len );CHECK_ERR( rval );
        CHECK_EQUAL( i % 4 + 5, len );
    }
}

// Some processes have no mesh to write
void test_write_unbalanced()
{
    const char file_name[] = "test_write_unbalanced.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;
    Tag idtag = mb.globalId_tag();

    // create a shared set
    const int two = 2;
    Range entities, sets;

    EntityHandle set;
    rval = mb.create_meshset( MESHSET_SET, set );CHECK_ERR( rval );
    rval = mb.tag_set_data( idtag, &set, 1, &two );CHECK_ERR( rval );
    sets.insert( set );

    // create a quad on every odd processor
    if( rank % 2 )
    {
        const double coords[4][3] = { { static_cast< double >( rank ), 0, 0 },
                                      { static_cast< double >( rank + 2 ), 0, 0 },
                                      { static_cast< double >( rank + 2 ), 2, 0 },
                                      { static_cast< double >( rank ), 2, 0 } };
        EntityHandle conn[4], quad;
        for( int i = 0; i < 4; ++i )
            mb.create_vertex( coords[i], conn[i] );
        mb.create_element( MBQUAD, conn, 4, quad );

        const int ids[4] = { rank, rank + 2, rank + 3, rank + 1 };
        rval = mb.tag_set_data( idtag, conn, 4, ids );CHECK_ERR( rval );

        rval = mb.add_entities( set, &quad, 1 );CHECK_ERR( rval );

        entities.insert( quad );
    }

    // set up sharing data
    ParallelComm* pcomm = ParallelComm::get_pcomm( &mb, 0 );
    if( 0 == pcomm ) pcomm = new ParallelComm( &mb, MPI_COMM_WORLD );
    rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, NULL, &idtag );CHECK_ERR( rval );
    rval = pcomm->resolve_shared_sets( sets, idtag );CHECK_ERR( rval );

    // do parallel write and serial load
    save_and_load_on_root( mb, file_name );

    if( rank != 0 ) return;

    // check that we got what we expected
    Range quads, verts;
    rval = mb.get_entities_by_type( 0, MBQUAD, quads );CHECK_ERR( rval );
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );

    const size_t nquads = numproc / 2;
    const size_t nverts = nquads ? 2 + 2 * nquads : 0;
    CHECK_EQUAL( nquads, quads.size() );
    CHECK_EQUAL( nverts, verts.size() );

    rval = mb.tag_get_handle( GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, idtag );CHECK_ERR( rval );
    sets.clear();
    const void* vals[] = { &two };
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &idtag, vals, 1, sets );CHECK_ERR( rval );
    CHECK_EQUAL( (size_t)1, sets.size() );

    entities.clear();
    rval = mb.get_entities_by_handle( sets.front(), entities );CHECK_ERR( rval );
    CHECK_EQUAL( nquads, entities.size() );
    CHECK_EQUAL( quads, entities );
}

// this test will load a file that has 2 partitions (mix.h5m),
// one partition with triangles, and one with triangles and quads;
// a dense tag created on this should be dense in the file
//
void test_write_dense_tags()
{
    int err, rank, size;
    ErrorCode rval;
    err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    CHECK( !err );
    err = MPI_Comm_size( MPI_COMM_WORLD, &size );
    CHECK( !err );

    Core moab;
    rval = moab.load_file( InputMix, 0,
                           "PARALLEL=READ_PART;"
                           "PARTITION=PARALLEL_PARTITION;"
                           "PARALLEL_RESOLVE_SHARED_ENTS" );CHECK_ERR( rval );

    // create a dense tag on all elements, then write the output in parallel;
    // load the file again, and test the type of the tag
    Range elems;
    moab.get_entities_by_dimension( 0, 2, elems );

    const char* tagname = "element_tag";
    const double defVal = 0.;
    Tag fieldTag;
    rval = moab.tag_get_handle( tagname, 1, MB_TYPE_DOUBLE, fieldTag, MB_TAG_DENSE | MB_TAG_CREAT, &defVal );CHECK_ERR( rval );

    int numElems = (int)elems.size();

    for( int i = 0; i < numElems; i++ )
    {
        EntityHandle elem = elems[i];
        double xyz[3];
        moab.get_coords( &elem, 1, xyz );
        // some value
        double fieldValue = sqrt( xyz[0] * xyz[0] + xyz[1] * xyz[1] + xyz[2] * xyz[2] );
        moab.tag_set_data( fieldTag, &elem, 1, &fieldValue );
    }

    // write the file in parallel
    rval = moab.write_file( "newfile.h5m", 0, "PARALLEL=WRITE_PART" );CHECK_ERR( rval );

    // now read the new file, in a new instance, and test the tag type

    Core moab2;
    rval = moab2.load_file( "newfile.h5m", 0,
                            "PARALLEL=READ_PART;"
                            "PARTITION=PARALLEL_PARTITION;"
                            "PARALLEL_RESOLVE_SHARED_ENTS" );CHECK_ERR( rval );

    // find the element tag
    Tag found_tag;
    rval = moab2.tag_get_handle( tagname, 1, MB_TYPE_DOUBLE, found_tag );CHECK_ERR( rval );
    TagType tagt;
    rval = moab2.tag_get_type( found_tag, tagt );CHECK_ERR( rval );
    CHECK( tagt == MB_TAG_DENSE );
}
// this test will load a file that has 2 partitions (oneside.h5m)
// and one side set, that is adjacent to one part only
//
void test_read_non_adjs_side()
{
    int err, rank, size;
    ErrorCode rval;
    err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    CHECK( !err );
    err = MPI_Comm_size( MPI_COMM_WORLD, &size );
    CHECK( !err );

    Core moab;
    rval = moab.load_file( InputOneSide, 0,
                           "PARALLEL=READ_PART;"
                           "PARTITION=PARALLEL_PARTITION;"
                           "PARALLEL_RESOLVE_SHARED_ENTS" );CHECK_ERR( rval );

    return;
}