MOAB: Mesh Oriented datABase  (version 5.5.0)
parallel_unit_tests.cpp
1 #include "moab/ParallelComm.hpp"
2 #include "MBParallelConventions.h"
3 #include "moab/ParCommGraph.hpp"
4 #include "ReadParallel.hpp"
5 #include "moab/FileOptions.hpp"
6 #include "MBTagConventions.hpp"
7 #include "moab/Core.hpp"
8 #include "moab_mpi.h"
9 #include "TestUtil.hpp"
10 
11 #include <iostream>
12 #include <algorithm>
13 #include <map>
14 #include <sstream>
15 #include <cassert>
16 #if !defined( _MSC_VER ) && !defined( __MINGW32__ )
17 #include <unistd.h>
18 #endif
19 
20 using namespace moab;
21 
22 #define CHKERR( a ) \
23  do \
24  { \
25  ErrorCode val = ( a ); \
26  if( MB_SUCCESS != val ) \
27  { \
28  std::cerr << "Error code " << val << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
29  return val; \
30  } \
31  } while( false )
32 
33 #define PCHECK( A ) \
34  if( is_any_proc_error( !( A ) ) ) return report_error( __FILE__, __LINE__ )
35 
36 ErrorCode report_error( const char* file, int line )
37 {
38  std::cerr << "Failure at " << file << ':' << line << std::endl;
39  return MB_FAILURE;
40 }
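// Note on the two macros above: CHKERR() checks a local ErrorCode and returns from the
// current function immediately, while PCHECK() first combines the per-rank error flag
// across MPI_COMM_WORLD (via is_any_proc_error, an MPI_Allreduce with MPI_MAX), so every
// rank reaches the same pass/fail verdict and no rank bails out while the others sit in
// a later collective call.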
41 
42 /**************************************************************************
43  Utility Method Declarations
44  **************************************************************************/
45 
46 // Get processors an entity is shared with.
47 ErrorCode get_sharing_processors( Interface& moab, EntityHandle entity, std::vector< int >& other_procs_out );
48 
49 // Create a parallel mesh
50 //
51 // Each processor will create four quads.
52 // Groups of four quads will be arranged as follows:
53 // +------+------+------+------+------+-----
54 // |             |             |
55 // |             |             |
56 // +    Proc 0   +    Proc 2   +    Proc 4
57 // |             |             |
58 // |             |             |
59 // +------+------+------+------+------+-----
60 // |             |             |
61 // |             |             |
62 // +    Proc 1   +    Proc 3   +    Proc 5
63 // |             |             |
64 // |             |             |
65 // +------+------+------+------+------+-----
66 //
67 // Vertices will be enumerated as follows:
68 // 1------6-----11-----16-----21-----26-----
69 // |             |             |
70 // |             |             |
71 // 2      7     12     17     22     27
72 // |             |             |
73 // |             |             |
74 // 3------8-----13-----18-----23-----28-----
75 // |             |             |
76 // |             |             |
77 // 4      9     14     19     24     29
78 // |             |             |
79 // |             |             |
80 // 5-----10-----15-----20-----25-----30-----
81 //
82 // Element IDs will be 4*rank+1 through 4*rank+4
83 //
84 // If output_sets is non-null, it will be populated with 2 or 3
85 // set handles. A set handle is created for each group of four
86 // adjacent processes, such that one is created across procs 0 to 3,
87 // one is created for procs 2 to 5, etc. An additional set is created
88 // that spans all of the processes. The set spanning all procs is
89 // returned first. The other two sets are returned in the order of the
90 // largest contained process rank, where the last entry is zero if
91 // there is only one additional set.
92 ErrorCode parallel_create_mesh( Interface& mb,
93  int output_vertx_ids[9],
94  EntityHandle output_vertex_handles[9],
95  Range& output_elements,
96  EntityHandle output_sets[3] = 0 );
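// A typical call sequence, as used by the tests below (sketch, assuming a Core-backed
// Interface 'mb' and a ParallelComm 'pcomm' on MPI_COMM_WORLD):
//   Range quads;
//   EntityHandle verts[9];
//   int ids[9];
//   rval = parallel_create_mesh( mb, ids, verts, quads );
//   rval = pcomm.resolve_shared_ents( 0, quads, 2, 1 );
// after which vertices on the seams between neighboring 2x2 quad patches are resolved
// as shared entities.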
97 
98 // Test if is_my_error is non-zero on any processor in MPI_COMM_WORLD
99 int is_any_proc_error( int is_my_error );
100 
101 /**************************************************************************
102  Test Declarations
103  **************************************************************************/
104 
105 // Check consistency of sharing data. (E.g. compare sharing procs for
106 // vertices to that of adjacent elements, compare sharing data for
107 // interfaces with that of contained entities, etc.)
108 ErrorCode test_elements_on_several_procs( const char* filename );
109 // Test correct ghosting of elements
110 ErrorCode test_ghost_elements_3_2_1( const char* filename );
111 ErrorCode test_ghost_elements_3_2_2( const char* filename );
112 ErrorCode test_ghost_elements_3_0_1( const char* filename );
113 ErrorCode test_ghost_elements_2_0_1( const char* filename );
114 // Test exchange of tag data on ghost elements
115 ErrorCode test_ghost_tag_exchange( const char* filename );
116 // Bug where exchange_tags fails if dense tag cannot be queried
117 // for all ghost entities (e.g. no default value)
119 // Test owners for interface entities
120 ErrorCode test_interface_owners( const char* );
121 // Test data for shared interface entities with one level of ghosting
123 // Verify all sharing data for vertices with one level of ghosting
125 // Test assignment of global IDs
126 ErrorCode test_assign_global_ids( const char* );
127 // Test shared sets
128 ErrorCode test_shared_sets( const char* );
129 // Test reduce_tags
130 ErrorCode test_reduce_tags( const char* );
131 // Test reduce_tags failures
132 ErrorCode test_reduce_tag_failures( const char* );
133 // Test reduce_tags with explicit destination tag
134 ErrorCode test_reduce_tag_explicit_dest( const char* );
135 // Test delete_entities
136 ErrorCode test_delete_entities( const char* );
137 // Test ghosting polyhedra
138 ErrorCode test_ghost_polyhedra( const char* );
139 // Test failed read with too few parts in partition
140 ErrorCode test_too_few_parts( const char* );
141 // Test broken sequences due to ghosting
142 ErrorCode test_sequences_after_ghosting( const char* );
143 // Test trivial partition in use by iMOAB
145 
146 /**************************************************************************
147  Main Method
148  **************************************************************************/
149 
150 #define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )
151 
152 int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
153 {
154  ErrorCode result = ( *func )( file_name );
155  int is_err = is_any_proc_error( ( MB_SUCCESS != result ) );
156  int rank;
157  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
158  if( rank == 0 )
159  {
160  if( is_err )
161  std::cout << func_name << " : FAILED!!" << std::endl;
162  else
163  std::cout << func_name << " : success" << std::endl;
164  }
165 
166  return is_err;
167 }
168 
169 int main( int argc, char* argv[] )
170 {
171  int rank, size;
172  MPI_Init( &argc, &argv );
173  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
174  MPI_Comm_size( MPI_COMM_WORLD, &size );
175 
176  int pause_proc = -1;
177  std::string filename;
178  for( int i = 1; i < argc; ++i )
179  {
180  if( !strcmp( argv[i], "-p" ) )
181  {
182  ++i;
183  assert( i < argc );
184  pause_proc = atoi( argv[i] );
185  }
186  else if( !filename.size() )
187  {
188  filename = std::string( argv[i] );
189  }
190  else
191  {
192  std::cerr << "Invalid arg: \"" << argv[i] << '"' << std::endl
193  << "Usage: " << argv[0] << " [-p <rank>] [<filename>]" << std::endl;
194  exit( 1 );
195  }
196  }
197 
198  if( !filename.size() )
199  {
200 #ifdef MOAB_HAVE_HDF5
201  filename = TestDir + "unittest/64bricks_512hex.h5m";
202 #else
203  filename = TestDir + "unittest/64bricks_512hex.vtk";
204 #endif
205  }
206  std::cout << "Loading " << filename << "..\n";
207 #ifdef MOAB_HAVE_HDF5
208  std::string filename2 = TestDir + "unittest/64bricks_1khex.h5m";
209  std::string filename3 = TestDir + "unittest/twoPolyh.h5m";
210  std::string filename4 = TestDir + "unittest/onepart.h5m";
211 #endif
212 
213  if( pause_proc != -1 )
214  {
215 #if !defined( _MSC_VER ) && !defined( __MINGW32__ )
216  std::cout << "Processor " << rank << " of " << size << " with PID " << getpid() << std::endl;
217  std::cout.flush();
218 #endif
219  // loop forever on requested processor, giving the user time
220 // to attach a debugger. Once the debugger is attached, the user
221  // can change 'pause'. E.g. on gdb do "set var pause = 0"
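 // A typical (hypothetical) session: run e.g. "mpiexec -np 4 ./parallel_unit_tests -p 2",
 // note the PID printed by rank 2, attach with "gdb -p <PID>", then issue
 // "set var pause = 0" and "continue" to let the run proceed.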
222  if( pause_proc == rank )
223  {
224  volatile int pause = 1;
225  while( pause )
226  ;
227  }
228 
229  MPI_Barrier( MPI_COMM_WORLD );
230  std::cout << "Processor " << rank << " resuming" << std::endl;
231  }
232 
233  int num_errors = 0;
234 #ifdef MOAB_HAVE_HDF5
235  num_errors += RUN_TEST_ARG2( test_elements_on_several_procs, filename.c_str() );
236  num_errors += RUN_TEST_ARG2( test_ghost_elements_3_2_1, filename.c_str() );
237  num_errors += RUN_TEST_ARG2( test_ghost_elements_3_2_2, filename.c_str() );
238  num_errors += RUN_TEST_ARG2( test_ghost_elements_3_0_1, filename.c_str() );
239  num_errors += RUN_TEST_ARG2( test_ghost_elements_2_0_1, filename.c_str() );
240  num_errors += RUN_TEST_ARG2( test_ghost_tag_exchange, filename.c_str() );
242  num_errors += RUN_TEST_ARG2( test_delete_entities, filename2.c_str() );
243  num_errors += RUN_TEST_ARG2( test_sequences_after_ghosting, filename2.c_str() );
244  if( 2 >= size ) // run this one only on one or 2 processors; the file has only 2 parts in partition
245  num_errors += RUN_TEST_ARG2( test_ghost_polyhedra, filename3.c_str() );
246  num_errors += RUN_TEST_ARG2( test_too_few_parts, filename4.c_str() );
247 #endif
248  num_errors += RUN_TEST_ARG2( test_assign_global_ids, 0 );
249  num_errors += RUN_TEST_ARG2( test_shared_sets, 0 );
250  num_errors += RUN_TEST_ARG2( test_reduce_tags, 0 );
251  num_errors += RUN_TEST_ARG2( test_reduce_tag_failures, 0 );
252  num_errors += RUN_TEST_ARG2( test_reduce_tag_explicit_dest, 0 );
253  num_errors += RUN_TEST_ARG2( test_interface_owners, 0 );
256  num_errors += RUN_TEST( test_trivial_partition );
257  if( rank == 0 )
258  {
259  if( !num_errors )
260  std::cout << "All tests passed" << std::endl;
261  else
262  std::cout << num_errors << " TESTS FAILED!" << std::endl;
263  }
264 
265  MPI_Finalize();
266  return num_errors;
267 }
268 
269 /**************************************************************************
270  Utility Method Implementations
271  **************************************************************************/
272 
273 ErrorCode get_sharing_processors( Interface& moab, EntityHandle entity, std::vector< int >& other_procs_out )
274 {
275  ErrorCode rval;
276 
277  // get tags for parallel data
278  Tag sharedp_tag, sharedps_tag, sharedh_tag, sharedhs_tag, pstatus_tag;
279  rval = moab.tag_get_handle( PARALLEL_SHARED_PROC_TAG_NAME, 1, MB_TYPE_INTEGER, sharedp_tag );CHKERR( rval );
280  rval = moab.tag_get_handle( PARALLEL_SHARED_PROCS_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_INTEGER, sharedps_tag );CHKERR( rval );
281  rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLE_TAG_NAME, 1, MB_TYPE_HANDLE, sharedh_tag );CHKERR( rval );
282  rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLES_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_HANDLE, sharedhs_tag );CHKERR( rval );
283  rval = moab.tag_get_handle( PARALLEL_STATUS_TAG_NAME, 1, MB_TYPE_OPAQUE, pstatus_tag );CHKERR( rval );
284 
285  other_procs_out.clear();
286  char status;
287  rval = moab.tag_get_data( pstatus_tag, &entity, 1, &status );CHKERR( rval );
288  if( !( status & PSTATUS_SHARED ) ) return MB_SUCCESS;
289 
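 // MOAB convention: if the entity is shared with exactly one other process, that rank is
 // stored in the single-valued SHARED_PROC tag; otherwise SHARED_PROC holds -1 and the
 // full list lives in the SHARED_PROCS tag, padded/terminated with -1. The two branches
 // below follow that convention.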
290  int proc_id;
291  rval = moab.tag_get_data( sharedp_tag, &entity, 1, &proc_id );CHKERR( rval );
292  if( proc_id >= 0 )
293  {
294  other_procs_out.push_back( proc_id );
295  return MB_SUCCESS;
296  }
297 
298  int procs[MAX_SHARING_PROCS];
299  rval = moab.tag_get_data( sharedps_tag, &entity, 1, procs );CHKERR( rval );
300  for( int i = 0; i < MAX_SHARING_PROCS && procs[i] >= 0; ++i )
301  other_procs_out.push_back( procs[i] );
302  return MB_SUCCESS;
303 }
304 
305 int is_any_proc_error( int is_my_error )
306 {
307  int result = 0;
308  int err = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
309  return err || result;
310 }
311 
312 ErrorCode parallel_create_mesh( Interface& mb,
313  int vtx_ids[9],
314  EntityHandle vtx_handles[9],
315  Range& range,
316  EntityHandle* entity_sets )
317 {
318  // Each processor will create four quads.
319  // Groups of four quads will be arranged as follows:
320  // +------+------+------+------+------+-----
321 // |             |             |
322 // |             |             |
323 // +    Proc 0   +    Proc 2   +    Proc 4
324 // |             |             |
325 // |             |             |
326 // +------+------+------+------+------+-----
327 // |             |             |
328 // |             |             |
329 // +    Proc 1   +    Proc 3   +    Proc 5
330 // |             |             |
331 // |             |             |
332  // +------+------+------+------+------+-----
333  //
334  // Vertices will be enumerated as follows:
335  // 1------6-----11-----16-----21-----26-----
336 // |             |             |
337 // |             |             |
338 // 2      7     12     17     22     27
339 // |             |             |
340 // |             |             |
341 // 3------8-----13-----18-----23-----28-----
342 // |             |             |
343 // |             |             |
344 // 4      9     14     19     24     29
345 // |             |             |
346 // |             |             |
347  // 5-----10-----15-----20-----25-----30-----
348  //
349 // Element IDs will be 4*rank+1 through 4*rank+4
350 
351  int size, rank;
352  MPI_Comm_size( MPI_COMM_WORLD, &size );
353  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
354 
355  const int first_vtx_id = 10 * ( rank / 2 ) + 2 * ( rank % 2 ) + 1;
356  const double x = 2.0 * ( rank / 2 );
357  const double y = 2.0 * ( rank % 2 );
358 
359  // create vertices
360  const int idoff = ( size % 2 && rank / 2 == size / 2 ) ? 0 : 2;
361  const int idoff1 = rank ? 2 : idoff;
362  const int idoff2 = idoff1 + idoff;
363  const int ids[9] = { first_vtx_id, first_vtx_id + 3 + idoff1, first_vtx_id + 6 + idoff2,
364  first_vtx_id + 1, first_vtx_id + 4 + idoff1, first_vtx_id + 7 + idoff2,
365  first_vtx_id + 2, first_vtx_id + 5 + idoff1, first_vtx_id + 8 + idoff2 };
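 // The ID arithmetic mirrors the numbering diagram above: vertex IDs run top-to-bottom
 // within each 5-vertex column of the global grid, first_vtx_id is this process's
 // upper-left corner, and the idoff terms account for the IDs used by the neighboring
 // process rows/columns (collapsing when those neighbors do not exist), so vertices on
 // inter-process boundaries get the same global ID on every process that shares them.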
366  memcpy( vtx_ids, ids, sizeof( ids ) );
367  const double coords[27] = { x, y, 0, x + 1, y, 0, x + 2, y, 0, x, y + 1, 0, x + 1, y + 1,
368  0, x + 2, y + 1, 0, x, y + 2, 0, x + 1, y + 2, 0, x + 2, y + 2, 0 };
369 
370  ErrorCode rval;
371  Tag id_tag;
372 
373  rval = mb.create_vertices( coords, 9, range );CHKERR( rval );
374  assert( range.size() == 9 );
375  std::copy( range.begin(), range.end(), vtx_handles );
376  range.clear();
377  id_tag = mb.globalId_tag();
378  rval = mb.tag_set_data( id_tag, vtx_handles, 9, &ids );CHKERR( rval );
379 
380  const EntityHandle conn[4][4] = { { vtx_handles[0], vtx_handles[3], vtx_handles[4], vtx_handles[1] },
381  { vtx_handles[1], vtx_handles[4], vtx_handles[5], vtx_handles[2] },
382  { vtx_handles[3], vtx_handles[6], vtx_handles[7], vtx_handles[4] },
383  { vtx_handles[4], vtx_handles[7], vtx_handles[8], vtx_handles[5] } };
384  for( int i = 0; i < 4; ++i )
385  {
386  const int id = 4 * rank + i + 1;
387  EntityHandle h;
388  rval = mb.create_element( MBQUAD, conn[i], 4, h );CHKERR( rval );
389  range.insert( h );
390  rval = mb.tag_set_data( id_tag, &h, 1, &id );CHKERR( rval );
391  }
392 
393  if( !entity_sets ) return MB_SUCCESS;
394 
395  int set_ids[3] = { size + 1, rank / 2, rank / 2 + 1 };
396  int nsets = 0;
397  rval = mb.create_meshset( MESHSET_SET, entity_sets[nsets++] );CHKERR( rval );
398  rval = mb.create_meshset( MESHSET_SET, entity_sets[nsets++] );CHKERR( rval );
399  entity_sets[nsets] = 0;
400  if( rank < 2 )
401  { // if first (left-most) column
402  set_ids[1] = set_ids[2];
403  }
404  else if( rank / 2 < ( size - 1 ) / 2 )
405  { // if not last (right-most) column
406  rval = mb.create_meshset( MESHSET_SET, entity_sets[nsets++] );CHKERR( rval );
407  }
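 // Resulting set IDs: size+1 on every process (the set spanning all procs), plus column
 // sets with IDs rank/2 and rank/2+1 (the first column uses only rank/2+1, the last
 // column only rank/2), so each column-set ID is shared by two adjacent process columns,
 // i.e. by up to four processes.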
408  for( int i = 0; i < nsets; ++i )
409  {
410  rval = mb.add_entities( entity_sets[i], range );CHKERR( rval );
411  }
412  rval = mb.tag_set_data( id_tag, entity_sets, nsets, set_ids );CHKERR( rval );
413 
414  return MB_SUCCESS;
415 }
416 
417 /**************************************************************************
418  Test Implementations
419  **************************************************************************/
420 
421 ErrorCode test_elements_on_several_procs( const char* filename )
422 {
423  Core mb_instance;
424  Interface& moab = mb_instance;
425  ErrorCode rval;
426  const char* geom_names[] = { "vertex", "curve", "surface", "volume", "unknown" };
427 
428  rval = moab.load_file( filename, 0,
429  "PARALLEL=READ_DELETE;"
430  "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
431  "PARTITION_DISTRIBUTE;"
432  "PARALLEL_RESOLVE_SHARED_ENTS;"
433  "PARALLEL_SEQUENCE_FACTOR=1.4" );CHKERR( rval );
434 
435  // test contents of interface sets against sharedEnts structure in pcomm;
436  int my_error = 0;
437  ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
438  rval = pcomm->check_all_shared_handles();
439  if( MB_SUCCESS != rval )
440  {
441  my_error = 1;
442  std::cerr << "check_all_shared_handles test failed on proc " << pcomm->proc_config().proc_rank() << std::endl;
443  }
444  PCHECK( !my_error );
445 
446  // check adjacencies just to make sure they're consistent
447  rval = mb_instance.check_adjacencies();
448  if( MB_SUCCESS != rval ) my_error = 1;
449  PCHECK( !my_error );
450 
451  Tag geom_tag, id_tag;
452  rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom_tag );CHKERR( rval );
453  id_tag = moab.globalId_tag();
454 
455  // Because we partitioned based on geometric volume sets, then for each
456  // lower-dimension geometric entity set all of the contained entities of
457  // the same dimension must be shared by the same set of processors
458  Range shared, invalid;
459  for( int dim = 2; dim > 0; --dim )
460  {
461  Range geom_sets;
462  const void* tagvals[] = { &dim };
463  rval = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, &geom_tag, tagvals, 1, geom_sets );CHKERR( rval );
464 
465  for( Range::iterator j = geom_sets.begin(); j != geom_sets.end(); ++j )
466  {
467  Range ents;
468  rval = moab.get_entities_by_dimension( *j, dim, ents );CHKERR( rval );
469  if( ents.empty() ) continue;
470 
471  // get a single sharing list to compare with
472  Range::iterator k = ents.begin();
473  std::vector< int > procs;
474  rval = get_sharing_processors( moab, *k, procs );CHKERR( rval );
475  std::sort( procs.begin(), procs.end() );
476  if( procs.size() > 1 ) shared.merge( ents );
477 
478  // compare other elements
479  for( ++k; k != ents.end(); ++k )
480  {
481  std::vector< int > tmp_procs;
482  rval = get_sharing_processors( moab, *k, tmp_procs );CHKERR( rval );
483  std::sort( tmp_procs.begin(), tmp_procs.end() );
484  if( tmp_procs != procs ) invalid.insert( *j );
485  }
486 
487  // compare interior vertices
488  ents.clear();
489  rval = moab.get_entities_by_dimension( *j, 0, ents );CHKERR( rval );
490  for( k = ents.begin(); k != ents.end(); ++k )
491  {
492  std::vector< int > tmp_procs;
493  rval = get_sharing_processors( moab, *k, tmp_procs );CHKERR( rval );
494  if( tmp_procs != procs ) invalid.insert( *j );
495  }
496  }
497  }
498 
499  // Report any geometric sets for which sharing was inconsistent
500  if( !invalid.empty() )
501  {
502  std::cerr << "Elements or vertices owned by a single geometric entity are "
503  << "not shared by the same set of processors for the "
504  << "following geometric entities on process " << pcomm->proc_config().proc_rank() << ": ";
505  for( Range::iterator i = invalid.begin(); i != invalid.end(); ++i )
506  {
507  int dim;
508  int id;
509  rval = moab.tag_get_data( geom_tag, &*i, 1, &dim );
510  if( MB_SUCCESS != rval ) dim = 4;
511  rval = moab.tag_get_data( id_tag, &*i, 1, &id );
512  if( MB_SUCCESS != rval ) id = -1;
513  std::cerr << geom_names[dim] << " " << id << ", ";
514  }
515  std::cerr << std::endl;
516 
517  my_error = 1;
518  }
519  PCHECK( !my_error );
520 
521  // for each shared element, check that its vertices are shared
522  // with at least the same processes
523  for( Range::iterator i = shared.begin(); i != shared.end(); ++i )
524  {
525  std::vector< int > procs;
526  rval = get_sharing_processors( moab, *i, procs );CHKERR( rval );
527  std::sort( procs.begin(), procs.end() );
528 
529  std::vector< EntityHandle > tmp;
530  const EntityHandle* conn;
531  int len;
532  rval = moab.get_connectivity( *i, conn, len, false, &tmp );CHKERR( rval );
533  for( int j = 0; j < len; ++j )
534  {
535  std::vector< int > vprocs;
536  rval = get_sharing_processors( moab, conn[j], vprocs );CHKERR( rval );
537  std::sort( vprocs.begin(), vprocs.end() );
538  std::vector< int > diff( std::max( procs.size(), vprocs.size() ) );
539  std::vector< int >::iterator k =
540  std::set_difference( procs.begin(), procs.end(), vprocs.begin(), vprocs.end(), diff.begin() );
541  if( k != diff.begin() ) // difference is not empty
542  invalid.insert( conn[j] );
543  }
544  }
545 
546  // Report any vertices with insufficient sharing
547  if( !invalid.empty() )
548  {
549  std::cerr << "Vertices must be shared with at least the union of the processes "
550  << "sharing the elements containing the vertex. This is NOT true for "
551  << "the following vertices on process " << pcomm->proc_config().proc_rank() << ": " << invalid
552  << std::endl;
553 
554  my_error = 1;
555  }
556  PCHECK( !my_error );
557 
558  return MB_SUCCESS;
559 }
560 
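// Collect the entities this process holds purely as ghosts: anything whose pstatus has
// PSTATUS_NOT_OWNED set but PSTATUS_INTERFACE clear (non-owned interface entities are
// interface copies, not ghosts).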
561 ErrorCode get_ghost_entities( ParallelComm& pcomm, Range& ghost_ents )
562 {
563  Range all_ents;
564  ErrorCode rval;
565 
566  rval = pcomm.get_moab()->get_entities_by_handle( 0, all_ents );CHKERR( rval );
567  std::vector< unsigned char > flags( all_ents.size() );
568  rval = pcomm.get_moab()->tag_get_data( pcomm.pstatus_tag(), all_ents, &flags[0] );CHKERR( rval );
569 
570  Range::iterator ins = ghost_ents.begin();
571  std::vector< unsigned char >::const_iterator f = flags.begin();
572  for( Range::iterator i = all_ents.begin(); i != all_ents.end(); ++i, ++f )
573  if( ( *f & PSTATUS_NOT_OWNED ) && !( *f & PSTATUS_INTERFACE ) ) ins = ghost_ents.insert( ins, *i, *i );
574 
575  return MB_SUCCESS;
576 }
577 
578 ErrorCode get_ents_from_geometric_sets( Interface& moab,
579  const Tag tags[2],
580  int dimension,
581  const std::vector< int >& ids,
582  Range& results )
583 {
584  ErrorCode rval;
585  for( size_t i = 0; i < ids.size(); ++i )
586  {
587  const void* tag_vals[2] = { &dimension, &ids[i] };
588  Range sets;
589  rval = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, tags, tag_vals, 2, sets );CHKERR( rval );
590  for( Range::iterator j = sets.begin(); j != sets.end(); ++j )
591  {
592  Range ents;
593  rval = moab.get_entities_by_dimension( *j, dimension, ents );CHKERR( rval );
594  results.merge( ents );
595  }
596  }
597  return MB_SUCCESS;
598 }
599 
600 /**\brief Given entire file and IDs of geometric sets, get expected ghost entities
601  *
602  *\param moab The entire mesh, loaded in serial
603  *\param partition_geom_ids IDs of: geometric volumes owned by this proc and interface topology
604  *\param ghost_entity_ids output list
605  */
606 ErrorCode get_expected_ghosts( Interface& moab,
607  const std::vector< int > partition_geom_ids[4],
608  std::vector< int >& ghost_entity_ids,
609  int ghost_dimension,
610  int bridge_dimension,
611  int num_layers )
612 {
613  ErrorCode rval;
614  Tag tags[2];
615  rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, tags[0] );CHKERR( rval );
616  tags[1] = moab.globalId_tag();
617 
618  // get face entities defining interface
619  Range skin;
620  rval = get_ents_from_geometric_sets( moab, tags, 2, partition_geom_ids[2], skin );CHKERR( rval );
621 
622  // get adjacent entities
623  Range iface_ghosts, iface_ents;
624  if( bridge_dimension == 2 )
625  {
626  iface_ents = skin;
627  }
628  else
629  {
630  rval = moab.get_adjacencies( skin, bridge_dimension, true, iface_ents, Interface::UNION );CHKERR( rval );
631  }
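 // Grow the ghost region one layer per pass: gather everything of ghost_dimension
 // adjacent to the current bridge entities, then recompute the bridge entities adjacent
 // to that larger set. This mirrors what the PARALLEL_GHOSTS=<ghost>.<bridge>.<layers>
 // read option does on the parallel side.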
632  for( int n = 0; n < num_layers; ++n )
633  {
634  iface_ghosts.clear();
635  rval = moab.get_adjacencies( iface_ents, ghost_dimension, false, iface_ghosts, Interface::UNION );CHKERR( rval );
636  iface_ents.clear();
637  rval = moab.get_adjacencies( iface_ghosts, bridge_dimension, true, iface_ents, Interface::UNION );CHKERR( rval );
638  }
639 
640  // get volume entities owned by this process
641  Range ents;
642  rval = get_ents_from_geometric_sets( moab, tags, 3, partition_geom_ids[3], ents );CHKERR( rval );
643 
644  // get entities of ghost_dimension owned by this process
645  Range owned;
646  if( ghost_dimension == 3 )
647  owned = ents;
648  else
649  {
650  rval = moab.get_adjacencies( ents, ghost_dimension, false, owned, Interface::UNION );CHKERR( rval );
651  }
652 
653  // remove owned entities from ghost list
654  Range ghosts = subtract( iface_ghosts, owned );
655 
656  // get ids
657  ghost_entity_ids.resize( ghosts.size() );
658  rval = moab.tag_get_data( tags[1], ghosts, &ghost_entity_ids[0] );CHKERR( rval );
659  return MB_SUCCESS;
660 }
661 
662 ErrorCode test_ghost_elements( const char* filename, int ghost_dimension, int bridge_dimension, int num_layers )
663 {
663 {
664  Core mb_instance;
665  Interface& moab = mb_instance;
666  ErrorCode rval;
667 
668  std::ostringstream file_opts;
669  file_opts << "PARALLEL=READ_DELETE;"
670  << "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
671  << "PARTITION_DISTRIBUTE;"
672  << "PARALLEL_RESOLVE_SHARED_ENTS;"
673  << "PARALLEL_GHOSTS=" << ghost_dimension << '.' << bridge_dimension << '.' << num_layers;
674 
675  rval = moab.load_file( filename, 0, file_opts.str().c_str() );CHKERR( rval );
676  Tag geom_tag, id_tag;
677  rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom_tag );CHKERR( rval );
678  id_tag = moab.globalId_tag();
679 
680  // Get partition sets
681  Range partition_geom[4];
682  ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
683  partition_geom[3] = pcomm->partition_sets();
684  PCHECK( !partition_geom[3].empty() );
685 
686  rval = pcomm->check_all_shared_handles();CHKERR( rval );
687 
688  // exchange id tags to allow comparison by id
689  Range tmp_ents;
690  rval = pcomm->get_shared_entities( -1, tmp_ents, -1, false, true );
691  rval = pcomm->exchange_tags( id_tag, tmp_ents );CHKERR( rval );
692 
693  // Get geometric surfaces
694  Range surfs, tmp;
695  for( Range::iterator i = partition_geom[3].begin(); i != partition_geom[3].end(); ++i )
696  {
697  tmp.clear();
698  rval = moab.get_child_meshsets( *i, tmp );CHKERR( rval );
699  surfs.merge( tmp );
700  }
701 
702  // Get the set of geometric surfaces that represent the skin
703  // of the union of the parts for this processor. As we partition
704  // based on geometric volumes, the interface must be represented
705  // by some subset of the surfaces and their child geometric topology.
706 
707  int error = 0;
708  std::ostringstream error_msg;
709  Range ents, iface_surfs, iface_curves, iface_vertices;
710  for( Range::iterator i = surfs.begin(); !error && i != surfs.end(); ++i )
711  {
712  ents.clear();
713  rval = moab.get_entities_by_dimension( *i, ghost_dimension - 1, ents );CHKERR( rval );
714  if( ents.empty() ) continue;
715 
716  std::vector< int > procs, tmp_procs;
717  Range::iterator j = ents.begin();
718  rval = get_sharing_processors( moab, *j, procs );CHKERR( rval );
719  for( ++j; !error && j != ents.end(); ++j )
720  {
721  tmp_procs.clear();
722  rval = get_sharing_processors( moab, *j, tmp_procs );CHKERR( rval );
723  if( tmp_procs != procs )
724  {
725  error_msg << "Failure at " << __FILE__ << ':' << __LINE__ << std::endl
726  << "\tNot all entities in geometric surface are shared with"
727  << " the same processor." << std::endl;
728  error = 1;
729  break;
730  }
731  }
732 
733  if( error ) break;
734 
735  // if surface is not part of inter-proc interface, skip it.
736  if( procs.empty() ) continue;
737  if( procs.size() != 1 )
738  {
739  error_msg << "Failure at " << __FILE__ << ':' << __LINE__ << std::endl
740  << "\tSurface elements shared with " << procs.size() << " processors." << std::endl;
741  error = 1;
742  break;
743  }
744  int other_rank = procs[0];
745  if( other_rank == (int)pcomm->proc_config().proc_rank() ) continue;
746 
747  partition_geom[2].insert( *i );
748  ents.clear();
749  rval = moab.get_child_meshsets( *i, ents );CHKERR( rval );
750  partition_geom[1].merge( ents );
751  }
752 
753 // Don't do global communication until outside
754  // of loops. Otherwise we will deadlock if not all
755  // procs iterate the same number of times.
756  if( is_any_proc_error( error ) )
757  {
758  std::cerr << error_msg.str();
759  return MB_FAILURE;
760  }
761 
762  for( Range::iterator i = partition_geom[1].begin(); i != partition_geom[1].end(); ++i )
763  {
764  ents.clear();
765  rval = moab.get_child_meshsets( *i, ents );CHKERR( rval );
766  partition_geom[0].merge( ents );
767  }
768 
769  std::vector< int > partn_geom_ids[4];
770  for( int dim = 0; dim <= 3; ++dim )
771  {
772  partn_geom_ids[dim].resize( partition_geom[dim].size() );
773  rval = moab.tag_get_data( id_tag, partition_geom[dim], &( partn_geom_ids[dim][0] ) );CHKERR( rval );
774  }
775 
776  // get the global IDs of the ghosted entities
777  Range ghost_ents;
778  rval = get_ghost_entities( *pcomm, ghost_ents );CHKERR( rval );
779  std::pair< Range::iterator, Range::iterator > vtx = ghost_ents.equal_range( MBVERTEX );
780  ghost_ents.erase( vtx.first, vtx.second );
781  std::vector< int > actual_ghost_ent_ids( ghost_ents.size() );
782  rval = moab.tag_get_data( id_tag, ghost_ents, &actual_ghost_ent_ids[0] );CHKERR( rval );
783 
784  // read file in serial
785  Core moab2;
786  rval = moab2.load_file( filename );
787  PCHECK( MB_SUCCESS == rval );
788 
789  // get the global IDs of the entities we expect to be ghosted
790  std::vector< int > expected_ghost_ent_ids;
791  rval = get_expected_ghosts( moab2, partn_geom_ids, expected_ghost_ent_ids, ghost_dimension, bridge_dimension,
792  num_layers );
793  PCHECK( MB_SUCCESS == rval );
794 
795  // check that the correct entities were ghosted
796  std::sort( actual_ghost_ent_ids.begin(), actual_ghost_ent_ids.end() );
797  std::sort( expected_ghost_ent_ids.begin(), expected_ghost_ent_ids.end() );
798  PCHECK( expected_ghost_ent_ids == actual_ghost_ent_ids );
799 
800  // check we only have the partitioned and ghosted entities
801  // on this processor.
802  Range myents;
803  for( Range::iterator i = partition_geom[3].begin(); i != partition_geom[3].end(); ++i )
804  {
805  ents.clear();
806  rval = moab.get_entities_by_dimension( *i, 3, ents );CHKERR( rval );
807  myents.merge( ents );
808  }
809  if( ghost_dimension != 3 )
810  {
811  ents.clear();
812  rval = moab.get_adjacencies( myents, ghost_dimension, false, ents, Interface::UNION );
813  PCHECK( MB_SUCCESS == rval );
814  myents.swap( ents );
815  }
816  myents.merge( ghost_ents );
817  ents.clear();
818  rval = moab.get_entities_by_dimension( 0, ghost_dimension, ents );
819  PCHECK( ents == myents );
820 
821  rval = pcomm->check_all_shared_handles();
822  if( MB_SUCCESS != rval ) error = 1;
823  PCHECK( !error );
824 
825  // done
826  return MB_SUCCESS;
827 }
828 
829 ErrorCode test_ghost_elements_3_2_1( const char* filename )
830 {
831  return test_ghost_elements( filename, 3, 2, 1 );
832 }
833 
834 ErrorCode test_ghost_elements_3_2_2( const char* filename )
835 {
836  return test_ghost_elements( filename, 3, 2, 2 );
837 }
838 
839 ErrorCode test_ghost_elements_3_0_1( const char* filename )
840 {
841  return test_ghost_elements( filename, 3, 0, 1 );
842 }
843 
844 ErrorCode test_ghost_elements_2_0_1( const char* filename )
845 {
846  return test_ghost_elements( filename, 2, 0, 1 );
847 }
848 
849 ErrorCode get_owner_handles( ParallelComm* pcomm, const Range& ents, EntityHandle* handle_arr )
850 {
851  size_t i = 0;
852  int junk;
853  for( Range::iterator j = ents.begin(); j != ents.end(); ++i, ++j )
854  {
855  ErrorCode rval = pcomm->get_owner_handle( *j, junk, handle_arr[i] );
856  if( MB_SUCCESS != rval ) return rval;
857  }
858  return MB_SUCCESS;
859 }
860 
861 ErrorCode test_ghost_tag_exchange( const char* filename )
862 {
863  Core mb_instance;
864  Interface& moab = mb_instance;
865  ErrorCode rval;
866 
867  rval = moab.load_file( filename, 0,
868  "PARALLEL=READ_DELETE;"
869  "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
870  "PARTITION_DISTRIBUTE;"
871  "PARALLEL_RESOLVE_SHARED_ENTS;"
872  "PARALLEL_GHOSTS=3.2.1;"
873  "PARALLEL_SEQUENCE_FACTOR=1.5" );CHKERR( rval );
874 
875  // Get ghost elements
876  ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
877  Range local, ghosts;
878  rval = moab.get_entities_by_dimension( 0, 3, local );CHKERR( rval );
879  Range::iterator i = local.begin();
880  while( i != local.end() )
881  {
882  int rank;
883  rval = pcomm->get_owner( *i, rank );CHKERR( rval );
884  if( rank == (int)pcomm->proc_config().proc_rank() )
885  {
886  ++i;
887  }
888  else
889  {
890  ghosts.insert( *i );
891  i = local.erase( i );
892  }
893  }
894 
895  // create a tag to exchange
896  Tag dense_test_tag;
897  EntityHandle defval = 0;
898  // rval = moab.tag_get_handle( "TEST-TAG", sizeof(EntityHandle), MB_TAG_DENSE,
899  // dense_test_tag, &defval ); CHKERR(rval);
900  rval = moab.tag_get_handle( "TEST-TAG", 1, MB_TYPE_HANDLE, dense_test_tag, MB_TAG_DENSE | MB_TAG_EXCL, &defval );CHKERR( rval );
901 
902  // for all entities that I own, set tag to handle value
903  std::vector< EntityHandle > handles( local.size() ), handles2;
904  std::copy( local.begin(), local.end(), handles.begin() );
905  rval = moab.tag_set_data( dense_test_tag, local, &handles[0] );CHKERR( rval );
906 
907  // exchange tag data
908  Range tmp_range;
909  rval = pcomm->exchange_tags( dense_test_tag, tmp_range );CHKERR( rval );
910 
911  // make sure local values are unchanged
912  handles2.resize( local.size() );
913  rval = moab.tag_get_data( dense_test_tag, local, &handles2[0] );CHKERR( rval );
914  PCHECK( handles == handles2 );
915 
916  // compare values on ghost entities
917  handles.resize( ghosts.size() );
918  handles2.resize( ghosts.size() );
919  rval = moab.tag_get_data( dense_test_tag, ghosts, &handles2[0] );CHKERR( rval );
920  rval = get_owner_handles( pcomm, ghosts, &handles[0] );CHKERR( rval );
921  PCHECK( handles == handles2 );
922 
923  // now do it all again for a sparse tag
924  Tag sparse_test_tag;
925  // rval = moab.tag_get_handle( "TEST-TAG-2", sizeof(int), MB_TAG_DENSE,
926  // MB_TYPE_INTEGER, sparse_test_tag, 0 ); CHKERR(rval);
927  rval = moab.tag_get_handle( "TEST-TAG-2", 1, MB_TYPE_INTEGER, sparse_test_tag, MB_TAG_DENSE | MB_TAG_EXCL );CHKERR( rval );
928 
929  // for all entiites that I own, set tag to my rank
930  std::vector< int > procs1( local.size(), pcomm->proc_config().proc_rank() );
931  rval = moab.tag_set_data( sparse_test_tag, local, &procs1[0] );CHKERR( rval );
932 
933  // exchange tag data
934  tmp_range.clear();
935  rval = pcomm->exchange_tags( sparse_test_tag, tmp_range );
936  PCHECK( MB_SUCCESS == rval );
937 
938  // make sure local values are unchanged
939  std::vector< int > procs2( local.size() );
940  rval = moab.tag_get_data( sparse_test_tag, local, &procs2[0] );CHKERR( rval );
941  PCHECK( procs1 == procs2 );
942 
943  // compare values on ghost entities
944  procs1.resize( ghosts.size() );
945  procs2.resize( ghosts.size() );
946  rval = moab.tag_get_data( sparse_test_tag, ghosts, &procs2[0] );CHKERR( rval );
947  std::vector< int >::iterator j = procs1.begin();
948  for( i = ghosts.begin(); i != ghosts.end(); ++i, ++j )
949  {
950  rval = pcomm->get_owner( *i, *j );CHKERR( rval );
951  }
952  PCHECK( procs1 == procs2 );
953 
954  return MB_SUCCESS;
955 }
956 
958 {
959  Core mb_instance;
960  Interface& moab = mb_instance;
961  ErrorCode rval;
962 
963 #ifdef MOAB_HAVE_HDF5
964  rval = moab.load_file( filename, 0,
965  "PARALLEL=READ_DELETE;"
966  "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
967  "PARTITION_DISTRIBUTE;"
968  "PARALLEL_RESOLVE_SHARED_ENTS;"
969  "PARALLEL_GHOSTS=3.2.1" );
970 #else
971  rval = moab.load_file( filename, 0,
972  "PARALLEL=READ_BCAST;"
973  "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
974  "PARTITION_DISTRIBUTE;"
975  "PARALLEL_RESOLVE_SHARED_ENTS;"
976  "PARALLEL_GHOSTS=3.2.1" );
977 #endif
978  CHKERR( rval );
979 
980  // create a tag to exchange
981  Tag dense_test_tag;
982  // rval = moab.tag_get_handle( "TEST-TAG", sizeof(EntityHandle), MB_TAG_DENSE,
983  // dense_test_tag, 0 ); CHKERR(rval);
984  rval = moab.tag_get_handle( "TEST-TAG", 1, MB_TYPE_HANDLE, dense_test_tag, MB_TAG_DENSE | MB_TAG_EXCL );CHKERR( rval );
985 
986  // exchange tag data
987  ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
988  Range tmp_range;
989  rval = pcomm->exchange_tags( dense_test_tag, tmp_range );
990  PCHECK( MB_SUCCESS == rval );
991 
992  return MB_SUCCESS;
993 }
994 
995 // Helper for exchange_sharing_data
996 // Swap contents of buffer with specified processor.
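// The two ranks take the send/receive branches in opposite order (the lower rank
// receives first and sends second; the higher rank does the reverse), so the exchange
// cannot deadlock even if MPI_Send blocks until the matching receive is posted.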
997 int MPI_swap( void* buffer, int num_val, MPI_Datatype val_type, int other_proc )
998 {
999  int err, rank, bytes;
1000  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
1001  MPI_Type_size( val_type, &bytes );
1002  bytes *= num_val;
1003  std::vector< unsigned char > buffer2( bytes );
1004 
1005  for( int i = 0; i < 2; ++i )
1006  {
1007  if( i == ( rank < other_proc ) )
1008  {
1009  err = MPI_Send( buffer, num_val, val_type, other_proc, 0, MPI_COMM_WORLD );
1010  if( err ) return err;
1011  }
1012  else
1013  {
1014  MPI_Status status;
1015  err = MPI_Recv( &buffer2[0], num_val, val_type, other_proc, 0, MPI_COMM_WORLD, &status );
1016  if( err ) return err;
1017  }
1018  }
1019 
1020  memcpy( buffer, &buffer2[0], bytes );
1021  return 0;
1022 }
1023 
1024 int valid_ghosting_owners( int comm_size, const int* ids, const int* owners )
1025 {
1026  // for each vertex ID, build list of {rank,owner} tuples
1027  std::map< int, std::vector< int > > verts;
1028  for( int p = 0; p < comm_size; ++p )
1029  {
1030  for( int i = 0; i < 9; ++i )
1031  { // nine verts per proc
1032  int idx = 9 * p + i;
1033  verts[ids[idx]].push_back( p );
1034  verts[ids[idx]].push_back( owners[idx] );
1035  }
1036  }
1037 
1038  // now check for each vertex that the owner from
1039  // each processor is the same
1040  bool print_desc = true;
1041  int error_count = 0;
1042  std::map< int, std::vector< int > >::iterator it;
1043  for( it = verts.begin(); it != verts.end(); ++it )
1044  {
1045  int id = it->first;
1046  std::vector< int >& list = it->second;
1047  bool all_same = true;
1048  for( size_t i = 2; i < list.size(); i += 2 )
1049  if( list[i + 1] != list[1] ) all_same = false;
1050  if( all_same ) continue;
1051 
1052  ++error_count;
1053 
1054  if( print_desc )
1055  {
1056  print_desc = false;
1057  std::cerr << "ERROR at " __FILE__ ":" << __LINE__ << std::endl
1058  << " Processors have inconsistent ideas of vertex ownership:" << std::endl;
1059  }
1060 
1061  std::cerr << " Vertex " << id << ": " << std::endl;
1062  for( size_t i = 0; i < list.size(); i += 2 )
1063  std::cerr << " Proc " << list[i] << " thinks owner is " << list[i + 1] << std::endl;
1064  }
1065 
1066  return error_count;
1067 }
1068 
1069 ErrorCode test_interface_owners_common( int num_ghost_layers )
1070 {
1071  ErrorCode rval;
1072  Core moab_instance;
1073  Interface& mb = moab_instance;
1074  ParallelComm pcomm( &mb, MPI_COMM_WORLD );
1075 
1076  // build distributed quad mesh
1077  Range quads;
1078  EntityHandle verts[9];
1079  int ids[9];
1080  rval = parallel_create_mesh( mb, ids, verts, quads );
1081  PCHECK( MB_SUCCESS == rval );
1082  rval = pcomm.resolve_shared_ents( 0, quads, 2, 1 );
1083  PCHECK( MB_SUCCESS == rval );
1084  if( num_ghost_layers )
1085  {
1086  rval = pcomm.exchange_ghost_cells( 2, 0, num_ghost_layers, 0, true );
1087  PCHECK( MB_SUCCESS == rval );
1088  }
1089 
1090  // get vertex owners
1091  int owner[9];
1092  for( int i = 0; i < 9; ++i )
1093  {
1094  rval = pcomm.get_owner( verts[i], owner[i] );
1095  if( MB_SUCCESS != rval ) break;
1096  }
1097  PCHECK( MB_SUCCESS == rval );
1098 
1099  // exchange vertex owners amongst all processors
1100  int rank, size, ierr;
1101  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
1102  MPI_Comm_size( MPI_COMM_WORLD, &size );
1103  std::vector< int > all_ids( 9 * size ), all_owner( 9 * size );
1104  ierr = MPI_Gather( ids, 9, MPI_INT, &all_ids[0], 9, MPI_INT, 0, MPI_COMM_WORLD );
1105  if( ierr ) return MB_FAILURE;
1106  ierr = MPI_Gather( owner, 9, MPI_INT, &all_owner[0], 9, MPI_INT, 0, MPI_COMM_WORLD );
1107  if( ierr ) return MB_FAILURE;
1108 
1109  int errors = rank ? 0 : valid_ghosting_owners( size, &all_ids[0], &all_owner[0] );
1110  MPI_Bcast( &errors, 1, MPI_INT, 0, MPI_COMM_WORLD );
1111  return errors ? MB_FAILURE : MB_SUCCESS;
1112 }
1113 
1114 // Common implementation for both:
1115 // test_interface
1116 // regression_interface_with_ghosting
1117 ErrorCode test_interface_owners( const char* )
1118 {
1119  return test_interface_owners_common( 0 );
1120 }
1121 
1123 {
1124  return test_interface_owners_common( 1 );
1125 }
1126 
1127 struct VtxData
1128 {
1129  std::vector< int > procs;
1130  std::vector< int > owners;
1131  std::vector< EntityHandle > handles;
1132 };
1133 
1135 {
1136  ErrorCode rval;
1137  Core moab_instance;
1138  Interface& mb = moab_instance;
1139  ParallelComm pcomm( &mb, MPI_COMM_WORLD );
1140 
1141  int rank, size;
1142  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
1143  MPI_Comm_size( MPI_COMM_WORLD, &size );
1144 
1145  // build distributed quad mesh
1146  Range quads;
1147  EntityHandle verts[9];
1148  int ids[9];
1149  rval = parallel_create_mesh( mb, ids, verts, quads );
1150  PCHECK( MB_SUCCESS == rval );
1151  rval = pcomm.resolve_shared_ents( 0, quads, 2, 1 );
1152  PCHECK( MB_SUCCESS == rval );
1153  rval = pcomm.exchange_ghost_cells( 2, 1, 1, 0, true );
1154  PCHECK( MB_SUCCESS == rval );
1155 
1156  rval = pcomm.check_all_shared_handles();
1157  PCHECK( MB_SUCCESS == rval );
1158 
1159  return MB_SUCCESS;
1160 }
1161 
1162 ErrorCode check_consistent_ids( Interface& mb,
1163  const EntityHandle* entities,
1164  const int* orig_ids,
1165  int num_ents,
1166  const char* singular_name,
1167  const char* plural_name )
1168 {
1169  ErrorCode rval;
1170  int rank, size, ierr;
1171  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
1172  MPI_Comm_size( MPI_COMM_WORLD, &size );
1173 
1174  Tag id_tag = mb.globalId_tag();
1175  std::vector< int > new_ids( num_ents );
1176  rval = mb.tag_get_data( id_tag, entities, num_ents, &new_ids[0] );CHKERR( rval );
1177  // This test is wrong. a) The caller can select a start ID so there's
1178  // no guarantee that the IDs will be in any specific range and b) There
1179  // is no reason to expect the global number of entities to be num_ents*size
1180  // if there are any shared entities. J.Kraftcheck 2010-12-22
1181  // rval = MB_SUCCESS;
1182  // for (int i = 0; i < num_ents; ++i)
1183  // if (new_ids[i] < 0 || new_ids[i] >= num_ents*size) {
1184  // std::cerr << "ID out of bounds on proc " << rank
1185  // << " : " << new_ids[i] << " not in [0," << num_ents*size-1
1186  // << "]" << std::endl;
1187  // rval = MB_FAILURE;
1188  // }
1189  // if (MB_SUCCESS != rval)
1190  // return rval;
1191 
1192  // Gather up all data on root proc for consistency check
1193  std::vector< int > all_orig_ids( num_ents * size ), all_new_ids( num_ents * size );
1194  ierr = MPI_Gather( (void*)orig_ids, num_ents, MPI_INT, &all_orig_ids[0], num_ents, MPI_INT, 0, MPI_COMM_WORLD );
1195  if( ierr ) return MB_FAILURE;
1196  ierr = MPI_Gather( &new_ids[0], num_ents, MPI_INT, &all_new_ids[0], num_ents, MPI_INT, 0, MPI_COMM_WORLD );
1197  if( ierr ) return MB_FAILURE;
1198 
1199  // build a local map from original ID to new ID and use it
1200 // to check for consistency between all procs
1201  rval = MB_SUCCESS;
1202  ;
1203  if( 0 == rank )
1204  {
1205  // check for two processors having different global ID for same entity
1206  std::map< int, int > idmap; // index by original ID and contains new ID
1207  std::map< int, int > owner; // index by original ID and contains owning rank
1208  for( int i = 0; i < num_ents * size; ++i )
1209  {
1210  std::map< int, int >::iterator it = idmap.find( all_orig_ids[i] );
1211  if( it == idmap.end() )
1212  {
1213  idmap[all_orig_ids[i]] = all_new_ids[i];
1214  owner[all_orig_ids[i]] = i / num_ents;
1215  }
1216  else if( it->second != all_new_ids[i] )
1217  {
1218  std::cerr << "Inconsistent " << singular_name << " IDs between processors " << owner[all_orig_ids[i]]
1219  << " and " << i / num_ents << " : " << it->second << " and " << all_new_ids[i]
1220  << " respectively." << std::endl;
1221  rval = MB_FAILURE;
1222  }
1223  }
1224  // check for two processors having same global ID for different entities
1225  idmap.clear();
1226  owner.clear();
1227  for( int i = 0; i < num_ents * size; ++i )
1228  {
1229  std::map< int, int >::iterator it = idmap.find( all_new_ids[i] );
1230  if( it == idmap.end() )
1231  {
1232  idmap[all_new_ids[i]] = all_orig_ids[i];
1233  owner[all_new_ids[i]] = i / num_ents;
1234  }
1235  else if( it->second != all_orig_ids[i] )
1236  {
1237  std::cerr << "ID " << all_new_ids[i] << " assigned to different " << plural_name << " on processors "
1238  << owner[all_new_ids[i]] << " and " << i / num_ents << std::endl;
1239  rval = MB_FAILURE;
1240  }
1241  }
1242  }
1243  return rval;
1244 }
1245 
1246 ErrorCode test_assign_global_ids( const char* )
1247 {
1248  ErrorCode rval;
1249  Core moab_instance;
1250  Interface& mb = moab_instance;
1251  ParallelComm pcomm( &mb, MPI_COMM_WORLD );
1252 
1253  int rank, size;
1254  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
1255  MPI_Comm_size( MPI_COMM_WORLD, &size );
1256 
1257  // build distributed quad mesh
1258  Range quad_range;
1259  EntityHandle verts[9];
1260  int vert_ids[9];
1261  rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
1262  PCHECK( MB_SUCCESS == rval );
1263  rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
1264  PCHECK( MB_SUCCESS == rval );
1265 
1266  // get global ids for quads
1267  Tag id_tag = mb.globalId_tag();
1268  assert( 4u == quad_range.size() );
1269  EntityHandle quads[4];
1270  std::copy( quad_range.begin(), quad_range.end(), quads );
1271  int quad_ids[4];
1272  rval = mb.tag_get_data( id_tag, quads, 4, quad_ids );CHKERR( rval );
1273 
1274  // clear GLOBAL_ID tag
1275  int zero[9] = { 0 };
1276  rval = mb.tag_set_data( id_tag, verts, 9, zero );CHKERR( rval );
1277  rval = mb.tag_set_data( id_tag, quads, 4, zero );CHKERR( rval );
1278 
1279  // assign new global IDs
1280  rval = pcomm.assign_global_ids( 0, 2 );
1281  PCHECK( MB_SUCCESS == rval );
1282 
1283  rval = check_consistent_ids( mb, verts, vert_ids, 9, "vertex", "vertices" );
1284  PCHECK( MB_SUCCESS == rval );
1285  rval = check_consistent_ids( mb, quads, quad_ids, 4, "quad", "quads" );
1286  PCHECK( MB_SUCCESS == rval );
1287  return rval;
1288 }
1289 
1290 ErrorCode test_shared_sets( const char* )
1291 {
1292  ErrorCode rval;
1293  Core moab_instance;
1294  Interface& mb = moab_instance;
1295  ParallelComm pcomm( &mb, MPI_COMM_WORLD );
1296 
1297  int rank_i, size_i;
1298  MPI_Comm_rank( MPI_COMM_WORLD, &rank_i );
1299  MPI_Comm_size( MPI_COMM_WORLD, &size_i );
1300  const unsigned rank = rank_i;
1301  const unsigned nproc = size_i;
1302 
1303  // build distributed quad mesh
1304  Range quads, sets;
1305  EntityHandle verts[9], set_arr[3];
1306  int ids[9];
1307  rval = parallel_create_mesh( mb, ids, verts, quads, set_arr );
1308  PCHECK( MB_SUCCESS == rval );
1309  rval = pcomm.resolve_shared_ents( 0, quads, 2, 1 );
1310  PCHECK( MB_SUCCESS == rval );
1311  sets.insert( set_arr[0] );
1312  sets.insert( set_arr[1] );
1313  if( set_arr[2] )
1314  {
1315  sets.insert( set_arr[2] );
1316  }
1317  else
1318  {
1319  set_arr[2] = set_arr[1];
1320  }
1321 
1322  Tag id_tag = mb.globalId_tag();
1323  rval = pcomm.resolve_shared_sets( sets, id_tag );
1324  PCHECK( MB_SUCCESS == rval );
1325 
1326  // check that set data is consistent
1327  ErrorCode ok = MB_SUCCESS;
1328  for( size_t i = 0; i < 3; ++i )
1329  {
1330  unsigned owner;
1331  EntityHandle owner_handle, local;
1332  rval = pcomm.get_entityset_owner( set_arr[i], owner, &owner_handle );
1333  if( MB_SUCCESS != rval )
1334  ok = rval;
1335  else if( owner == rank )
1336  {
1337  if( owner_handle != set_arr[i] )
1338  {
1339  std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " invalid remote handle for owned set"
1340  << std::endl;
1341  ok = MB_FAILURE;
1342  }
1343  }
1344  else if( MB_SUCCESS != pcomm.get_entityset_local_handle( owner, owner_handle, local ) )
1345  ok = MB_FAILURE;
1346  else if( local != set_arr[i] )
1347  {
1348  std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " invalid local handle for remote data"
1349  << std::endl;
1350  ok = MB_FAILURE;
1351  }
1352  }
1353  PCHECK( MB_SUCCESS == ok );
1354 
1355  const unsigned col = rank / 2; // which column is proc in
1356  const unsigned colrank =
1357  2 * col; // rank of first of two procs in col (rank if rank is even, rank-1 if rank is odd)
1358  unsigned mins[3] = { 0, 0, 0 };
1359  unsigned maxs[3] = { nproc - 1, 0, 0 };
1360  if( rank < 2 )
1361  { // if in first (left-most) column, then
1362  mins[1] = mins[2] = 0;
1363  maxs[1] = maxs[2] = std::min( 3u, nproc - 1 );
1364  }
1365  else if( col == ( nproc - 1 ) / 2 )
1366  { // else if last (right-most) column, then
1367  mins[1] = mins[2] = colrank - 2;
1368  maxs[1] = maxs[2] = std::min( colrank + 1, nproc - 1 );
1369  }
1370  else
1371  { // if inside column, then
1372  mins[1] = colrank - 2;
1373  mins[2] = colrank;
1374  maxs[1] = std::min( colrank + 1, nproc - 1 );
1375  maxs[2] = std::min( colrank + 3, nproc - 1 );
1376  }
1377 
1378  // check expected sharing lists
1379  // set 0 is shared between all procs in the range [ mins[0], maxs[0] ]
1380  std::vector< unsigned > expected, list;
1381  for( size_t i = 0; i < 3; ++i )
1382  {
1383  expected.clear();
1384  for( unsigned r = mins[i]; r <= std::min( maxs[i], nproc - 1 ); ++r )
1385  if( r != rank ) expected.push_back( r );
1386  list.clear();
1387  rval = pcomm.get_entityset_procs( set_arr[i], list );
1388  if( MB_SUCCESS != rval )
1389  ok = rval;
1390  else
1391  {
1392  std::sort( list.begin(), list.end() );
1393  if( expected != list )
1394  {
1395  std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " incorrect sharing list for entity set"
1396  << std::endl;
1397  ok = MB_FAILURE;
1398  }
1399  }
1400  }
1401  PCHECK( MB_SUCCESS == ok );
1402 
1403  // check consistent owners
1404  unsigned send_list[6], set_owners[3];
1405  std::vector< unsigned > recv_list( 6 * nproc );
1406  for( size_t i = 0; i < 3; ++i )
1407  {
1408  mb.tag_get_data( id_tag, set_arr + i, 1, &send_list[2 * i] );
1409  pcomm.get_entityset_owner( set_arr[i], set_owners[i] );
1410  send_list[2 * i + 1] = set_owners[i];
1411  }
1412  MPI_Allgather( send_list, 6, MPI_UNSIGNED, &recv_list[0], 6, MPI_UNSIGNED, MPI_COMM_WORLD );
1413  std::map< unsigned, unsigned > owners;
1414  for( unsigned i = 0; i < 6 * nproc; i += 2 )
1415  {
1416  unsigned id = recv_list[i];
1417  unsigned owner = recv_list[i + 1];
1418  if( owners.find( id ) == owners.end() )
1419  owners[id] = owner;
1420  else if( owners[id] != owner )
1421  {
1422  std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " mismatched owners (" << owners[id]
1423  << " and " << owner << ") for set with ID " << id << std::endl;
1424  ok = MB_FAILURE;
1425  }
1426  }
1427  PCHECK( MB_SUCCESS == ok );
1428 
1429  // test PComm::get_entityset_owners
1430  std::vector< unsigned > act_owners, exp_owners;
1431  for( size_t i = 0; i < 3; ++i )
1432  {
1433  exp_owners.push_back( set_owners[i] );
1434  }
1435  ok = pcomm.get_entityset_owners( act_owners );
1436  PCHECK( MB_SUCCESS == ok );
1437  // PCHECK(std::find(act_owners.begin(),act_owners.end(),rank) == act_owners.end());
1438  std::sort( act_owners.begin(), act_owners.end() );
1439  std::sort( exp_owners.begin(), exp_owners.end() );
1440  exp_owners.erase( std::unique( exp_owners.begin(), exp_owners.end() ), exp_owners.end() );
1441  PCHECK( exp_owners == act_owners );
1442 
1443  // test PComm::get_owned_sets
1444  ok = MB_SUCCESS;
1445  for( unsigned i = 0; i < nproc; ++i )
1446  {
1447  sets.clear();
1448  if( MB_SUCCESS != pcomm.get_owned_sets( i, sets ) )
1449  {
1450  std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank
1451  << " failed to get shared set list for sets owned by rank " << set_owners[i] << std::endl;
1452  continue;
1453  }
1454 
1455  Range expected_range;
1456  for( size_t j = 0; j < 3; ++j )
1457  if( set_owners[j] == i ) expected_range.insert( set_arr[j] );
1458 
1459  if( expected_range != sets )
1460  {
1461  std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank
1462  << " has incorrect shared set list for sets owned by rank " << set_owners[i] << std::endl
1463  << "Expected: " << expected_range << std::endl
1464  << "Actual: " << sets << std::endl;
1465  ok = MB_FAILURE;
1466  }
1467  }
1468  PCHECK( MB_SUCCESS == ok );
1469 
1470  return MB_SUCCESS;
1471 }
1472 
1473 template < typename T >
1474 ErrorCode check_shared_ents( ParallelComm& pcomm, Tag tagh, T fact, MPI_Op mpi_op )
1475 {
1476  // get the shared entities
1477  Range shared_ents;
1478  ErrorCode rval = pcomm.get_shared_entities( -1, shared_ents );CHKERR( rval );
1479 
1480  std::vector< int > shprocs( MAX_SHARING_PROCS );
1481  std::vector< EntityHandle > shhandles( MAX_SHARING_PROCS );
1482  std::vector< T > dum_vals( shared_ents.size() );
1483  int np;
1484  unsigned char pstatus;
1485 
1486  rval = pcomm.get_moab()->tag_get_data( tagh, shared_ents, &dum_vals[0] );CHKERR( rval );
1487 
1488  // for each shared entity, compare the reduced tag value against the number of sharing procs (e.g. for MPI_SUM the value should be fact times that count; see the per-op checks below)
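 // Worked example: with fact = 2 and an entity shared by three processes, MPI_SUM should
 // leave 2+2+2 = 6 on the tag and MPI_PROD 2*2*2 = 8; for MPI_MAX/MPI_MIN only entities
 // shared with rank 0 are checked, because only rank 0 sets a non-default value in the
 // callers below.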
1489  Range::iterator rit;
1490  int i = 0;
1491  for( rit = shared_ents.begin(); rit != shared_ents.end(); ++rit, i++ )
1492  {
1493  rval = pcomm.get_sharing_data( *rit, &shprocs[0], &shhandles[0], pstatus, np );CHKERR( rval );
1494  if( 1 == np && shprocs[0] != (int)pcomm.proc_config().proc_rank() ) np++;
1495  bool with_root = std::find( &shprocs[0], &shprocs[np], 0 ) - &shprocs[0] != np || !pcomm.rank();
1496  if( mpi_op == MPI_SUM )
1497  {
1498  if( dum_vals[i] != fact * np ) return MB_FAILURE;
1499  }
1500  else if( mpi_op == MPI_PROD )
1501  {
1502  if( dum_vals[i] != pow( fact, np ) ) return MB_FAILURE;
1503  }
1504  else if( mpi_op == MPI_MAX )
1505  {
1506  if( with_root && dum_vals[i] != fact ) return MB_FAILURE;
1507  }
1508  else if( mpi_op == MPI_MIN )
1509  {
1510  if( with_root && dum_vals[i] != fact ) return MB_FAILURE;
1511  }
1512  else
1513  return MB_FAILURE;
1514  }
1515 
1516  return MB_SUCCESS;
1517 }
1518 
1519 template < typename T >
1520 ErrorCode test_reduce_tags( const char*, DataType tp )
1521 {
1522  ErrorCode rval;
1523  Core moab_instance;
1524  Interface& mb = moab_instance;
1525  ParallelComm pcomm( &mb, MPI_COMM_WORLD );
1526 
1527  int rank, size;
1528  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
1529  MPI_Comm_size( MPI_COMM_WORLD, &size );
1530 
1531  // build distributed quad mesh
1532  Range quad_range;
1533  EntityHandle verts[9];
1534  int vert_ids[9];
1535  rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
1536  PCHECK( MB_SUCCESS == rval );
1537  rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
1538  PCHECK( MB_SUCCESS == rval );
1539 
1540  Tag test_tag;
1541  T def_val = 2;
1542  Range dum_range;
1543  std::vector< T > dum_vals;
1544 
1545  // T, MPI_SUM
1546  rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
1547  rval = pcomm.reduce_tags( test_tag, MPI_SUM, dum_range );CHKERR( rval );
1548  rval = check_shared_ents( pcomm, test_tag, (T)2, MPI_SUM );CHKERR( rval );
1549  rval = mb.tag_delete( test_tag );
1550 
1551  // T, MPI_PROD
1552  rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
1553  rval = pcomm.reduce_tags( test_tag, MPI_PROD, dum_range );CHKERR( rval );
1554  rval = check_shared_ents( pcomm, test_tag, (T)2, MPI_PROD );CHKERR( rval );
1555  rval = mb.tag_delete( test_tag );
1556 
1557  // T, MPI_MAX
1558  rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
1559  // get owned entities and set a different value on them
1560  rval = pcomm.get_shared_entities( -1, dum_range, -1, false, false );CHKERR( rval );
1561  if( pcomm.rank() == 0 )
1562  {
1563  dum_vals.resize( dum_range.size(), (T)3 );
1564  rval = mb.tag_set_data( test_tag, dum_range, &dum_vals[0] );CHKERR( rval );
1565  }
1566  rval = pcomm.reduce_tags( test_tag, MPI_MAX, dum_range );CHKERR( rval );
1567  rval = check_shared_ents( pcomm, test_tag, (T)3, MPI_MAX );CHKERR( rval );
1568  rval = mb.tag_delete( test_tag );
1569 
1570  // T, MPI_MIN
1571  rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
1572  // get owned entities and set a different value on them
1573  if( pcomm.rank() == 0 )
1574  {
1575  std::fill( dum_vals.begin(), dum_vals.end(), (T)-1 );
1576  rval = mb.tag_set_data( test_tag, dum_range, &dum_vals[0] );CHKERR( rval );
1577  }
1578  rval = pcomm.reduce_tags( test_tag, MPI_MIN, dum_range );CHKERR( rval );
1579  rval = check_shared_ents( pcomm, test_tag, (T)-1, MPI_MIN );CHKERR( rval );
1580  rval = mb.tag_delete( test_tag );
1581 
1582  return MB_SUCCESS;
1583 }
1584 
1585 ErrorCode test_reduce_tag_failures( const char* )
1586 {
1587  // test things that should fail
1588  ErrorCode rval;
1589  Core moab_instance;
1590  Interface& mb = moab_instance;
1591  ParallelComm pcomm( &mb, MPI_COMM_WORLD );
1592 
1593  int rank, size;
1594  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
1595  MPI_Comm_size( MPI_COMM_WORLD, &size );
1596 
1597  // build distributed quad mesh
1598  Range quad_range;
1599  EntityHandle verts[9];
1600  int vert_ids[9];
1601  rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
1602  PCHECK( MB_SUCCESS == rval );
1603  rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
1604  PCHECK( MB_SUCCESS == rval );
1605 
1606  Tag test_tag;
1607  Range dum_range;
1608  int def_int;
1609  double def_dbl;
1610 
1611  // explicit destination tag of different type
1612  Tag test_dest;
1613  std::vector< Tag > src_tags, dest_tags;
1614  rval = mb.tag_get_handle( "test_tag", 1, MB_TYPE_INTEGER, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_int );CHKERR( rval );
1615  src_tags.push_back( test_tag );
1616  rval = mb.tag_get_handle( "dtest_tag", 1, MB_TYPE_DOUBLE, test_dest, MB_TAG_DENSE | MB_TAG_CREAT, &def_dbl );CHKERR( rval );
1617  dest_tags.push_back( test_dest );
1618  rval = pcomm.reduce_tags( src_tags, dest_tags, MPI_MIN, dum_range );
1619  PCHECK( rval == MB_FAILURE );
1620  rval = mb.tag_delete( test_dest );CHKERR( rval );
1621  rval = mb.tag_delete( test_tag );CHKERR( rval );
1622 
1623  // tag w/ no default value
1624  rval = mb.tag_get_handle( "test_tag", 1, MB_TYPE_INTEGER, test_tag, MB_TAG_DENSE | MB_TAG_CREAT );CHKERR( rval );
1625  rval = pcomm.reduce_tags( test_tag, MPI_MIN, dum_range );
1626  PCHECK( rval == MB_ENTITY_NOT_FOUND );
1627  rval = mb.tag_delete( test_tag );
1628 
1629  return MB_SUCCESS;
1630 }
1631 
1632 ErrorCode test_reduce_tags( const char* )
1633 {
1634  ErrorCode rval = MB_SUCCESS, tmp_rval;
1635 
1636  tmp_rval = test_reduce_tags< int >( "test_reduce_tags (int)", MB_TYPE_INTEGER );
1637  if( MB_SUCCESS != tmp_rval )
1638  {
1639  std::cout << "test_reduce_tags failed for int data type." << std::endl;
1640  rval = tmp_rval;
1641  }
1642 
1643  tmp_rval = test_reduce_tags< double >( "test_reduce_tags (dbl)", MB_TYPE_DOUBLE );
1644  if( MB_SUCCESS != tmp_rval )
1645  {
1646  std::cout << "test_reduce_tags failed for double data type." << std::endl;
1647  rval = tmp_rval;
1648  }
1649 
1650  return rval;
1651 }
1652 
1653 ErrorCode test_reduce_tag_explicit_dest()
1654 {
 1655  // test reduce_tags with an explicit destination tag
1656  ErrorCode rval;
1657  Core moab_instance;
1658  Interface& mb = moab_instance;
1659  ParallelComm pcomm( &mb, MPI_COMM_WORLD );
1660 
1661  int rank, size;
1662  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
1663  MPI_Comm_size( MPI_COMM_WORLD, &size );
1664 
1665  // build distributed quad mesh
1666  Range quad_range;
1667  EntityHandle verts[9];
1668  int vert_ids[9];
1669  rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
1670  PCHECK( MB_SUCCESS == rval );
1671  rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
1672  PCHECK( MB_SUCCESS == rval );
1673 
1674  Tag src_tag, dest_tag;
1675  Range dum_range;
1676  double def_dbl = 5.0;
1677 
1678  // src tag has one value, pre-existing dest tag has another; should get MPI_Op of src values
1679  std::vector< Tag > src_tags, dest_tags;
1680  // create the tags
1681  rval = mb.tag_get_handle( "src_tag", 1, MB_TYPE_DOUBLE, src_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_dbl );CHKERR( rval );
1682  src_tags.push_back( src_tag );
1683  rval = mb.tag_get_handle( "dest_tag", 1, MB_TYPE_DOUBLE, dest_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_dbl );CHKERR( rval );
1684  dest_tags.push_back( dest_tag );
1685  // set values for src tag to one value, dest tag to another
1686  rval = pcomm.get_shared_entities( -1, dum_range, -1, false, false );CHKERR( rval );
1687  std::vector< double > tag_vals( dum_range.size(), 1.0 );
1688  rval = mb.tag_set_data( src_tag, dum_range, &tag_vals[0] );CHKERR( rval );
1689  std::fill( tag_vals.begin(), tag_vals.end(), 2.0 );
1690  rval = mb.tag_set_data( dest_tag, dum_range, &tag_vals[0] );CHKERR( rval );
1691  // do the reduce
1692  rval = pcomm.reduce_tags( src_tags, dest_tags, MPI_SUM, dum_range );CHKERR( rval );
1693  // check the reduced value
1694  rval = check_shared_ents< double >( pcomm, dest_tag, (double)1.0, MPI_SUM );CHKERR( rval );
1695 
1696  rval = mb.tag_delete( src_tag );CHKERR( rval );
1697  rval = mb.tag_delete( dest_tag );CHKERR( rval );
1698 
1699  return MB_SUCCESS;
1700 }
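
// A sketch of reading the reduced values back (hypothetical helper; it assumes
// the dest_tag and shared range from the test above). As the comment in the
// test notes, the result is the MPI_Op of the src values only: on an entity
// shared by n processors, MPI_SUM over per-processor src values of 1.0 gives
// n * 1.0, and the pre-existing 2.0 stored in dest_tag is replaced, not added in.
ErrorCode print_reduced_dest_sketch( Interface& mb, Tag dest_tag, const Range& shared )
{
    if( shared.empty() ) return MB_SUCCESS;
    std::vector< double > reduced( shared.size() );
    ErrorCode rval = mb.tag_get_data( dest_tag, shared, &reduced[0] );CHKERR( rval );
    for( size_t i = 0; i < reduced.size(); ++i )
        std::cout << " reduced dest value " << i << ": " << reduced[i] << "\n";
    return MB_SUCCESS;
}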
1701 
1702 ErrorCode test_delete_entities( const char* filename )
1703 {
1704  Core mb_instance;
 1705  Interface& moab = mb_instance;
 1706  ErrorCode rval;
1707 
1708  rval = moab.load_file( filename, 0,
1709  "PARALLEL=READ_PART;"
1710  "PARTITION=PARALLEL_PARTITION;"
1711  "PARALLEL_RESOLVE_SHARED_ENTS" );CHKERR( rval );
1712 
1713  // Get ghost elements
1714  ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
1715  // get some edges and faces, and delete some
1716  Range local;
1717  rval = moab.get_entities_by_dimension( 0, 1, local );CHKERR( rval );
 1718  rval = moab.get_entities_by_dimension( 0, 2, local );CHKERR( rval ); // note: this appends to the same range
1719 
1720  Range local2; // extract about half of those
1721  std::copy( local.begin(), local.begin() + local.size() / 2, range_inserter( local2 ) );
1722 
1723  // delete local 2
1724  rval = pcomm->delete_entities( local2 );CHKERR( rval );
1725 
1726  for( Range::iterator it = local2.begin(); it != local2.end(); ++it )
1727  {
1728  if( mb_instance.is_valid( *it ) ) return MB_FAILURE;
1729  }
1730  const char* opt = "PARALLEL=WRITE_PART";
1731  rval = moab.write_file( "tmpx.h5m", 0, opt );CHKERR( rval );
1732  if( pcomm->proc_config().proc_rank() == 0 ) remove( "tmpx.h5m" );
1733 
1734  return MB_SUCCESS;
1735 }
1736 
1737 ErrorCode test_ghost_polyhedra( const char* filename )
1738 {
1739  Core mb_instance;
 1740  Interface& moab = mb_instance;
 1741  ErrorCode rval;
1742 
1743  rval = moab.load_file( filename, 0,
1744  "PARALLEL=READ_PART;"
1745  "PARTITION=PARALLEL_PARTITION;"
1746  "PARALLEL_RESOLVE_SHARED_ENTS;"
1747  "PARALLEL_GHOSTS=3.0.1" );CHKERR( rval );
1748 
1749  return MB_SUCCESS;
1750 }
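
// A note on the option string used above: PARALLEL_GHOSTS=3.0.1 has the form
// <ghost dim>.<bridge dim>.<number of layers>, i.e. ghost 3-dimensional
// elements that share a vertex (bridge dimension 0) with owned elements, one
// layer deep. A minimal sketch of the same read with faces as the bridge
// (dimension 2), matching the string used by the sequence test further below
// (helper name is illustrative):
ErrorCode load_with_face_bridge_ghosts_sketch( Interface& moab, const char* filename )
{
    return moab.load_file( filename, 0,
                           "PARALLEL=READ_PART;"
                           "PARTITION=PARALLEL_PARTITION;"
                           "PARALLEL_RESOLVE_SHARED_ENTS;"
                           "PARALLEL_GHOSTS=3.2.1" );
}
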
1751 ErrorCode test_too_few_parts( const char* filename )
1752 {
1753  Core mb_instance;
 1754  Interface& moab = mb_instance;
 1755  ErrorCode rval;
1756 
1757  rval = moab.load_file( filename, 0,
1758  "PARALLEL=READ_PART;"
1759  "PARTITION=PARALLEL_PARTITION;"
1760  "PARALLEL_RESOLVE_SHARED_ENTS;" );
1761  // if(rval==MB_SUCCESS)
1762  // return MB_FAILURE;
1763 
1764  return rval;
1765 }
1766 
1767 ErrorCode test_sequences_after_ghosting( const char* filename )
1768 {
1769  Core mb_instance;
 1770  Interface& moab = mb_instance;
 1771  ErrorCode rval;
1772 
1773  rval = moab.load_file( filename, 0,
1774  "PARALLEL=READ_PART;"
1775  "PARTITION=PARALLEL_PARTITION;"
1776  "PARALLEL_RESOLVE_SHARED_ENTS;"
1777  "PARALLEL_GHOSTS=3.2.1;"
1778  "PARALLEL_SEQUENCE_FACTOR=1.5" );CHKERR( rval );
1779 
1780  // get all elements of dimension 3, and check they are on one sequence, with connect_iterate
1781  Range elems;
1782  rval = moab.get_entities_by_dimension( 0, 3, elems );CHKERR( rval );
1783  if( elems.psize() != 1 )
1784  {
1785  std::cout << " elems.psize() = " << elems.psize() << "\n";
1786  return MB_FAILURE;
1787  }
1788  // we want only one sequence
1789  int count, vpere;
1790  EntityHandle* conn_ptr;
1791  rval = moab.connect_iterate( elems.begin(), elems.end(), conn_ptr, vpere, count );CHKERR( rval );
1792 
1793  if( count != (int)elems.size() )
1794  {
1795  std::cout << " more than one sequence: elems.size() = " << elems.size() << " count:" << count << "\n";
1796  return MB_FAILURE;
1797  }
1798  // check also global id tag, which is dense
1799  Tag id_tag = moab.globalId_tag();
1800  void* globalid_data = NULL;
1801  rval = moab.tag_iterate( id_tag, elems.begin(), elems.end(), count, globalid_data );CHKERR( rval );
1802  if( count != (int)elems.size() )
1803  {
1804  std::cout << " more than one tag sequence: elems.size() = " << elems.size() << " count:" << count << "\n";
1805  return MB_FAILURE;
1806  }
1807 
1808  // repeat the tests for vertex sequences
 1809  // get all vertices (dimension 0), and check they are on one sequence, with coords_iterate
1810  Range verts;
1811  rval = moab.get_entities_by_dimension( 0, 0, verts );CHKERR( rval );
1812  if( verts.psize() != 1 )
1813  {
1814  std::cout << " verts.psize() = " << verts.psize() << "\n";
1815  return MB_FAILURE;
1816  }
1817  //
1818  double *x_ptr, *y_ptr, *z_ptr;
1819  rval = moab.coords_iterate( verts.begin(), verts.end(), x_ptr, y_ptr, z_ptr, count );CHKERR( rval );
1820 
1821  if( count != (int)verts.size() )
1822  {
1823  std::cout << " more than one sequence: verts.size() = " << verts.size() << " count:" << count << "\n";
1824  return MB_FAILURE;
1825  }
1826 
1827  rval = moab.tag_iterate( id_tag, verts.begin(), verts.end(), count, globalid_data );CHKERR( rval );
1828  if( count != (int)verts.size() )
1829  {
1830  std::cout << " more than one tag sequence: verts.size() = " << verts.size() << " count:" << count << "\n";
1831  return MB_FAILURE;
1832  }
1833  return MB_SUCCESS;
1834 }
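
// A small illustration of what psize() measures in the checks above: a Range
// stores handles as contiguous [start, end] pairs, and psize() returns the
// number of such pairs. psize() == 1 means the handles themselves are
// contiguous; the separate count checks verify they also live in a single
// sequence. The handle values below are arbitrary, for illustration only:
void psize_sketch()
{
    Range r;
    r.insert( 1, 10 );   // first pair: handles [1, 10]
    r.insert( 20, 25 );  // second pair: handles [20, 25]
    std::cout << r.size() << " handles in " << r.psize() << " pairs\n";  // 16 handles, 2 pairs
}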
1835 
 1836 // test the trivial partition as used by the iMOAB send/receive mesh methods
 1837 // it is hooked into ParCommGraph, but it does not really depend on anything else from MOAB
 1838 // the methods tested here are just utilities
1839 
1840 void test_trivial_partition()
1841 {
 1842  // nothing is modified inside moab
 1843  // by these methods;
 1844  // to instantiate a ParCommGraph, we just need 2 groups;
 1845  // they can be overlapping; we do not test that case yet
1846 
 1847  // create 2 groups: one over tasks 0 and 1, one containing only task 1
1848  MPI_Group worldg;
1849  MPI_Comm duplicate;
1850  MPI_Comm_dup( MPI_COMM_WORLD, &duplicate );
1851  MPI_Comm_group( duplicate, &worldg );
1852 
1853  // this is usually run on 2 processes
1854 
1855  int rank[2] = { 0, 1 };
1856  MPI_Group gr1, gr2;
1857  MPI_Group_incl( worldg, 2, rank, &gr1 ); // will contain 2 ranks, 0 and 1
1858  MPI_Group_incl( worldg, 1, &rank[1], &gr2 ); // will contain 1 rank only (1)
1859 
1860  int comp1 = 10, comp2 = 12;
1861  ParCommGraph* pgr = new ParCommGraph( duplicate, gr1, gr2, comp1, comp2 );
1862 
1863  std::map< int, Range > ranges_to_send;
1864  std::vector< int > number_elems_per_part;
1865  number_elems_per_part.push_back( 6 );
1866  number_elems_per_part.push_back( 10 );
1867 
1868  std::cout << " send sizes ";
1869  for( int k = 0; k < (int)number_elems_per_part.size(); k++ )
1870  {
1871  std::cout << " " << number_elems_per_part[k];
1872  }
1873  std::cout << "\n";
1874 
1875  std::cout << "\n";
1876  pgr->compute_trivial_partition( number_elems_per_part );
1877 
1878  Range verts( 10, 20 );
1879  pgr->split_owned_range( 0, verts );
1880 
1881  for( std::map< int, Range >::iterator it = ranges_to_send.begin(); it != ranges_to_send.end(); it++ )
1882  {
1883  Range& ran = it->second;
1884  std::cout << " receiver " << it->first << " receive range: [" << ran[0] << ", " << ran[ran.size() - 1]
1885  << "] \n";
1886  }
1887 
 1888  delete pgr;
1889 
1890  MPI_Group_free( &worldg );
1891  MPI_Group_free( &gr1 );
1892  MPI_Group_free( &gr2 );
1893  MPI_Comm_free( &duplicate );
1894 }
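
// Purely illustrative: one simple way to split a sender's element count across
// receivers as evenly as possible. This is a sketch of the idea behind a
// "trivial" partition, not necessarily the exact algorithm implemented by
// ParCommGraph::compute_trivial_partition.
std::vector< int > trivial_split_sketch( int total_elems, int num_receivers )
{
    std::vector< int > counts( num_receivers, total_elems / num_receivers );
    for( int i = 0; i < total_elems % num_receivers; i++ )
        counts[i]++;  // spread the remainder over the first few receivers
    return counts;
}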