Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
ReadParallel.cpp
#include "ReadParallel.hpp"
#include "moab/Core.hpp"
#include "moab/ProcConfig.hpp"
#include "moab/FileOptions.hpp"
#include "moab/Error.hpp"
#include "moab/ReaderWriterSet.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include <iostream>
#include <iomanip>
#include <iterator>
#include <sstream>
#include <algorithm>
#include <cassert>

namespace moab
{

const bool debug = false;

const char* ReadParallel::ParallelActionsNames[] = { "PARALLEL READ",
                                                     "PARALLEL READ PART",
                                                     "PARALLEL BROADCAST",
                                                     "PARALLEL DELETE NONLOCAL",
                                                     "PARALLEL CHECK_GIDS_SERIAL",
                                                     "PARALLEL GET_FILESET_ENTS",
                                                     "PARALLEL RESOLVE_SHARED_ENTS",
                                                     "PARALLEL EXCHANGE_GHOSTS",
                                                     "PARALLEL RESOLVE_SHARED_SETS",
                                                     "PARALLEL_AUGMENT_SETS_WITH_GHOSTS",
                                                     "PARALLEL PRINT_PARALLEL",
                                                     "PARALLEL_CREATE_TRIVIAL_PARTITION",
                                                     "PARALLEL_CORRECT_THIN_GHOST_LAYERS" };

const char* ReadParallel::parallelOptsNames[] = { "NONE", "BCAST", "BCAST_DELETE", "READ_DELETE", "READ_PART", "", 0 };
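
// Typical "PARALLEL" option strings that exercise the logic below (a sketch,
// not an exhaustive list; see the MOAB user guide for the full set):
//   "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS=3.0"
//   "PARALLEL=BCAST_DELETE;PARTITION=MATERIAL_SET;PARTITION_DISTRIBUTE"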

ReadParallel::ReadParallel( Interface* impl, ParallelComm* pc )
    : mbImpl( impl ), myPcomm( pc ), myDebug( "ReadPara", std::cerr )
{
    if( !myPcomm )
    {
        myPcomm = ParallelComm::get_pcomm( mbImpl, 0 );
        if( NULL == myPcomm ) myPcomm = new ParallelComm( mbImpl, MPI_COMM_WORLD );
    }

    if( debug )  // For backwards compatibility, enable all debug output if constant is true
        myDebug.set_verbosity( 10 );

    impl->query_interface( mError );
}

ErrorCode ReadParallel::load_file( const char** file_names,
                                   const int num_files,
                                   const EntityHandle* file_set,
                                   const FileOptions& opts,
                                   const ReaderIface::SubsetList* subset_list,
                                   const Tag* file_id_tag )
{
    int tmpval;
    if( MB_SUCCESS == opts.get_int_option( "DEBUG_PIO", 1, tmpval ) )
    {
        myDebug.set_verbosity( tmpval );
        myPcomm->set_debug_verbosity( tmpval );
    }
    myDebug.tprint( 1, "Setting up...\n" );

    // Get parallel settings
    int parallel_mode;
    ErrorCode result = opts.match_option( "PARALLEL", parallelOptsNames, parallel_mode );
    if( MB_FAILURE == result )
    {
        MB_SET_ERR( MB_FAILURE, "Unexpected value for 'PARALLEL' option" );
    }
    else if( MB_ENTITY_NOT_FOUND == result )
    {
        parallel_mode = 0;
    }

    // Get partition setting
    bool distrib;
    std::string partition_tag_name;
    result = opts.get_option( "PARTITION", partition_tag_name );
    if( MB_ENTITY_NOT_FOUND == result )
    {
        distrib = false;
        partition_tag_name = "";
    }
    else
    {
        distrib = true;
        if( partition_tag_name.empty() ) partition_tag_name = PARALLEL_PARTITION_TAG_NAME;

        // Also get deprecated PARTITION_DISTRIBUTE option
        // so that higher-level code doesn't return an error
        // due to an unrecognized option
        opts.get_null_option( "PARTITION_DISTRIBUTE" );
    }

    // Get partition tag value(s), if any, and whether they're to be
    // distributed or assigned
    std::vector< int > partition_tag_vals;
    opts.get_ints_option( "PARTITION_VAL", partition_tag_vals );

    // If the partition tag name is "TRIVIAL" and no tag by that name exists
    // yet, create a trivial partition during the read
    bool create_trivial_partition = false;
    if( partition_tag_name == std::string( "TRIVIAL" ) )
    {
        Tag ttag;  // See if the trivial tag exists
        result = mbImpl->tag_get_handle( partition_tag_name.c_str(), ttag );
        if( MB_TAG_NOT_FOUND == result ) create_trivial_partition = true;
    }
    // See if we need to report times
    bool cputime = false;
    result = opts.get_null_option( "CPUTIME" );
    if( MB_SUCCESS == result ) cputime = true;

    // See if we need to print parallel information
    bool print_parallel = false;
    result = opts.get_null_option( "PRINT_PARALLEL" );
    if( MB_SUCCESS == result ) print_parallel = true;

    // Get ghosting options
    std::string ghost_str;
    int bridge_dim, ghost_dim = -1, num_layers, addl_ents = 0;
    result = opts.get_str_option( "PARALLEL_GHOSTS", ghost_str );
    if( MB_TYPE_OUT_OF_RANGE == result )
    {
        // Option given with no value: use default ghosting
        ghost_dim = 3;
        bridge_dim = 0;
        num_layers = 1;
    }
    else if( MB_SUCCESS == result )
    {
        int num_fields = sscanf( ghost_str.c_str(), "%d.%d.%d.%d", &ghost_dim, &bridge_dim, &num_layers, &addl_ents );
        if( 3 > num_fields )
        {
            MB_SET_ERR( MB_FAILURE, "Didn't read 3 fields from PARALLEL_GHOSTS string" );
        }
    }
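
    // PARALLEL_GHOSTS is encoded as <ghost_dim>.<bridge_dim>.<num_layers>[.<addl_ents>];
    // e.g. "3.0.1" requests one layer of 3D ghost elements bridged through
    // vertices, with the fourth field optional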

    // Get resolve_shared_ents option
    std::string shared_str;
    int resolve_dim = -2, shared_dim = -1;
    result = opts.get_str_option( "PARALLEL_RESOLVE_SHARED_ENTS", shared_str );
    if( MB_TYPE_OUT_OF_RANGE == result )
    {
        resolve_dim = -1;
        shared_dim = -1;
    }
    else if( MB_SUCCESS == result )
    {
        int num_fields = sscanf( shared_str.c_str(), "%d.%d", &resolve_dim, &shared_dim );
        if( 2 != num_fields )
        {
            MB_SET_ERR( MB_FAILURE, "Didn't read 2 fields from PARALLEL_RESOLVE_SHARED_ENTS string" );
        }
    }
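
    // PARALLEL_RESOLVE_SHARED_ENTS is encoded as <resolve_dim>.<shared_dim>,
    // e.g. "3.0" resolves vertices (dim 0) shared between 3D elements; given
    // without a value, both dimensions are left to be inferred (-1)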

    // Get skip augmenting with ghosts option
    bool skip_augment = false;
    result = opts.get_null_option( "SKIP_AUGMENT_WITH_GHOSTS" );
    if( MB_SUCCESS == result ) skip_augment = true;

    bool correct_thin_ghosts = false;
    result = opts.get_null_option( "PARALLEL_THIN_GHOST_LAYER" );
    if( MB_SUCCESS == result ) correct_thin_ghosts = true;

    // Get MPI IO processor rank
    int reader_rank;
    result = opts.get_int_option( "MPI_IO_RANK", reader_rank );
    if( MB_ENTITY_NOT_FOUND == result )
        reader_rank = 0;
    else if( MB_SUCCESS != result )
    {
        MB_SET_ERR( MB_FAILURE, "Unexpected value for 'MPI_IO_RANK' option" );
    }

    // Now that we've parsed all the parallel options, make an instruction queue
    std::vector< int > pa_vec;
    bool is_reader = ( reader_rank == (int)myPcomm->proc_config().proc_rank() );

    bool partition_by_rank = false;
    if( MB_SUCCESS == opts.get_null_option( "PARTITION_BY_RANK" ) )
    {
        partition_by_rank = true;
        if( !partition_tag_vals.empty() )
        {
            MB_SET_ERR( MB_FAILURE, "Cannot specify both PARTITION_VAL and PARTITION_BY_RANK" );
        }
    }

    double factor_seq;
    if( MB_SUCCESS == opts.get_real_option( "PARALLEL_SEQUENCE_FACTOR", factor_seq ) )
    {
        if( factor_seq < 1. ) MB_SET_ERR( MB_FAILURE, "Cannot have sequence factor less than 1." );
        mbImpl->set_sequence_multiplier( factor_seq );
    }
    switch( parallel_mode )
    {
        case POPT_BCAST:
            myDebug.print( 1, "Read mode is BCAST\n" );
            if( is_reader )
            {
                pa_vec.push_back( PA_READ );
                pa_vec.push_back( PA_CHECK_GIDS_SERIAL );
                pa_vec.push_back( PA_GET_FILESET_ENTS );
            }
            pa_vec.push_back( PA_BROADCAST );
            if( !is_reader ) pa_vec.push_back( PA_GET_FILESET_ENTS );
            break;

        case POPT_BCAST_DELETE:
            myDebug.print( 1, "Read mode is BCAST_DELETE\n" );
            if( is_reader )
            {
                pa_vec.push_back( PA_READ );
                pa_vec.push_back( PA_CHECK_GIDS_SERIAL );
                pa_vec.push_back( PA_GET_FILESET_ENTS );
                if( create_trivial_partition ) pa_vec.push_back( PA_CREATE_TRIVIAL_PARTITION );
            }
            pa_vec.push_back( PA_BROADCAST );
            if( !is_reader ) pa_vec.push_back( PA_GET_FILESET_ENTS );
            pa_vec.push_back( PA_DELETE_NONLOCAL );
            break;

        case POPT_DEFAULT:
        case POPT_READ_DELETE:
            myDebug.print( 1, "Read mode is READ_DELETE\n" );
            pa_vec.push_back( PA_READ );
            pa_vec.push_back( PA_CHECK_GIDS_SERIAL );
            pa_vec.push_back( PA_GET_FILESET_ENTS );
            pa_vec.push_back( PA_DELETE_NONLOCAL );
            break;

        case POPT_READ_PART:
            myDebug.print( 1, "Read mode is READ_PART\n" );
            pa_vec.push_back( PA_READ_PART );
            break;

        default:
            MB_SET_ERR( MB_FAILURE, "Unexpected parallel read mode" );
    }

    if( -2 != resolve_dim ) pa_vec.push_back( PA_RESOLVE_SHARED_ENTS );

    if( -1 != ghost_dim ) pa_vec.push_back( PA_EXCHANGE_GHOSTS );

    if( -2 != resolve_dim )
    {
        pa_vec.push_back( PA_RESOLVE_SHARED_SETS );
        if( -1 != ghost_dim && !skip_augment ) pa_vec.push_back( PA_AUGMENT_SETS_WITH_GHOSTS );
        if( -1 != ghost_dim && correct_thin_ghosts ) pa_vec.push_back( PA_CORRECT_THIN_GHOSTS );
    }

    if( print_parallel ) pa_vec.push_back( PA_PRINT_PARALLEL );
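
    // For example, "PARALLEL=READ_PART;PARALLEL_RESOLVE_SHARED_ENTS=3.0;PARALLEL_GHOSTS=3.0.1"
    // yields the queue PA_READ_PART, PA_RESOLVE_SHARED_ENTS, PA_EXCHANGE_GHOSTS,
    // PA_RESOLVE_SHARED_SETS, PA_AUGMENT_SETS_WITH_GHOSTS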

    result = load_file( file_names, num_files, file_set, parallel_mode, partition_tag_name, partition_tag_vals, distrib,
                        partition_by_rank, pa_vec, opts, subset_list, file_id_tag, reader_rank, cputime, resolve_dim,
                        shared_dim, ghost_dim, bridge_dim, num_layers, addl_ents );MB_CHK_ERR( result );

    if( parallel_mode == POPT_BCAST_DELETE && !is_reader ) opts.mark_all_seen();

    return MB_SUCCESS;
}

ErrorCode ReadParallel::load_file( const char** file_names,
                                   const int num_files,
                                   const EntityHandle* file_set_ptr,
                                   int /*parallel_mode*/,
                                   std::string& partition_tag_name,
                                   std::vector< int >& partition_tag_vals,
                                   bool distrib,
                                   bool partition_by_rank,
                                   std::vector< int >& pa_vec,
                                   const FileOptions& opts,
                                   const ReaderIface::SubsetList* subset_list,
                                   const Tag* file_id_tag,
                                   const int reader_rank,
                                   const bool cputime,
                                   const int resolve_dim,
                                   const int shared_dim,
                                   const int ghost_dim,
                                   const int bridge_dim,
                                   const int num_layers,
                                   const int addl_ents )
{
    ErrorCode result = MB_SUCCESS;
    if( myPcomm == NULL ) myPcomm = new ParallelComm( mbImpl, MPI_COMM_WORLD );

    Range entities;
    Tag file_set_tag = 0;
    int other_sets = 0;
    ReaderWriterSet::iterator iter;
    Range other_file_sets, file_sets;
    Core* impl = dynamic_cast< Core* >( mbImpl );

    std::vector< double > act_times( pa_vec.size() + 1 );
    std::vector< int >::iterator vit;
    int i, j;
    act_times[0] = MPI_Wtime();

    // Make a new set for the parallel read
    EntityHandle file_set;
    if( !file_set_ptr || !( *file_set_ptr ) )
    {
        result = mbImpl->create_meshset( MESHSET_SET, file_set );MB_CHK_SET_ERR( result, "Trouble creating file set" );
    }
    else
        file_set = *file_set_ptr;

    bool i_read = false;
    Tag id_tag = 0;
    bool use_id_tag = false;
    Range ents;

    for( i = 1, vit = pa_vec.begin(); vit != pa_vec.end(); ++vit, i++ )
    {
        ErrorCode tmp_result = MB_SUCCESS;
        switch( *vit )
        {
            //==================
            case PA_READ:
                i_read = true;

                for( j = 0; j < num_files; j++ )
                {
                    myDebug.tprintf( 1, "Reading file: \"%s\"\n", file_names[j] );

                    EntityHandle new_file_set;
                    result = mbImpl->create_meshset( MESHSET_SET, new_file_set );MB_CHK_SET_ERR( result, "Trouble creating file set" );
                    tmp_result = impl->serial_load_file( file_names[j], &new_file_set, opts, subset_list, file_id_tag );
                    if( MB_SUCCESS != tmp_result ) break;

                    // Put the contents of each file set for the reader into the
                    // file set for the parallel read
                    assert( 0 != new_file_set );
                    Range all_ents;
                    tmp_result = mbImpl->get_entities_by_handle( new_file_set, all_ents );
                    if( MB_SUCCESS != tmp_result ) break;
                    all_ents.insert( new_file_set );
                    tmp_result = mbImpl->add_entities( file_set, all_ents );
                    if( MB_SUCCESS != tmp_result ) break;
                }
                if( MB_SUCCESS != tmp_result ) break;

                // Mark the file set for this parallel reader
                tmp_result = mbImpl->tag_get_handle( "__file_set", 1, MB_TYPE_INTEGER, file_set_tag,
                                                     MB_TAG_SPARSE | MB_TAG_CREAT );
                if( MB_SUCCESS != tmp_result ) break;

                tmp_result = mbImpl->tag_set_data( file_set_tag, &file_set, 1, &other_sets );
                break;

            //==================
            case PA_READ_PART: {
                myDebug.tprintf( 1, "Reading file: \"%s\"\n", file_names[0] );

                i_read = true;
                if( num_files != 1 )
                {
                    MB_SET_ERR( MB_NOT_IMPLEMENTED, "Multiple file read not supported for READ_PART" );
                }

                // If we're going to resolve shared entities, then we need
                // to ask the file reader to populate a tag with unique ids
                // (typically file ids/indices/whatever)
                if( std::find( pa_vec.begin(), pa_vec.end(), PA_RESOLVE_SHARED_ENTS ) != pa_vec.end() )
                {
                    use_id_tag = true;
                    if( !file_id_tag )
                    {
                        // This tag is really used for resolving shared entities with crystal router;
                        // in the end, this is an identifier that gets converted to long
                        // (the hdf5 file reader also converts from the hdf5 file id type to long)
                        tmp_result = mbImpl->tag_get_handle( "", sizeof( long ), MB_TYPE_OPAQUE, id_tag,
                                                             MB_TAG_DENSE | MB_TAG_CREAT | MB_TAG_EXCL | MB_TAG_ANON );
                        if( MB_SUCCESS != tmp_result ) break;
                        file_id_tag = &id_tag;
                    }
                }

                ReaderIface::IDTag parts = { partition_tag_name.c_str(), 0, 0 };
                ReaderIface::SubsetList sl;
                sl.num_parts = 0;
                int rank = myPcomm->rank();
                if( partition_by_rank )
                {
                    assert( partition_tag_vals.empty() );
                    parts.tag_values = &rank;
                    parts.num_tag_values = 1;
                }
                else
                {
                    sl.num_parts = myPcomm->size();
                    sl.part_number = myPcomm->rank();
                    if( !partition_tag_vals.empty() )
                    {
                        parts.tag_values = &partition_tag_vals[0];
                        parts.num_tag_values = partition_tag_vals.size();
                    }
                }
                std::vector< ReaderIface::IDTag > subset;
                if( subset_list )
                {
                    std::vector< ReaderIface::IDTag > tmplist( subset_list->tag_list,
                                                               subset_list->tag_list + subset_list->tag_list_length );
                    tmplist.push_back( parts );
                    subset.swap( tmplist );
                    sl.tag_list = &subset[0];
                    sl.tag_list_length = subset.size();
                }
                else
                {
                    sl.tag_list = &parts;
                    sl.tag_list_length = 1;
                }
                tmp_result = impl->serial_load_file( *file_names, &file_set, opts, &sl, file_id_tag );
                if( MB_SUCCESS != tmp_result ) break;

                if( !partition_tag_name.empty() )
                {
                    Tag part_tag;
                    tmp_result = impl->tag_get_handle( partition_tag_name.c_str(), 1, MB_TYPE_INTEGER, part_tag );
                    if( MB_SUCCESS != tmp_result ) break;

                    tmp_result = impl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &part_tag, 0, 1,
                                                                     myPcomm->partition_sets() );
                }
            }
            break;

            //==================
            case PA_GET_FILESET_ENTS:
                myDebug.tprint( 1, "Getting fileset entities.\n" );

                // Get entities in the file set, and add actual file set to it;
                // mark the file set to make sure any receiving procs know which it is
                tmp_result = mbImpl->get_entities_by_handle( file_set, entities );
                if( MB_SUCCESS != tmp_result )
                {
                    entities.clear();
                    break;
                }

                // Add actual file set to entities too
                entities.insert( file_set );
                break;
            //==================
            case PA_CREATE_TRIVIAL_PARTITION: {
                myDebug.tprint( 1, "Creating trivial partition for higher-dimension entities.\n" );
                // Get the highest-dimension entities present (3, then 2, then 1)
                Range hi_dim_ents = entities.subset_by_dimension( 3 );
                if( hi_dim_ents.empty() ) hi_dim_ents = entities.subset_by_dimension( 2 );
                if( hi_dim_ents.empty() ) hi_dim_ents = entities.subset_by_dimension( 1 );
                if( hi_dim_ents.empty() ) MB_SET_ERR( MB_FAILURE, "There are no elements of dim 1-3" );

                size_t num_hi_ents = hi_dim_ents.size();
                unsigned int num_parts = myPcomm->size();

                // First create the trivial partition tag
                int dum_id = -1;
                Tag ttag;  // Trivial tag
                tmp_result = mbImpl->tag_get_handle( partition_tag_name.c_str(), 1, MB_TYPE_INTEGER, ttag,
                                                     MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );MB_CHK_SET_ERR( tmp_result, "Can't create trivial partition tag" );

                // Compute the number of high-dimension entities on each part
                size_t nPartEnts = num_hi_ents / num_parts;

                // Number of extra entities after equal split over parts
                int iextra = num_hi_ents % num_parts;
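
                // Example: 11 entities over 3 parts gives nPartEnts = 3 and
                // iextra = 2, so parts 0 and 1 take 4 entities and part 2 takes 3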
                Range::iterator itr = hi_dim_ents.begin();
                for( int k = 0; k < (int)num_parts; k++ )
                {
                    // Create a mesh set and insert a subrange of entities
                    EntityHandle part_set;
                    tmp_result = mbImpl->create_meshset( MESHSET_SET, part_set );MB_CHK_SET_ERR( tmp_result, "Can't create part set" );
                    entities.insert( part_set );

                    tmp_result = mbImpl->tag_set_data( ttag, &part_set, 1, &k );MB_CHK_SET_ERR( tmp_result, "Can't set trivial partition tag" );
                    Range subrange;
                    size_t num_ents_in_part = nPartEnts;
                    if( k < iextra ) num_ents_in_part++;  // First 'iextra' parts take one extra entity
                    for( size_t i1 = 0; i1 < num_ents_in_part; i1++, itr++ )
                        subrange.insert( *itr );
                    tmp_result = mbImpl->add_entities( part_set, subrange );MB_CHK_SET_ERR( tmp_result, "Can't add entities to trivial part " << k );
                    myDebug.tprintf( 1, "create trivial part %d with %lu entities\n", k, num_ents_in_part );
                    tmp_result = mbImpl->add_entities( file_set, &part_set, 1 );MB_CHK_SET_ERR( tmp_result, "Can't add trivial part to file set " << k );
                }
            }

            break;
            //==================
            case PA_BROADCAST:
                // Do the actual broadcast; if single-processor, ignore error
                myDebug.tprint( 1, "Broadcasting mesh.\n" );

                if( myPcomm->proc_config().proc_size() > 1 )
                {
                    tmp_result = myPcomm->broadcast_entities( reader_rank, entities );
                    if( MB_SUCCESS != tmp_result ) break;
                }

                if( debug )
                {
                    std::cerr << "Bcast done; entities:" << std::endl;
                    mbImpl->list_entities( 0, 0 );
                }

                // Add the received entities to this fileset if I wasn't the reader
                if( !i_read && MB_SUCCESS == tmp_result ) tmp_result = mbImpl->add_entities( file_set, entities );

                break;

            //==================
            case PA_DELETE_NONLOCAL:
                myDebug.tprint( 1, "Deleting nonlocal entities.\n" );

                tmp_result = delete_nonlocal_entities( partition_tag_name, partition_tag_vals, distrib, file_set );
                if( debug )
                {
                    std::cerr << "Delete nonlocal done; entities:" << std::endl;
                    mbImpl->list_entities( 0, 0 );
                }

                if( MB_SUCCESS == tmp_result ) tmp_result = create_partition_sets( partition_tag_name, file_set );

                break;

            //==================
            case PA_CHECK_GIDS_SERIAL:
                myDebug.tprint( 1, "Checking global IDs.\n" );

                tmp_result = myPcomm->check_global_ids( file_set, 0, 1, true, false );
                break;

            //==================
            case PA_RESOLVE_SHARED_ENTS:
                myDebug.tprint( 1, "Resolving shared entities.\n" );

                if( 1 == myPcomm->size() )
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result =
                        myPcomm->resolve_shared_ents( file_set, resolve_dim, shared_dim, use_id_tag ? file_id_tag : 0 );
                if( MB_SUCCESS != tmp_result ) break;

#ifndef NDEBUG
                // Check number of owned vertices through pcomm's public interface
                tmp_result = mbImpl->get_entities_by_type( 0, MBVERTEX, ents );
                if( MB_SUCCESS == tmp_result )
                    tmp_result = myPcomm->filter_pstatus( ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );
                if( MB_SUCCESS == tmp_result )
                    myDebug.tprintf( 1, "Proc %u reports %lu owned vertices.\n", myPcomm->proc_config().proc_rank(),
                                     ents.size() );
#endif
                break;

            //==================
            case PA_EXCHANGE_GHOSTS:
                myDebug.tprint( 1, "Exchanging ghost entities.\n" );

                tmp_result = myPcomm->exchange_ghost_cells( ghost_dim, bridge_dim, num_layers, addl_ents, true, true,
                                                            &file_set );
                break;

            //==================
            case PA_RESOLVE_SHARED_SETS:
                myDebug.tprint( 1, "Resolving shared sets.\n" );

                if( 1 == myPcomm->size() )
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result = myPcomm->resolve_shared_sets( file_set, use_id_tag ? file_id_tag : 0 );
                break;

            //==================
            case PA_AUGMENT_SETS_WITH_GHOSTS:
                myDebug.tprint( 1, "Augmenting sets with ghost entities.\n" );

                if( 1 == myPcomm->size() )
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result = myPcomm->augment_default_sets_with_ghosts( file_set );
                break;
            //==================
            case PA_CORRECT_THIN_GHOSTS:
                myDebug.tprint( 1, "Correcting thin ghost layers.\n" );
                if( 2 >= myPcomm->size() )  // This is a problem only for multi-shared entities
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result = myPcomm->correct_thin_ghost_layers();
                break;
            case PA_PRINT_PARALLEL:
                myDebug.tprint( 1, "Printing parallel information.\n" );

                tmp_result = myPcomm->list_entities( 0, -1 );
                break;

            //==================
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected parallel action" );
        }  // switch (*vit)

        if( MB_SUCCESS != tmp_result )
        {
            MB_SET_ERR( MB_FAILURE, "Failed in step " << ParallelActionsNames[*vit] );
        }

        if( cputime ) act_times[i] = MPI_Wtime();
    }  // for (i = 1, vit = pa_vec.begin(); vit != pa_vec.end(); ++vit, i++)

    if( use_id_tag )
    {
        result = mbImpl->tag_delete( id_tag );MB_CHK_SET_ERR( result, "Trouble deleting id tag" );
    }

    if( cputime )
    {
        // Convert cumulative timestamps to per-step times
        for( i = pa_vec.size(); i > 0; i-- )
            act_times[i] -= act_times[i - 1];

        // Replace initial time with overall time
        act_times[0] = MPI_Wtime() - act_times[0];
        // Get the maximum over all procs
        if( 0 != myPcomm->proc_config().proc_rank() )
        {
            MPI_Reduce( &act_times[0], 0, pa_vec.size() + 1, MPI_DOUBLE, MPI_MAX, 0,
                        myPcomm->proc_config().proc_comm() );
        }
        else
        {
#if( MPI_VERSION >= 2 )
            MPI_Reduce( MPI_IN_PLACE, &act_times[0], pa_vec.size() + 1, MPI_DOUBLE, MPI_MAX, 0,
                        myPcomm->proc_config().proc_comm() );
#else
            // Note, extra comm-size allocation is required
            std::vector< double > act_times_tmp( pa_vec.size() + 1 );
            MPI_Reduce( &act_times[0], &act_times_tmp[0], pa_vec.size() + 1, MPI_DOUBLE, MPI_MAX, 0,
                        myPcomm->proc_config().proc_comm() );
            act_times = act_times_tmp;  // Extra copy here too
#endif
            std::cout << "Parallel Read times: " << std::endl;
            for( i = 1, vit = pa_vec.begin(); vit != pa_vec.end(); ++vit, i++ )
                std::cout << " " << act_times[i] << " " << ParallelActionsNames[*vit] << std::endl;
            std::cout << " " << act_times[0] << " PARALLEL TOTAL" << std::endl;
        }
    }

    return MB_SUCCESS;
}

ErrorCode ReadParallel::delete_nonlocal_entities( std::string& ptag_name,
                                                  std::vector< int >& ptag_vals,
                                                  bool distribute,
                                                  EntityHandle file_set )
{
    Range partition_sets;

    Tag ptag;
    ErrorCode result = mbImpl->tag_get_handle( ptag_name.c_str(), 1, MB_TYPE_INTEGER, ptag );MB_CHK_SET_ERR( result, "Failed getting tag handle in delete_nonlocal_entities" );

    result = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &ptag, NULL, 1, myPcomm->partition_sets() );MB_CHK_SET_ERR( result, "Failed to get sets with partition-type tag" );

    int proc_sz = myPcomm->proc_config().proc_size();
    int proc_rk = myPcomm->proc_config().proc_rank();

    if( !ptag_vals.empty() )
    {
        // Values input; get sets with those values
        Range tmp_sets;
        std::vector< int > tag_vals( myPcomm->partition_sets().size() );
        result = mbImpl->tag_get_data( ptag, myPcomm->partition_sets(), &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to get tag data for partition vals tag" );
        for( std::vector< int >::iterator pit = tag_vals.begin(); pit != tag_vals.end(); ++pit )
        {
            std::vector< int >::iterator pit2 = std::find( ptag_vals.begin(), ptag_vals.end(), *pit );
            if( pit2 != ptag_vals.end() ) tmp_sets.insert( myPcomm->partition_sets()[pit - tag_vals.begin()] );
        }

        myPcomm->partition_sets().swap( tmp_sets );
    }

    if( distribute )
    {
        // For now, require at least as many partition sets as procs
        if( myPcomm->partition_sets().size() < (unsigned int)proc_sz )
        {
            MB_SET_ERR( MB_FAILURE, "Too few parts; P = " << proc_rk << ", tag = " << ptag
                                                          << ", # sets = " << myPcomm->partition_sets().size() );
        }

        Range tmp_sets;
        // Distribute the partition sets
        unsigned int num_sets = myPcomm->partition_sets().size() / proc_sz;
        unsigned int num_leftover = myPcomm->partition_sets().size() % proc_sz;
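
        // Example: 10 sets over 4 procs gives num_sets = 2 and num_leftover = 2,
        // so ranks 0 and 1 take 3 sets (starting at 0 and 3) while ranks 2 and 3
        // take 2 sets (starting at 6 and 8)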
        int begin_set = 0;
        if( proc_rk < (int)num_leftover )
        {
            num_sets++;
            begin_set = num_sets * proc_rk;
        }
        else
            begin_set = proc_rk * num_sets + num_leftover;

        for( unsigned int i = 0; i < num_sets; i++ )
            tmp_sets.insert( myPcomm->partition_sets()[begin_set + i] );

        myPcomm->partition_sets().swap( tmp_sets );
    }

    myDebug.print( 1, "My partition sets: ", myPcomm->partition_sets() );

    result = delete_nonlocal_entities( file_set );MB_CHK_ERR( result );

    return MB_SUCCESS;
}

ErrorCode ReadParallel::create_partition_sets( std::string& ptag_name, EntityHandle file_set )
{
    int proc_rk = myPcomm->proc_config().proc_rank();
    ErrorCode result = MB_SUCCESS;

    Tag ptag;

    // Tag the partition sets with a standard tag name
    if( ptag_name.empty() ) ptag_name = PARALLEL_PARTITION_TAG_NAME;
    bool tag_created = false;
    result = mbImpl->tag_get_handle( ptag_name.c_str(), 1, MB_TYPE_INTEGER, ptag, MB_TAG_SPARSE | MB_TAG_CREAT, 0,
                                     &tag_created );MB_CHK_SET_ERR( result, "Trouble getting PARALLEL_PARTITION tag" );

    if( !tag_created )
    {
        // This tag already exists; better check to see that tagged sets
        // agree with this partition
        Range tagged_sets;
        int* proc_rk_ptr = &proc_rk;
        result = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &ptag, (const void* const*)&proc_rk_ptr,
                                                       1, tagged_sets );MB_CHK_SET_ERR( result, "Trouble getting tagged sets" );
        if( !tagged_sets.empty() && tagged_sets != myPcomm->partition_sets() )
        {
            result = mbImpl->tag_delete_data( ptag, tagged_sets );MB_CHK_SET_ERR( result, "Trouble deleting data of PARALLEL_PARTITION tag" );
        }
        else if( tagged_sets == myPcomm->partition_sets() )
            return MB_SUCCESS;
    }

    // If we get here, we need to assign the tag
    std::vector< int > values( myPcomm->partition_sets().size() );
    for( unsigned int i = 0; i < myPcomm->partition_sets().size(); i++ )
        values[i] = proc_rk;
    result = mbImpl->tag_set_data( ptag, myPcomm->partition_sets(), &values[0] );MB_CHK_SET_ERR( result, "Trouble setting data to PARALLEL_PARTITION tag" );

    return MB_SUCCESS;
}

ErrorCode ReadParallel::delete_nonlocal_entities( EntityHandle file_set )
{
    // Get partition entities and entities related to/used by those
    ReadUtilIface* read_iface;
    mbImpl->query_interface( read_iface );
    Range partition_ents, all_sets;

    myDebug.tprint( 2, "Gathering related entities.\n" );

    ErrorCode result = read_iface->gather_related_ents( myPcomm->partition_sets(), partition_ents, &file_set );MB_CHK_SET_ERR( result, "Failure gathering related entities" );

    // Get pre-existing entities
    Range file_ents;
    result = mbImpl->get_entities_by_handle( file_set, file_ents );MB_CHK_SET_ERR( result, "Couldn't get pre-existing entities" );

    if( 0 == myPcomm->proc_config().proc_rank() )
    {
        myDebug.print( 2, "File entities: ", file_ents );
    }

    // Get deletable entities by subtracting partition ents from file ents
    Range deletable_ents = subtract( file_ents, partition_ents );

    // Cache deletable vs. keepable sets
    Range deletable_sets = deletable_ents.subset_by_type( MBENTITYSET );
    Range keepable_sets = subtract( file_ents.subset_by_type( MBENTITYSET ), deletable_sets );

    myDebug.tprint( 2, "Removing deletable entities from keepable sets.\n" );

    // Remove deletable ents from all keepable sets
    for( Range::iterator rit = keepable_sets.begin(); rit != keepable_sets.end(); ++rit )
    {
        result = mbImpl->remove_entities( *rit, deletable_ents );MB_CHK_SET_ERR( result, "Failure removing deletable entities" );
    }
    result = mbImpl->remove_entities( file_set, deletable_ents );MB_CHK_SET_ERR( result, "Failure removing deletable entities" );

    myDebug.tprint( 2, "Deleting deletable entities.\n" );

    if( 0 == myPcomm->proc_config().proc_rank() )
    {
        myDebug.print( 2, "Deletable sets: ", deletable_sets );
    }

    // Delete sets, then ents
    if( !deletable_sets.empty() )
    {
        result = mbImpl->delete_entities( deletable_sets );MB_CHK_SET_ERR( result, "Failure deleting sets in delete_nonlocal_entities" );
    }

    deletable_ents -= deletable_sets;

    if( 0 == myPcomm->proc_config().proc_rank() )
    {
        myDebug.print( 2, "Deletable entities: ", deletable_ents );
    }

    if( !deletable_ents.empty() )
    {
        result = mbImpl->delete_entities( deletable_ents );MB_CHK_SET_ERR( result, "Failure deleting entities in delete_nonlocal_entities" );
    }

    return MB_SUCCESS;
}

}  // namespace moab
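
Usage sketch (not part of ReadParallel.cpp): applications normally reach this
reader through the public Interface::load_file API, which dispatches to
ReadParallel whenever a "PARALLEL" option is present. The file name "mesh.h5m"
and the exact option combination below are illustrative assumptions; the calls
themselves (moab::Core, load_file, get_entities_by_dimension) are standard
MOAB API. Assumes an MPI-enabled MOAB build and an .h5m file that carries
PARALLEL_PARTITION sets.

#include "moab/Core.hpp"
#include <mpi.h>
#include <iostream>

int main( int argc, char** argv )
{
    MPI_Init( &argc, &argv );
    {
        moab::Core mb;
        // Read only this rank's part, resolve shared vertices between 3D
        // elements, then add one layer of vertex-bridged ghost elements
        moab::ErrorCode rval = mb.load_file( "mesh.h5m", 0,
                                             "PARALLEL=READ_PART;"
                                             "PARTITION=PARALLEL_PARTITION;"
                                             "PARALLEL_RESOLVE_SHARED_ENTS=3.0;"
                                             "PARALLEL_GHOSTS=3.0.1" );
        if( moab::MB_SUCCESS != rval )
        {
            std::cerr << "Parallel read failed" << std::endl;
            MPI_Abort( MPI_COMM_WORLD, 1 );
        }

        // Count locally loaded 3D elements (owned plus ghosts)
        moab::Range elems;
        rval = mb.get_entities_by_dimension( 0, 3, elems );
        if( moab::MB_SUCCESS == rval )
            std::cout << "Loaded " << elems.size() << " elements" << std::endl;
    }
    MPI_Finalize();
    return 0;
}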