MOAB: Mesh Oriented datABase (version 5.5.0)
mbparallelcomm_test.cpp
1 /** test of ParallelComm functionality
2  *
3  * To run:
4  *
5  * mpirun -np <#procs> mbparallelcomm_test
6  *
7  */
8 
9 #include "moab/ParallelComm.hpp"
10 #include "MBParallelConventions.h"
11 #include "MBTagConventions.hpp"
12 #include "moab/Core.hpp"
13 #include "ScdVertexData.hpp"
14 #include "StructuredElementSeq.hpp"
15 #include "SequenceManager.hpp"
16 #include "moab/Error.hpp"
17 #include "moab_mpi.h"
18 #include <iostream>
19 #include <sstream>
20 #include <cassert>
21 
22 #define REALTFI 1
23 
24 const bool debug = false;
25 
26 using namespace moab;
27 
28 #define ERROR( a, b ) \
29  { \
30  std::cerr << ( a ) << std::endl; \
31  return b; \
32  }
33 
34 #define PRINT_LAST_ERROR \
35  { \
36  std::string last_error; \
37  result = mbImpl->get_last_error( last_error ); \
38  if( last_error.empty() ) \
39  std::cerr << "(none)" << std::endl; \
40  else \
41  std::cerr << last_error << std::endl; \
42  }
43 #define RRA( a ) \
44  if( MB_SUCCESS != result ) \
45  { \
46  std::cerr << ( a ); \
47  return result; \
48  }
49 
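Note: PRINT_LAST_ERROR and RRA both assume a local ErrorCode variable named result
is in scope (PRINT_LAST_ERROR also assumes the Interface* mbImpl), as in this
minimal sketch of the call pattern used throughout this file (the file name is a
placeholder):

// ErrorCode result = mbImpl->load_file( "mesh.h5m", 0, options );
// if( MB_SUCCESS != result ) PRINT_LAST_ERROR;  // print MOAB's last error string
// RRA( "Failed to load file." );                // on failure, return result to the caller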
50 ErrorCode create_linear_mesh( Interface* mbImpl, int N, int M, int& nshared );
51 
52 ErrorCode create_scd_mesh( Interface* mbImpl, int IJK, int& nshared );
53 
54 ErrorCode read_file( Interface* mbImpl,
55  std::vector< std::string >& filenames,
56  const char* tag_name,
57  int tag_val,
58  int distrib,
59  int parallel_option,
60  int resolve_shared,
61  int with_ghosts,
62  int use_mpio,
63  bool print_parallel );
64 
65 ErrorCode test_packing( Interface* mbImpl, const char* filename );
66 
67 ErrorCode report_nsets( Interface* mbImpl );
68 
69 ErrorCode report_iface_ents( Interface* mbImpl, std::vector< ParallelComm* >& pcs );
70 
71 void print_usage( const char* );
72 
73 int main( int argc, char** argv )
74 {
75  // initialize MPI first, to determine the number of procs and this proc's rank
76  MPI_Init( &argc, &argv );
77 
78  int nprocs, rank;
79  MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
80  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
81 
82  // start time
83  double stime = 0, rtime = 0, dtime = 0, ltime = 0;
84  if( 0 == rank ) stime = MPI_Wtime();
85 
86  // create the MOAB instance
87  Interface* mbImpl = new Core;
88  if( NULL == mbImpl ) return 1;
89 
90  ErrorCode result = MB_SUCCESS;
91 
92  // each interior proc has a vector of N+M vertices, sharing
93  // M vertices each with lower- and upper-rank processors, except
94  // procs on the end
95 
96  // get N, M from command line
97  if( argc < 3 )
98  {
99  if( 0 == rank ) print_usage( argv[0] );
100  MPI_Finalize();
101  return 1;
102  }
103 
104  int npos = 1, tag_val, distrib, with_ghosts = 1, resolve_shared = 1, use_mpio = 0;
105  bool print_parallel = false;
106  const char* tag_name;
107  std::vector< std::string > filenames;
108  int parallel_option = 0;
109  int num_files;
110 
111  if( !strcmp( argv[npos], "-p" ) ) print_parallel = true;
112 
113  while( npos != argc )
114  {
115  ErrorCode tmp_result;
116  int this_opt = strtol( argv[npos++], NULL, 0 );
117  switch( this_opt )
118  {
119  case 0:
120  case -1:
121  case -2:
122  case -3:
123  parallel_option = this_opt;
124  continue;
125 
126  case 3:
127  // read a file in parallel from the filename on the command line
128  tag_name = "MATERIAL_SET";
129  tag_val = -1;
130  num_files = strtol( argv[npos++], NULL, 0 );
131  if( 0 == num_files )
132  {
133  if( 0 == rank ) print_usage( argv[0] );
134  MPI_Finalize();
135  return 1;
136  }
137  while( num_files-- && npos < argc )
138  filenames.push_back( std::string( argv[npos++] ) );
139  if( npos < argc ) tag_name = argv[npos++];
140  if( npos < argc ) tag_val = strtol( argv[npos++], NULL, 0 );
141  if( npos < argc )
142  distrib = strtol( argv[npos++], NULL, 0 );
143  else
144  distrib = 1;
145  if( npos < argc ) resolve_shared = strtol( argv[npos++], NULL, 0 );
146  if( npos < argc ) with_ghosts = strtol( argv[npos++], NULL, 0 );
147  if( npos < argc ) use_mpio = strtol( argv[npos++], NULL, 0 );
148 
149  tmp_result = read_file( mbImpl, filenames, tag_name, tag_val, distrib, parallel_option, resolve_shared,
150  with_ghosts, use_mpio, print_parallel );
151  if( MB_SUCCESS != tmp_result )
152  {
153  result = tmp_result;
154  std::cerr << "Couldn't read mesh; error message:" << std::endl;
155  PRINT_LAST_ERROR;
156  MPI_Abort( MPI_COMM_WORLD, result );
157  }
158  break;
159 
160  case 4:
161  filenames.push_back( argv[npos++] );
162  tmp_result = test_packing( mbImpl, filenames[0].c_str() );
163  if( MB_SUCCESS != tmp_result )
164  {
165  result = tmp_result;
166  std::cerr << "Packing test failed; error message:" << std::endl;
167  PRINT_LAST_ERROR;
168  }
169  break;
170 
171  case 5:
172  // read a file in parallel from the filename on the command line
173  tag_name = "MATERIAL_SET";
174  distrib = 1;
175  tag_val = -1;
176  with_ghosts = 0;
177  resolve_shared = 1;
178  while( npos < argc )
179  filenames.push_back( std::string( argv[npos++] ) );
180  tmp_result = read_file( mbImpl, filenames, tag_name, tag_val, distrib, parallel_option, resolve_shared,
181  with_ghosts, use_mpio, print_parallel );
182  if( MB_SUCCESS != tmp_result )
183  {
184  result = tmp_result;
185  std::cerr << "Couldn't read mesh; error message:" << std::endl;
186  PRINT_LAST_ERROR;
187  MPI_Abort( MPI_COMM_WORLD, result );
188  }
189  break;
190 
191  default:
192  std::cerr << "Unrecognized option \"" << this_opt << "\"; skipping." << std::endl;
193  tmp_result = MB_FAILURE;
194  }
195 
196  if( 0 == rank ) rtime = MPI_Wtime();
197  }
198 
199  if( 0 == rank ) dtime = MPI_Wtime();
200 
201  result = mbImpl->delete_mesh();
202  if( MB_SUCCESS != result )
203  {
204  std::cerr << "Couldn't delete mesh on rank " << rank << "; error message: " << std::endl;
205  PRINT_LAST_ERROR;
206  }
207  if( 0 == rank ) ltime = MPI_Wtime();
208 
209  if( MB_SUCCESS == result ) std::cerr << "Proc " << rank << ": Success." << std::endl;
210 
211  if( 0 == rank )
212  std::cout << "Times: " << dtime - stime << " " << rtime - stime << " " << ltime - dtime
213  << " (total/read/delete)" << std::endl;
214 
215  MPI_Finalize();
216 
217  delete mbImpl;
218 
219  return ( MB_SUCCESS == result ? 0 : 1 );
220 }
221 
222 void print_usage( const char* command )
223 {
224  std::cerr << "Usage: " << command << " [readpar_option] <opt> <input> [...] where:" << std::endl
225  << " readpar_option = 0 (BCAST_DELETE) (default), -1 (READ_DELETE), " << std::endl
226  << " -2 (READ_PARALLEL), -3 (BCAST)" << std::endl
227  << "opt input" << std::endl
228  << "=== =====" << std::endl
229  << " 1 <linear_ints> <shared_verts> " << std::endl
230  << " 2 <n_ints> " << std::endl
231  << " 3* <# files> <file_names...> [<tag_name>=\"MATERIAL_SET\" [tag_val] "
232  "[distribute=1] [resolve_shared=1] [with_ghosts=1] [use_mpio=0]]"
233  << std::endl
234  << " 4 <file_name> " << std::endl
235  << "*Note: if opt 3 is used, it must be the last one." << std::endl;
236 }
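For orientation, a hypothetical option-3 invocation over two pre-partitioned files
(file names are placeholders) supplies the arguments in exactly the order listed above:

// mpirun -np 4 mbparallelcomm_test 3 2 part_a.h5m part_b.h5m MATERIAL_SET -1 1 1 1 0
//   -> read two files partitioned by MATERIAL_SET (any tag value), distribute parts
//      across procs, resolve shared entities, generate ghosts, and skip MPI-IO.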
237 
238 ErrorCode report_nsets( Interface* mbImpl )
239 {
240  // get and report various numbers...
241  int rank;
242  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
243 
244  Range matsets, geomsets, parsets;
245  int nsets;
246  Tag mtag = 0, gtag = 0, ptag = 0, gidtag;
247  ErrorCode result = mbImpl->tag_get_handle( "MATERIAL_SET", 1, MB_TYPE_INTEGER, mtag );
248  if( MB_SUCCESS != result )
249  {
250  std::cerr << "Couldn't get MATERIAL_SET tag." << std::endl;
251  return result;
252  }
253  result = mbImpl->tag_get_handle( "GEOM_DIMENSION", 1, MB_TYPE_INTEGER, gtag );
254  if( MB_SUCCESS != result )
255  {
256  std::cerr << "Couldn't get GEOM_DIMENSION tag." << std::endl;
257  return result;
258  }
259  result = mbImpl->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, ptag );
260  if( MB_SUCCESS != result )
261  {
262  std::cerr << "Couldn't get PARALLEL_PARTITION tag." << std::endl;
263  return result;
264  }
265  result = mbImpl->tag_get_handle( "GLOBAL_ID", 1, MB_TYPE_INTEGER, gidtag );
266  if( MB_SUCCESS != result )
267  {
268  std::cerr << "Couldn't get GLOBAL_ID tag." << std::endl;
269  return result;
270  }
271  result = mbImpl->get_number_entities_by_type( 0, MBENTITYSET, nsets );
272  if( MB_SUCCESS != result )
273  {
274  std::cerr << "Couldn't get number of entities by type." << std::endl;
275  return result;
276  }
277  std::cout << "Proc " << rank << ": Total of " << nsets << " entity sets." << std::endl;
278 
279 #define PRINTSETS( a, b, c, p ) \
280  if( a ) \
281  { \
282  result = mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &( a ), p, 1, b ); \
283  if( !( b ).empty() ) \
284  { \
285  std::vector< int > ids( ( b ).size() ); \
286  result = mbImpl->tag_get_data( gidtag, b, &ids[0] ); \
287  if( MB_SUCCESS == result ) \
288  { \
289  std::cout << "Proc " << rank << ": " << ( c ) << " (total " << ( b ).size() << "): " << ids[0]; \
290  for( unsigned int i = 1; i < ( b ).size(); i++ ) \
291  std::cout << ", " << ids[i]; \
292  std::cout << std::endl; \
293  } \
294  } \
295  }
296 
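// PRINTSETS( tag, range, label, value ): collects into 'range' all entity sets that
// carry 'tag' (matched against *value when 'value' is non-NULL) and prints their
// GLOBAL_ID values under 'label'; it reuses the enclosing 'result' and 'gidtag'.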
297  PRINTSETS( mtag, matsets, "material sets", NULL );
298 
299  int tval = 3;
300  void* pval = &tval;
301 
302  PRINTSETS( gtag, geomsets, "geom sets (vols)", &pval );
303  tval = 2;
304  geomsets.clear();
305  PRINTSETS( gtag, geomsets, "geom sets (surfs)", &pval );
306  tval = 1;
307  geomsets.clear();
308  PRINTSETS( gtag, geomsets, "geom sets (curves)", &pval );
309  tval = 0;
310  geomsets.clear();
311  PRINTSETS( gtag, geomsets, "geom sets (verts)", &pval );
312 
313  PRINTSETS( ptag, parsets, "partition sets", NULL );
314 
315  if( debug )
316  {
317  // list info on all ent sets, reuse parsets
318  parsets.clear();
319  result = mbImpl->get_entities_by_type( 0, MBENTITYSET, parsets );
320  if( MB_SUCCESS == result )
321  {
322  std::cout << "Total sets (by range): " << parsets.size() << "; sets: " << std::endl;
323  parsets.print( " " );
324  mbImpl->list_entities( parsets );
325  }
326  }
327 
328  return MB_SUCCESS;
329 }
330 
331 ErrorCode read_file( Interface* mbImpl,
332  std::vector< std::string >& filenames,
333  const char* tag_name,
334  int tag_val,
335  int distrib,
336  int parallel_option,
337  int resolve_shared,
338  int with_ghosts,
339  int use_mpio,
340  bool print_parallel )
341 {
342  std::ostringstream options;
343  switch( parallel_option )
344  {
345  case 0:
346  options << "PARALLEL=BCAST_DELETE;PARTITION=" << tag_name;
347  break;
348  case -1:
349  options << "PARALLEL=READ_DELETE;PARTITION=" << tag_name;
350  break;
351  case -2:
352  options << "PARALLEL=READ_PART;PARTITION=" << tag_name;
353  break;
354  case -3:
355  options << "PARALLEL=BCAST;PARTITION=" << tag_name;
356  break;
357  default:
358  return MB_FAILURE;
359  }
360 
361  if( -1 != tag_val ) options << ";PARTITION_VAL=" << tag_val;
362 
363  if( 1 == distrib ) options << ";PARTITION_DISTRIBUTE";
364 
365  if( 1 == resolve_shared ) options << ";PARALLEL_RESOLVE_SHARED_ENTS";
366 
367  if( 1 == with_ghosts ) options << ";PARALLEL_GHOSTS=3.0.1";
368 
369  if( 1 == use_mpio ) options << ";USE_MPIO";
370 
371  options << ";CPUTIME";
372 
373  if( print_parallel ) options << ";PRINT_PARALLEL";
374 
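// Illustration (not printed by the code): with the defaults used by option 5
// (parallel_option 0, tag_val -1, distrib 1, resolve_shared 1, with_ghosts 0,
// use_mpio 0) the assembled option string reads
//   "PARALLEL=BCAST_DELETE;PARTITION=MATERIAL_SET;PARTITION_DISTRIBUTE;PARALLEL_RESOLVE_SHARED_ENTS;CPUTIME"
// When ghosting is enabled, PARALLEL_GHOSTS=3.0.1 requests 3d ghost elements
// bridged through vertices (dimension 0), one layer deep.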
375  std::vector< ParallelComm* > pcs( filenames.size() );
376  ErrorCode result = MB_FAILURE;
377 
378  if( 1 < filenames.size() )
379  {
380  for( unsigned int i = 0; i < filenames.size(); i++ )
381  {
382  pcs[i] = new ParallelComm( mbImpl, MPI_COMM_WORLD );
383  int index = pcs[i]->get_id();
384  std::ostringstream newopts;
385  newopts << options.str();
386  newopts << ";PARALLEL_COMM=" << index;
387  result = mbImpl->load_file( filenames[i].c_str(), 0, newopts.str().c_str() );
388 
389  if( MB_SUCCESS != result ) PRINT_LAST_ERROR;
390 
391  if( MB_SUCCESS != result )
392  {
393  MPI_Abort( MPI_COMM_WORLD, result );
394  break;
395  }
396 
397  // exchange tag
398  Range tmp_range;
399  result = pcs[i]->exchange_tags( "GLOBAL_ID", tmp_range );
400  if( MB_SUCCESS != result )
401  {
402  std::cerr << "Tag exchange didn't work." << std::endl;
403  break;
404  }
405  }
406  }
407  else
408  {
409  result = mbImpl->load_file( filenames[0].c_str(), 0, options.str().c_str() );
410  RRA( "Failed to load file." );
411  pcs[0] = ParallelComm::get_pcomm( mbImpl, 0 );
412  assert( pcs[0] );
413  }
414 
415  if( MB_SUCCESS == result ) report_iface_ents( mbImpl, pcs );
416 
417  return result;
418 }
419 
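A minimal sketch (placeholder variable names) of consuming the result of read_file
after a single-file load, as in the else branch above, using only calls that already
appear in this test:

// ParallelComm* pc = ParallelComm::get_pcomm( mbImpl, 0 );
// Range shared_verts;
// ErrorCode rval = pc->get_iface_entities( -1, 0, shared_verts );
// // shared_verts now holds the vertices this proc shares across part interfaces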
420 ErrorCode test_packing( Interface* mbImpl, const char* filename )
421 {
422  // read the mesh
423  EntityHandle file_set;
424  ErrorCode result = mbImpl->create_meshset( MESHSET_SET, file_set );
425  RRA( "create_meshset failed." );
426 
427  result = mbImpl->load_file( filename, &file_set, NULL );
428  if( MB_SUCCESS != result )
429  {
430  std::cerr << "Reading file failed; message:" << std::endl;
431  PRINT_LAST_ERROR;
432  return result;
433  }
434 
435  // get the entities in the file set and pack a buffer with them
436  Range ents, whole_range;
437  std::vector< EntityHandle > new_ents;
438  result = mbImpl->get_entities_by_handle( file_set, ents );
439  RRA( "Getting 3d ents failed." );
440 
441  ents.insert( file_set );
442 
443  ParallelComm* pcomm = new ParallelComm( mbImpl, MPI_COMM_WORLD );
444 
445  ParallelComm::Buffer buff( ParallelComm::INITIAL_BUFF_SIZE );
446  result = pcomm->pack_buffer( ents, false, true, false, -1, &buff );
447  RRA( "Packing buffer count (non-stored handles) failed." );
448 
449  std::vector< std::vector< EntityHandle > > L1hloc, L1hrem;
450  std::vector< std::vector< int > > L1p;
451  std::vector< EntityHandle > L2hloc, L2hrem;
452  std::vector< unsigned int > L2p;
453 
454  buff.reset_ptr();
455  result = pcomm->unpack_buffer( buff.buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );
456  RRA( "Unpacking buffer (non-stored handles) failed." );
457 
458  return MB_SUCCESS;
459 }
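test_packing exercises ParallelComm's pack/unpack round trip within a single process:
the contents of the file set are serialized into buff and then re-created as new
entities collected in new_ents. A hypothetical follow-up report (not part of the
original test) could print both counts:

// std::cout << "Packed " << ents.size() << " entities (incl. the file set); "
//           << "unpacking created " << new_ents.size() << " new entities." << std::endl;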
460 
461 ErrorCode report_iface_ents( Interface* mbImpl, std::vector< ParallelComm* >& pcs )
462 {
463  Range iface_ents[6];
464  ErrorCode result = MB_SUCCESS, tmp_result;
465 
466  // now figure out which vertices are shared
467  Range part_ents, part_verts;
468  for( unsigned int p = 0; p < pcs.size(); p++ )
469  {
470  // get entities owned by this partition
471  for( Range::iterator rit = pcs[p]->partition_sets().begin(); rit != pcs[p]->partition_sets().end(); ++rit )
472  {
473  tmp_result = mbImpl->get_entities_by_dimension( *rit, 3, part_ents, true );
474  if( MB_SUCCESS != tmp_result ) result = tmp_result;
475  }
476 
477  for( int i = 0; i < 4; i++ )
478  {
479  tmp_result = pcs[p]->get_iface_entities( -1, i, iface_ents[i] );
480 
481  if( MB_SUCCESS != tmp_result )
482  {
483  std::cerr << "get_iface_entities returned error on proc " << pcs[p]->proc_config().proc_rank()
484  << "; message: " << std::endl;
485  std::string last_error;
486  result = mbImpl->get_last_error( last_error );
487  if( last_error.empty() )
488  std::cerr << "(none)" << std::endl;
489  else
490  std::cerr << last_error << std::endl;
491  result = tmp_result;
492  }
493  if( 0 != i ) iface_ents[4].merge( iface_ents[i] );
494  }
495  }
496 
497  // get non-owned vertices
498  result = pcs[0]->get_pstatus_entities( 0, PSTATUS_NOT_OWNED, part_verts );
499  if( MB_SUCCESS != result )
500  {
501  std::cerr << "Couldn't get non-owned entities." << std::endl;
502  return result;
503  }
504  int tot_verts;
505  result = mbImpl->get_number_entities_by_dimension( 0, 0, tot_verts );
506  if( MB_SUCCESS != result )
507  {
508  std::cerr << "Couldn't get number of vertices." << std::endl;
509  return result;
510  }
511  tot_verts -= part_verts.size();
512 
513  // report # iface entities
514  result = mbImpl->get_adjacencies( iface_ents[4], 0, false, iface_ents[5], Interface::UNION );
515 
516  int rank;
517  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
518 
519  std::cerr << "Proc " << rank << " iface entities: " << std::endl;
520  for( int i = 0; i < 4; i++ )
521  std::cerr << " " << iface_ents[i].size() << " " << i << "d iface entities." << std::endl;
522  std::cerr << " (" << iface_ents[5].size() << " verts adj to other iface ents)" << std::endl;
523  if( iface_ents[0].size() != iface_ents[5].size() )
524  std::cerr << "WARNING: number of interface vertices doesn't agree with "
525  << "vertex adjacencies on interface entities." << std::endl;
526 
527  // report # regions owned by this proc
528  std::cout << "Proc " << rank << " owns " << part_ents.size() << " 3d entities." << std::endl;
529 
530  // get total # regions over all procs
531  int num_local[2], num_total[2];
532  num_local[0] = tot_verts;
533  num_local[1] = part_ents.size();
534 
535  int failure = MPI_Reduce( num_local, num_total, 2, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
536  if( failure ) result = MB_FAILURE;
537 
538  if( 0 == rank )
539  {
540  std::cout << "Total # owned vertices = " << num_total[0] << std::endl;
541  std::cout << "Total # owned regions = " << num_total[1] << std::endl;
542  }
543 
544  return result;
545 }