MOAB: Mesh Oriented datABase  (version 5.5.0)
structured3.cpp
#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include "moab/ScdInterface.hpp"
#include "moab/HomXform.hpp"
#include "moab/ProgOptions.hpp"
#include "MBTagConventions.hpp"
#include "TestUtil.hpp"
#include <string>
#include <iomanip>
#include <iostream>
#include <cassert>

using namespace moab;

// Number of cells in each direction:
int NC;

/* This test creates a structured box mesh that is NCxNCxNC elements in global dimensions,
 * partitioned among processors automatically using ScdInterface's SQIJK algorithm. It checks
 * that there are enough elements to give every processor at least one. After mesh creation,
 * shared vertex resolution is done, then ghost exchange is done.
 */
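
// Example invocation (assumption: the executable name and MPI launcher depend on the build
// setup; "-i" sets the number of intervals on a side, default 4):
//   mpiexec -np 8 ./structured3 -i 16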
const int ITERS = 50;

void create_parallel_mesh();

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );

    ProgOptions po;
    po.addOpt< int >( "int,i", "Number of intervals on a side" );
    po.parseCommandLine( argc, argv );
    if( !po.getOpt( "int", &NC ) ) NC = 4;

    int err = RUN_TEST( create_parallel_mesh );

    MPI_Finalize();
    return err;
}

void create_parallel_mesh()
{
    Core mbint;
    ParallelComm pc( &mbint, MPI_COMM_WORLD );
    ScdInterface* scdi;
    ErrorCode rval = mbint.query_interface( scdi );CHECK_ERR( rval );
    // pc.set_debug_verbosity(2);

    // create a structured mesh in parallel
    ScdBox* new_box;
    ScdParData par_data;
    par_data.pComm = &pc;
    par_data.gDims[0] = par_data.gDims[1] = par_data.gDims[2] = 0;
    par_data.gDims[3] = par_data.gDims[4] = par_data.gDims[5] = NC;
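    // gDims[0..2] hold the minimum global parametric (IJK) corner and gDims[3..5] the
    // maximum, so the global box spans NC elements in each of I, J, and K.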
    if( ( par_data.gDims[3] - par_data.gDims[0] ) * ( par_data.gDims[4] - par_data.gDims[1] ) *
            ( par_data.gDims[5] - par_data.gDims[2] ) <
        (int)pc.size() )
    {
        std::cerr << "Too few elements for this number of processors." << std::endl;CHECK_ERR( MB_FAILURE );
    }

    par_data.partMethod = ScdParData::SQIJK;
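    // SQIJK partitions the box in all three parametric directions, aiming for
    // per-processor blocks that are as close to cubic as possible.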

    // timing data
    double times[5]; // tstart, tvert, tnonvert, tghost, titer
    times[0] = MPI_Wtime();
    rval = scdi->construct_box( HomCoord(), HomCoord(), NULL, 0, // no vertex positions
                                new_box, NULL,                   // not locally periodic
                                &par_data, true,
                                false ); // assign global ids, don't resolve shared verts
    CHECK_ERR( rval );
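    // With default (zero) HomCoords and par_data supplied, construct_box takes the global
    // dimensions from par_data.gDims and creates only this processor's part of the box.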

    // get global id tag
    Tag tag = mbint.globalId_tag();

    // resolve shared verts
    std::cout << "Resolving shared ents..." << std::endl;
    rval = pc.resolve_shared_ents( new_box->box_set(), -1, 0, &tag );CHECK_ERR( rval );
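    // Here resolve_dim -1 resolves entities of all dimensions, while shared_dim 0 means
    // sharing is established by matching vertices across processors via the global ID tag.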
    times[1] = MPI_Wtime();

    std::cout << "Exchanging ghost cells..." << std::endl;
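    // A 0-layer exchange ghosts nothing; this call serves to resolve sharing of
    // non-vertex entities (edges and faces) between neighboring processors.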
    rval = pc.exchange_ghost_cells( -1, -1, 0, 0, true, true );CHECK_ERR( rval );
    times[2] = MPI_Wtime();

    // pc.list_entities(0,-1);

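    // Second exchange: ghost dim -1 (highest dimension present), bridge dim 0 (vertices),
    // 1 ghost layer, no additional entities, and store remote handles.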
    rval = pc.exchange_ghost_cells( -1, 0, 1, 0, true );
    if( MB_SUCCESS != rval )
    {
        std::string err;
        mbint.get_last_error( err );
        std::cerr << "Error: proc " << pc.rank() << ": " << err << std::endl;
    }
    CHECK_ERR( rval );
    times[3] = MPI_Wtime();

    // pc.list_entities(0,-1);

    // Create a tag, used in exchange_tags; the default value must be a double to match the
    // MB_TYPE_DOUBLE tag type
    double def_val = 1.0;
    rval = mbint.tag_get_handle( "test_tag", 1, MB_TYPE_DOUBLE, tag, MB_TAG_DENSE | MB_TAG_EXCL, &def_val );CHECK_ERR( rval );
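    // MB_TAG_DENSE allocates tag storage for every entity; MB_TAG_EXCL makes the call fail
    // if "test_tag" already exists.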

    Range empty_range;
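    // Nothing is ever added to empty_range, so the exchange presumably operates on all
    // shared/ghosted entities rather than on a specific subset.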
    if( !pc.rank() ) std::cout << "Exchanging tags: ";
    for( int i = 0; i < ITERS; i++ )
    {
        if( !pc.rank() ) std::cout << i << ";";
        rval = pc.exchange_tags( tag, empty_range );CHECK_ERR( rval );
    }
    if( !pc.rank() ) std::cout << std::endl;
    times[4] = MPI_Wtime();

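    // Convert the cumulative timestamps into per-phase durations, iterating back to front
    // so each subtraction uses the still-cumulative earlier entry.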
    for( int i = 4; i >= 1; i-- )
        times[i] -= times[i - 1];

    double tottimes[5] = { 0.0 };
    MPI_Reduce( times + 1, tottimes + 1, 4, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );
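    // The MPI_MAX reduction onto rank 0 reports the slowest rank's time for each phase.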

    if( !pc.rank() )
        std::cout << "Times: " << std::endl
                  << "Create & resolve verts: " << tottimes[1] << std::endl
                  << "Resolve non-verts: " << tottimes[2] << std::endl
                  << "Exchange ghosts: " << tottimes[3] << std::endl
                  << "Exchange tags: " << tottimes[4] << std::endl;
}