Mesh Oriented datABase (version 5.5.1)
An array-based unstructured mesh library
GenLargeMesh.cpp
/** @example GenLargeMesh.cpp \n
 * \brief Create a large structured mesh, partitioned \n
 *
 * It shows how to create a mesh on the fly, on multiple processors.
 * Each processor will create its own portion of a block mesh, partitioned
 * as AxBxC blocks. Each block will have blockSize^3 hexahedra and
 * will get a different PARALLEL_PARTITION tag.
 *
 * The number of tasks will be MxNxK, and it must match the MPI size.
 * Each task p will generate its mesh at location (m,n,k), using
 * lexicographic ordering: rank = m + n * M + k * M * N
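 *
 * For example (pure illustration): with M=N=K=2, rank 5 decomposes as
 * k = 5 / (M*N) = 1, n = (5 % (M*N)) / M = 0, m = 5 % M = 1, so task 5
 * generates the block of the mesh at location (1,0,1).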
 *
 * By default M=1, N=1, K=1, so by default it should be launched on 1 proc.
 * By default, blockSize is 4 and A=2, B=2, C=2, so each task will generate locally
 * blockSize^3 x A x B x C hexahedra (64 x 8 = 512 hexas, in 8 partitions)
 * (with -t, multiply by 6 for the total number of cells/tets).
 * The total number of partitions will be A*B*C*M*N*K (default 8).
 *
 * Each part in the partition will get a proper tag, and the numbering is in
 * lexicographic ordering: the x direction varies first, then y, then z.
 * The same principle is used for the global id numbering of the nodes and cells
 * (x varies first).
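 *
 * As an illustration only (not necessarily the exact formula used in the code): in a
 * grid with NX x NY x NZ vertices, the vertex at integer position (i,j,k) would get
 * the global id 1 + i + j * NX + k * NX * NY, and cells are numbered analogously.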
 *
 * The vertices will get a proper global id, which will be used to resolve the
 * shared entities.
 * The output will be written in parallel, and we will try sizes as big as we can
 * (up to a billion vertices, because we use int for global ids).
 *
 * Within each partition, the entity handles of the hexas will be contiguous, and so
 * will those of the vertices. The global id is determined easily for a vertex, but the
 * entity handle space is more interesting to manage within a partition (we want
 * contiguous handles within a partition). After merging the vertices, some fragmentation
 * will occur in the vertex handle space within each partition.
 *
 * To run: ./GenLargeMesh
 *
 * When launched on more procs, you have to make sure that
 * num procs = M*N*K
 *
 * So you can launch with
 * mpiexec -np 8 ./GenLargeMesh -M 2 -N 2 -K 2
 *
 * We also added the -q option; for now it works only for the hexa mesh, and it will
 * generate quadratic hex27 elements.
 *
 * The -t option will generate tetrahedra instead of hexahedra. Each hexahedron is
 * decomposed into 6 tetrahedra.
 *
 * The -f option will also generate all edges and faces in the model.
 * -w will use a newer merging method locally. Merging is necessary to merge
 * vertices on the local task, and the new method does not use a search tree,
 * but rather the global ids set on the vertices in a consistent manner.
 *
 * The -d and -i options can be used to add some artificial tags on the model;
 * you can have multiple -d and -i options. -i <tag_name> will set an integer
 * tag with name tag_name on the vertices; -d <tag_name2> will generate
 * double tags on cells (3d elements). You can have multiple tags, like
 * -i tag1 -i tag2 -i tag3 -d tag4
 *
 * The -x, -y and -z options control the geometric dimensions of the final mesh in the
 * x, y and z directions.
 *
 * -o <out_file> controls the name of the output file; it needs to have the h5m
 * extension, because the file is written in parallel.
 *
 * -k will keep the edges and faces that are generated as part of resolving shared
 * entities (by default these edges and faces are removed); when the -f option is used,
 * the -k option is enabled too (so no faces and edges are deleted).
 *
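 * A combined invocation, for illustration only (the tag names here are made up):
 *   mpiexec -np 4 ./GenLargeMesh -M 2 -N 2 -b 8 -t -i temperature -d density -o large.h5m
 * runs on 4 tasks (2 x 2 x 1), generates 8^3 hexas per block and splits them into
 * tetrahedra, adds an integer vertex tag "temperature" and a double cell tag "density",
 * and writes the result in parallel to large.h5m.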
 */

#include "moab/Core.hpp"
#include "moab/ProgOptions.hpp"
#include "moab/MeshGeneration.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"
#endif

#include <ctime>
#include <iostream>
#include <vector>

using namespace moab;
using namespace std;

int main( int argc, char** argv )
{

    bool nosave = false;

#ifdef MOAB_HAVE_MPI
    MPI_Init( &argc, &argv );
#endif
    // options for the brick mesh generator (declared in MeshGeneration.hpp)
    MeshGeneration::BrickOpts bopts;
    // default options
    bopts.A = bopts.B = bopts.C = 2;
    bopts.M = bopts.N = bopts.K = 1;
    bopts.blockSize = 4;
    bopts.xsize = bopts.ysize = bopts.zsize = 1.;
    bopts.ui = CartVect( 1., 0., 0. );
    bopts.uj = CartVect( 0., 1., 0. );
    bopts.uk = CartVect( 0., 0., 1. );
    bopts.newMergeMethod = bopts.quadratic = bopts.keep_skins = bopts.tetra = false;
    bopts.adjEnts = bopts.parmerge = false;
    bopts.GL = 0;

    ProgOptions opts;

    opts.addOpt< int >( string( "blockSize,b" ), string( "Block size of mesh (default=4)" ), &bopts.blockSize );
    opts.addOpt< int >( string( "xproc,M" ), string( "Number of processors in x dir (default=1)" ), &bopts.M );
    opts.addOpt< int >( string( "yproc,N" ), string( "Number of processors in y dir (default=1)" ), &bopts.N );
    opts.addOpt< int >( string( "zproc,K" ), string( "Number of processors in z dir (default=1)" ), &bopts.K );

    opts.addOpt< int >( string( "xblocks,A" ), string( "Number of blocks on a task in x dir (default=2)" ), &bopts.A );
    opts.addOpt< int >( string( "yblocks,B" ), string( "Number of blocks on a task in y dir (default=2)" ), &bopts.B );
    opts.addOpt< int >( string( "zblocks,C" ), string( "Number of blocks on a task in z dir (default=2)" ), &bopts.C );

    opts.addOpt< double >( string( "xsize,x" ), string( "Total size in x direction (default=1.)" ), &bopts.xsize );
    opts.addOpt< double >( string( "ysize,y" ), string( "Total size in y direction (default=1.)" ), &bopts.ysize );
    opts.addOpt< double >( string( "zsize,z" ), string( "Total size in z direction (default=1.)" ), &bopts.zsize );

    opts.addOpt< void >( "newMerge,w", "use new merging method", &bopts.newMergeMethod );

    opts.addOpt< void >( "quadratic,q", "use hex 27 elements", &bopts.quadratic );

    opts.addOpt< void >( "keep_skins,k", "keep skins with shared entities", &bopts.keep_skins );

    opts.addOpt< void >( "tetrahedrons,t", "generate tetrahedrons", &bopts.tetra );

    opts.addOpt< void >( "faces_edges,f", "create all faces and edges", &bopts.adjEnts );

    opts.addOpt< int >( string( "ghost_layers,g" ), string( "Number of ghost layers (default=0)" ), &bopts.GL );

    vector< string > intTagNames;
    string firstIntTag;
    opts.addOpt< string >( "int_tag_vert,i", "add integer tag on vertices", &firstIntTag );

    vector< string > doubleTagNames;
    string firstDoubleTag;
    opts.addOpt< string >( "double_tag_cell,d", "add double tag on cells", &firstDoubleTag );

    string outFileName = "GenLargeMesh.h5m";
    opts.addOpt< string >( "outFile,o", "Specify the output file name string (default GenLargeMesh.h5m)",
                           &outFileName );

#ifdef MOAB_HAVE_HDF5_PARALLEL
    bool readb = false;
    opts.addOpt< void >( "readback,r", "read back the generated mesh", &readb );

    bool readAndGhost = false;
    opts.addOpt< void >( "readAndGhost,G", "read back the generated mesh and ghost one layer", &readAndGhost );
#endif

    opts.addOpt< void >( "parallel_merge,p", "use parallel mesh merge, not vertex ID based merge", &bopts.parmerge );

    opts.addOpt< void >( "no_save,n", "do not save the file", &nosave );

    opts.parseCommandLine( argc, argv );

    opts.getOptAllArgs( "int_tag_vert,i", intTagNames );
    opts.getOptAllArgs( "double_tag_cell,d", doubleTagNames );

    if( bopts.adjEnts ) bopts.keep_skins = true;  // Do not delete anything

    Interface* mb = new( std::nothrow ) Core;
    if( NULL == mb )
    {
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }

    int rank = 0, size = 1;

#ifdef MOAB_HAVE_MPI
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
#endif

    EntityHandle fileset;
    ErrorCode rval = mb->create_meshset( MESHSET_SET, fileset );MB_CHK_ERR( rval );
#ifdef MOAB_HAVE_MPI
    ParallelComm* pc     = new ParallelComm( mb, MPI_COMM_WORLD );
    MeshGeneration* mgen = new MeshGeneration( mb, pc, fileset );
#else
    MeshGeneration* mgen = new MeshGeneration( mb, 0, fileset );
#endif

    clock_t tt = clock();

    rval = mgen->BrickInstance( bopts );MB_CHK_ERR( rval );

    Range all3dcells;
    rval = mb->get_entities_by_dimension( fileset, 3, all3dcells );MB_CHK_ERR( rval );

    if( 0 == rank )
    {
        cout << "generate local mesh: " << ( clock() - tt ) / (double)CLOCKS_PER_SEC << " seconds" << endl;
        tt = clock();
        cout << "number of elements on rank 0: " << all3dcells.size() << endl;
        cout << "Total number of elements: " << all3dcells.size() * size << endl;
        cout << "Element type: " << ( bopts.tetra ? "MBTET" : "MBHEX" )
             << " order:" << ( bopts.quadratic ? "quadratic" : "linear" ) << endl;
    }
    Range verts;
    rval = mb->get_entities_by_dimension( 0, 0, verts );MB_CHK_SET_ERR( rval, "Can't get all vertices" );

    if( !nosave )
    {
#ifdef MOAB_HAVE_HDF5_PARALLEL
        rval = mb->write_file( outFileName.c_str(), 0, ";;PARALLEL=WRITE_PART;CPUTIME;", &fileset, 1 );MB_CHK_SET_ERR( rval, "Can't write in parallel" );
#else
        // should be a vtk file, actually; maybe make sure of that
        rval = mb->write_file( outFileName.c_str(), 0, "", &fileset, 1 );MB_CHK_SET_ERR( rval, "Can't write in serial" );
#endif
        if( 0 == rank )
        {
            cout << "write file " << outFileName << " in " << ( clock() - tt ) / (double)CLOCKS_PER_SEC << " seconds"
                 << endl;
            tt = clock();
        }
    }
    // remember the local sizes, then delete the mesh that we already have in memory
    size_t nLocalVerts = verts.size();
    size_t nLocalCells = all3dcells.size();

    mb->delete_mesh();

#ifdef MOAB_HAVE_HDF5_PARALLEL
    if( !nosave && readb )
    {
        // now recreate a core instance and load the file we just wrote out to verify
        Core mb2;
        std::string read_opts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_"
                               "SHARED_ENTS;CPUTIME;" );
        if( readAndGhost ) read_opts += "PARALLEL_GHOSTS=3.0.1;";
        rval = mb2.load_file( outFileName.c_str(), 0, read_opts.c_str() );MB_CHK_SET_ERR( rval, "Can't read in parallel" );
        if( 0 == rank )
        {
            cout << "read back file " << outFileName << " with options: \n"
                 << read_opts << " in " << ( clock() - tt ) / (double)CLOCKS_PER_SEC << " seconds" << endl;
            tt = clock();
        }
        moab::Range nverts, ncells;
        rval = mb2.get_entities_by_dimension( 0, 0, nverts );MB_CHK_SET_ERR( rval, "Can't get all vertices" );
        rval = mb2.get_entities_by_dimension( 0, 3, ncells );MB_CHK_SET_ERR( rval, "Can't get all 3d cells" );

        if( readAndGhost && size > 1 )
        {
            // filter out the ghost nodes and elements, for comparison with the original mesh
            // first get the parallel comm
            ParallelComm* pcomm2 = ParallelComm::get_pcomm( &mb2, 0 );
            if( NULL == pcomm2 ) MB_SET_ERR( MB_FAILURE, "can't get parallel comm." );
            rval = pcomm2->filter_pstatus( nverts, PSTATUS_GHOST, PSTATUS_NOT );MB_CHK_SET_ERR( rval, "Can't filter ghost vertices" );
            rval = pcomm2->filter_pstatus( ncells, PSTATUS_GHOST, PSTATUS_NOT );MB_CHK_SET_ERR( rval, "Can't filter ghost cells" );
        }
        // a mismatch in either count means the read back is inconsistent with the original mesh
        if( nverts.size() != nLocalVerts || ncells.size() != nLocalCells )
        {
            MB_SET_ERR( MB_FAILURE, "Reading back the output file led to inconsistent number of entities." );
        }

        // delete the mesh that we already have in-memory
        mb2.delete_mesh();
    }
#endif

#ifdef MOAB_HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
}