MOAB: Mesh Oriented datABase (version 5.5.0)
parallel_write_test.cpp — test of MOAB's parallel HDF5 writer.
This listing is part of the generated MOAB source documentation.
1 #include "moab/Core.hpp"
2 #include "moab/ParallelComm.hpp"
3 #include "MBTagConventions.hpp"
4 #include "moab_mpi.h"
5 #include <cstdlib>
6 #include <iostream>
7 #include <ctime>
8 #include <cstring>
9 #include <cmath>
10 #include <cassert>
11 #include <cstdio>
12 #include <sstream>
13 
14 using namespace moab;
15 
16 #define TPRINT( A ) tprint( ( A ) )
17 static void tprint( const char* A )
18 {
19  int rank;
20  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
21  char buffer[128];
22  sprintf( buffer, "%02d: %6.2f: %s\n", rank, (double)clock() / CLOCKS_PER_SEC, A );
23  fputs( buffer, stderr );
24 }
25 
26 const int DEFAULT_INTERVALS = 2;
27 const char* DEFAULT_FILE_NAME = "parallel_write_test.h5m";
28 
29 // Create mesh for each processor that is a cube of hexes
30 // with the specified interval count along each edge. Cubes
31 // of mesh will be positioned and vertex IDs assigned such that
32 // there are shared entities. If the cubic root of the number
33 // of processors is a whole number, then each processors mesh
34 // will be a cube in a grid with that number of processor blocks
35 // along each edge. Otherwise processor blocks will be arranged
36 // within the subset of the grid that is the ceiling of the cubic
37 // root of the comm size such that there are no disjoint regions.
38 ErrorCode generate_mesh( Interface& moab, int intervals );
39 
40 const char args[] = "[-i <intervals>] [-o <filename>] [-L <filename>] [-g <n>]";
41 void help()
42 {
43  std::cout << "parallel_write_test " << args << std::endl
44  << " -i <N> Each processor owns an NxNxN cube of hex elements (default: " << DEFAULT_INTERVALS << ")"
45  << std::endl
46  << " -o <name> Retain output file and name it as specified." << std::endl
47  << " -L <name> Write local mesh to file name prefixed with MPI rank" << std::endl
48  << " -g <n> Specify writer debug output level" << std::endl
49  << " -R Skip resolve of shared entities (interface ents will be duplicated in file)" << std::endl
50  << std::endl
51  << "This program creates a (non-strict) subset of a regular hex mesh "
52  "such that the mesh is already partitioned, and then attempts to "
53  "write that mesh using MOAB's parallel HDF5 writer. The mesh size "
54  "will scale with the number of processors and the number of elements "
55  "per processor (the latter is a function of the value specified "
56  "with the '-i' flag.)"
57  << std::endl
58  << std::endl
59  << "Let N = ceil(cbrt(P)), where P is the number of processes. "
60  "The mesh will be some subset of a cube with one corner at the "
61  "origin and the other at (N,N,N). Each processor will own a "
62  "non-overlapping 1x1x1 unit block of mesh within that cube. "
63  "If P is a power of 3, then the entire NxNxN cube will be "
64  "filled with hex elements. Otherwise, some connected subset "
65  "of the cube will be meshed. Each processor is assigned a "
66  "sub-block of the cube by rank where the blocks are enumerated "
67  "sequentally with x increasing most rapidly and z least rapidly."
68  << std::endl
69  << std::endl
70  << "The size of the mesh owned by each processor is controlled by "
71  "the number of intervals along each edge of its block of mesh. "
72  "If each block has N intervals, than each processor will have "
73  "N^3 hex elements."
74  << std::endl
75  << std::endl;
76 }
77 
78 int main( int argc, char* argv[] )
79 {
80  int ierr = MPI_Init( &argc, &argv );
81  if( ierr )
82  {
83  std::cerr << "MPI_Init failed with error code: " << ierr << std::endl;
84  return ierr;
85  }
86  int rank;
87  ierr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
88  if( ierr )
89  {
90  std::cerr << "MPI_Comm_rank failed with error code: " << ierr << std::endl;
91  return ierr;
92  }
93  int size;
94  ierr = MPI_Comm_size( MPI_COMM_WORLD, &size );
95  if( ierr )
96  {
97  std::cerr << "MPI_Comm_size failed with error code: " << ierr << std::endl;
98  return ierr;
99  }
100 
101  // settings controlled by CL flags
102  const char* output_file_name = 0;
103  const char* indiv_file_name = 0;
104  int intervals = 0, debug_level = 0;
105  // state for CL flag processing
106  bool expect_intervals = false;
107  bool expect_file_name = false;
108  bool expect_indiv_file = false;
109  bool skip_resolve_shared = false;
110  bool expect_debug_level = false;
111  // process CL args
112  for( int i = 1; i < argc; ++i )
113  {
114  if( expect_intervals )
115  {
116  char* endptr = 0;
117  intervals = (int)strtol( argv[i], &endptr, 0 );
118  if( *endptr || intervals < 1 )
119  {
120  std::cerr << "Invalid block interval value: " << argv[i] << std::endl;
121  return 1;
122  }
123  expect_intervals = false;
124  }
125  else if( expect_indiv_file )
126  {
127  indiv_file_name = argv[i];
128  expect_indiv_file = false;
129  }
130  else if( expect_file_name )
131  {
132  output_file_name = argv[i];
133  expect_file_name = false;
134  }
135  else if( expect_debug_level )
136  {
137  debug_level = atoi( argv[i] );
138  if( debug_level < 1 )
139  {
140  std::cerr << "Invalid argument following -g flag: \"" << argv[i] << '"' << std::endl;
141  return 1;
142  }
143  expect_debug_level = false;
144  }
145  else if( !strcmp( "-i", argv[i] ) )
146  expect_intervals = true;
147  else if( !strcmp( "-o", argv[i] ) )
148  expect_file_name = true;
149  else if( !strcmp( "-L", argv[i] ) )
150  expect_indiv_file = true;
151  else if( !strcmp( "-R", argv[i] ) )
152  skip_resolve_shared = true;
153  else if( !strcmp( "-g", argv[i] ) )
154  expect_debug_level = true;
155  else if( !strcmp( "-h", argv[i] ) )
156  {
157  help();
158  return 0;
159  }
160  else
161  {
162  std::cerr << "Unexpected argument: " << argv[i] << std::endl
163  << "Usage: " << argv[0] << " " << args << std::endl
164  << " " << argv[0] << " -h" << std::endl
165  << " Try '-h' for help." << std::endl;
166  return 1;
167  }
168  }
169  // Check for missing argument after last CL flag
170  if( expect_file_name || expect_intervals || expect_indiv_file )
171  {
172  std::cerr << "Missing argument for '" << argv[argc - 1] << "'" << std::endl;
173  return 1;
174  }
175  // If intervals weren't specified, use default
176  if( intervals == 0 )
177  {
178  std::cout << "Using default interval count: " << DEFAULT_INTERVALS << std::endl;
179  intervals = DEFAULT_INTERVALS;
180  }
181  // If no output file was specified, use default one and note that
182  // we need to delete it when the test completes.
183  bool keep_output_file = true;
184  if( !output_file_name )
185  {
186  output_file_name = DEFAULT_FILE_NAME;
187  keep_output_file = false;
188  }
189 
190  // Create mesh
191  TPRINT( "Generating mesh" );
192  double gen_time = MPI_Wtime();
193  Core mb;
194  Interface& moab = mb;
195  ErrorCode rval = generate_mesh( moab, intervals );
196  if( MB_SUCCESS != rval )
197  {
198  std::cerr << "Mesh creation failed with error code: " << rval << std::endl;
199  return (int)rval;
200  }
201  gen_time = MPI_Wtime() - gen_time;
202 
203  // Write out local mesh on each processor if requested.
204  if( indiv_file_name )
205  {
206  TPRINT( "Writing individual file" );
207  char buffer[64];
208  int width = (int)ceil( log10( size ) );
209  sprintf( buffer, "%0*d-", width, rank );
210  std::string name( buffer );
211  name += indiv_file_name;
212  rval = moab.write_file( name.c_str() );
213  if( MB_SUCCESS != rval )
214  {
215  std::cerr << "Failed to write file: " << name << std::endl;
216  return (int)rval;
217  }
218  }
219 
220  double res_time = MPI_Wtime();
221  Range hexes;
222  moab.get_entities_by_type( 0, MBHEX, hexes );
223  if( !skip_resolve_shared )
224  {
225  TPRINT( "Resolving shared entities" );
226  // Negotiate shared entities using vertex global IDs
227  ParallelComm* pcomm = new ParallelComm( &moab, MPI_COMM_WORLD );
228  rval = pcomm->resolve_shared_ents( 0, hexes, 3, 0 );
229  if( MB_SUCCESS != rval )
230  {
231  std::cerr << "ParallelComm::resolve_shared_ents failed" << std::endl;
232  return rval;
233  }
234  }
235  res_time = MPI_Wtime() - res_time;
236 
237  TPRINT( "Beginning parallel write" );
238  double write_time = MPI_Wtime();
239  // Do parallel write
240  clock_t t = clock();
241  std::ostringstream opts;
242  opts << "PARALLEL=WRITE_PART";
243  if( debug_level > 0 ) opts << ";DEBUG_IO=" << debug_level;
244  rval = moab.write_file( output_file_name, "MOAB", opts.str().c_str() );
245  t = clock() - t;
246  if( MB_SUCCESS != rval )
247  {
248  std::string msg;
249  moab.get_last_error( msg );
250  std::cerr << "File creation failed with error code: " << moab.get_error_string( rval ) << std::endl;
251  std::cerr << "\t\"" << msg << '"' << std::endl;
252  return (int)rval;
253  }
254  write_time = MPI_Wtime() - write_time;
255 
256  double times[3] = { gen_time, res_time, write_time };
257  double max[3] = { 0, 0, 0 };
258  MPI_Reduce( times, max, 3, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );
259 
260  // Clean up and summarize
261  if( 0 == rank )
262  {
263  double sec = (double)t / CLOCKS_PER_SEC;
264  std::cout << "Wrote " << hexes.size() * size << " hexes in " << sec << " seconds." << std::endl;
265 
266  if( !keep_output_file )
267  {
268  TPRINT( "Removing written file" );
269  remove( output_file_name );
270  }
271 
272  std::cout << "Wall time: generate: " << max[0] << ", resovle shared: " << max[1] << ", write_file: " << max[2]
273  << std::endl;
274  }
275 
276  TPRINT( "Finalizing MPI" );
277  return MPI_Finalize();
278 }
279 
// Map lattice coordinates (i,j,k) within this processor's block to an index
// into the local 'vertices' vector.  i (x) varies fastest, then j, then k.
#define IDX( i, j, k ) ( ( num_interval + 1 ) * ( num_interval + 1 ) * ( k ) + ( num_interval + 1 ) * ( j ) + ( i ) )

// Build this processor's 1x1x1 block of num_interval^3 hex elements.
// Vertices on block faces are duplicated on neighboring ranks but carry
// identical global IDs, which is what resolve_shared_ents later uses to
// match them up.  Returns MB_SUCCESS or the first MOAB error encountered.
ErrorCode generate_mesh( Interface& moab, int num_interval )
{
    int rank, size;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    ErrorCode rval;
    Tag global_id = moab.globalId_tag();

    // Each processor will own one cube of mesh within
    // an 3D grid of cubes. Calculate the dimensions of
    // that grid in numbers of cubes.
    // root = ceil(cbrt(size))
    int root = 1;
    while( root * root * root < size )
        ++root;
    // Start from a root x (root-1) x (root-1) grid and grow one dimension at
    // a time only as needed, so the set of occupied blocks stays connected.
    int num_x_blocks = root;
    int num_y_blocks = root - 1;
    int num_z_blocks = root - 1;
    if( num_x_blocks * num_y_blocks * num_z_blocks < size ) ++num_y_blocks;
    if( num_x_blocks * num_y_blocks * num_z_blocks < size ) ++num_z_blocks;

    // calculate position of this processor in grid
    // (ranks are enumerated with x fastest, z slowest)
    int my_z_block = rank / ( num_x_blocks * num_y_blocks );
    int rem = rank % ( num_x_blocks * num_y_blocks );
    int my_y_block = rem / num_x_blocks;
    int my_x_block = rem % num_x_blocks;

    // Each processor's cube of mesh will be num_interval^3 elements
    // and will be 1.0 units on a side

    // create vertices
    // num_x_vtx/num_y_vtx are the global lattice dimensions used to compute
    // globally consistent vertex IDs; *_offset is this block's position in
    // that global lattice.
    const int num_x_vtx = num_interval * num_x_blocks + 1;
    const int num_y_vtx = num_interval * num_y_blocks + 1;
    const int x_offset = my_x_block * num_interval;
    const int y_offset = my_y_block * num_interval;
    const int z_offset = my_z_block * num_interval;
    double step = 1.0 / num_interval;
    std::vector< EntityHandle > vertices( ( num_interval + 1 ) * ( num_interval + 1 ) * ( num_interval + 1 ) );
    std::vector< EntityHandle >::iterator v = vertices.begin();
    for( int k = 0; k <= num_interval; ++k )
    {
        for( int j = 0; j <= num_interval; ++j )
        {
            for( int i = 0; i <= num_interval; ++i )
            {
                // Coordinates: block corner (my_*_block) plus fraction of the
                // unit block covered so far.
                double coords[] = { my_x_block + i * step, my_y_block + j * step, my_z_block + k * step };
                EntityHandle h;
                rval = moab.create_vertex( coords, h );
                if( MB_SUCCESS != rval ) return rval;

                // 1-based global ID from position in the global vertex
                // lattice; shared boundary vertices get the same ID on every
                // rank that creates them.
                int id = 1 + x_offset + i + ( y_offset + j ) * num_x_vtx + ( z_offset + k ) * num_x_vtx * num_y_vtx;
                rval = moab.tag_set_data( global_id, &h, 1, &id );
                if( MB_SUCCESS != rval ) return rval;

                assert( v != vertices.end() );
                *v++ = h;
            }
        }
    }

    // create hexes
    for( int k = 0; k < num_interval; ++k )
    {
        for( int j = 0; j < num_interval; ++j )
        {
            for( int i = 0; i < num_interval; ++i )
            {
                assert( IDX( i + 1, j + 1, k + 1 ) < (int)vertices.size() );
                // Corner vertices: the four corners of the lower (k) face,
                // then the four corners of the upper (k+1) face.
                const EntityHandle conn[] = { vertices[IDX( i, j, k )],
                                              vertices[IDX( i + 1, j, k )],
                                              vertices[IDX( i + 1, j + 1, k )],
                                              vertices[IDX( i, j + 1, k )],
                                              vertices[IDX( i, j, k + 1 )],
                                              vertices[IDX( i + 1, j, k + 1 )],
                                              vertices[IDX( i + 1, j + 1, k + 1 )],
                                              vertices[IDX( i, j + 1, k + 1 )] };
                EntityHandle elem;
                rval = moab.create_element( MBHEX, conn, 8, elem );
                if( MB_SUCCESS != rval ) return rval;
            }
        }
    }
    // NOTE(review): the disabled code below would create quads on all six
    // faces of the local block -- presumably superseded by the shared-entity
    // resolution done in main(); confirm before re-enabling.
    /*
     // create interface quads
     for (int j = 0; j < num_interval; ++j) {
     for (int i = 0; i < num_interval; ++i) {
     EntityHandle h;

     const EntityHandle conn1[] = { vertices[IDX(i, j, 0)],
     vertices[IDX(i+1,j, 0)],
     vertices[IDX(i+1,j+1,0)],
     vertices[IDX(i, j+1,0)] };
     rval = moab.create_element( MBQUAD, conn1, 4, h );
     if (MB_SUCCESS != rval)
     return rval;

     const EntityHandle conn2[] = { vertices[IDX(i, j, num_interval)],
     vertices[IDX(i+1,j, num_interval)],
     vertices[IDX(i+1,j+1,num_interval)],
     vertices[IDX(i, j+1,num_interval)] };
     rval = moab.create_element( MBQUAD, conn2, 4, h );
     if (MB_SUCCESS != rval)
     return rval;
     }
     }
     for (int k = 0; k < num_interval; ++k) {
     for (int i = 0; i < num_interval; ++i) {
     EntityHandle h;

     const EntityHandle conn1[] = { vertices[IDX(i, 0,k )],
     vertices[IDX(i+1,0,k )],
     vertices[IDX(i+1,0,k+1)],
     vertices[IDX(i, 0,k+1)] };
     rval = moab.create_element( MBQUAD, conn1, 4, h );
     if (MB_SUCCESS != rval)
     return rval;

     const EntityHandle conn2[] = { vertices[IDX(i, num_interval,k )],
     vertices[IDX(i+1,num_interval,k )],
     vertices[IDX(i+1,num_interval,k+1)],
     vertices[IDX(i, num_interval,k+1)] };
     rval = moab.create_element( MBQUAD, conn2, 4, h );
     if (MB_SUCCESS != rval)
     return rval;
     }
     }
     for (int k = 0; k < num_interval; ++k) {
     for (int j = 0; j < num_interval; ++j) {
     EntityHandle h;

     const EntityHandle conn1[] = { vertices[IDX(0,j, k )],
     vertices[IDX(0,j+1,k )],
     vertices[IDX(0,j+1,k+1)],
     vertices[IDX(0,j, k+1)] };
     rval = moab.create_element( MBQUAD, conn1, 4, h );
     if (MB_SUCCESS != rval)
     return rval;

     const EntityHandle conn2[] = { vertices[IDX(num_interval,j, k )],
     vertices[IDX(num_interval,j+1,k )],
     vertices[IDX(num_interval,j+1,k+1)],
     vertices[IDX(num_interval,j, k+1)] };
     rval = moab.create_element( MBQUAD, conn2, 4, h );
     if (MB_SUCCESS != rval)
     return rval;
     }
     }
     */
    return MB_SUCCESS;
}