// Convenience macro: route a progress message through tprint() so every
// trace line is formatted identically.  NOTE(review): tprint's signature
// and surrounding definition are outside this excerpt.
16 #define TPRINT( A ) tprint( ( A ) )
// Format a trace line as "<rank>: <elapsed>: <message>".  Uses
// clock()/CLOCKS_PER_SEC, i.e. per-process CPU time, not wall time —
// NOTE(review): confirm this is intended, since main() times phases with
// MPI_Wtime().  Fragment: 'buffer' and 'rank' are declared on lines not
// visible in this excerpt.
22 sprintf(
buffer,
"%02d: %6.2f: %s\n",
rank, (
double)clock() / CLOCKS_PER_SEC, A );
// One-line summary of accepted flags, reused both in the banner below and
// in the "Unexpected argument" error path of main().
40 const char args[] =
"[-i <intervals>] [-o <filename>] [-L <filename>] [-g <n>]";
// Help text for the tool (fragment of a usage/help routine whose
// signature is not visible in this excerpt).  Describes each flag, then
// explains the mesh decomposition scheme.
// NOTE(review): user-visible typos in the literals below — "sequentally"
// (should be "sequentially") and "than each processor" (should be
// "then") — left untouched here because fixing runtime strings is a
// behavior change.
43 std::cout <<
"parallel_write_test " <<
args << std::endl
44 <<
" -i <N> Each processor owns an NxNxN cube of hex elements (default: " <<
DEFAULT_INTERVALS <<
")"
46 <<
" -o <name> Retain output file and name it as specified." << std::endl
47 <<
" -L <name> Write local mesh to file name prefixed with MPI rank" << std::endl
48 <<
" -g <n> Specify writer debug output level" << std::endl
49 <<
" -R Skip resolve of shared entities (interface ents will be duplicated in file)" << std::endl
// Paragraph 1: what the program does overall.
51 <<
"This program creates a (non-strict) subset of a regular hex mesh "
52 "such that the mesh is already partitioned, and then attempts to "
53 "write that mesh using MOAB's parallel HDF5 writer. The mesh size "
54 "will scale with the number of processors and the number of elements "
55 "per processor (the latter is a function of the value specified "
56 "with the '-i' flag.)"
// Paragraph 2: rank -> sub-block assignment (x fastest, z slowest),
// which matches the rank decomposition visible later in this file.
59 <<
"Let N = ceil(cbrt(P)), where P is the number of processes. "
60 "The mesh will be some subset of a cube with one corner at the "
61 "origin and the other at (N,N,N). Each processor will own a "
62 "non-overlapping 1x1x1 unit block of mesh within that cube. "
63 "If P is a power of 3, then the entire NxNxN cube will be "
64 "filled with hex elements. Otherwise, some connected subset "
65 "of the cube will be meshed. Each processor is assigned a "
66 "sub-block of the cube by rank where the blocks are enumerated "
67 "sequentally with x increasing most rapidly and z least rapidly."
// Paragraph 3: per-processor mesh size (continues past the lines shown).
70 <<
"The size of the mesh owned by each processor is controlled by "
71 "the number of intervals along each edge of its block of mesh. "
72 "If each block has N intervals, than each processor will have "
// Entry point: MPI-parallel mesh-generation and parallel-write test.
// Phases: MPI setup -> argument parsing -> local mesh generation ->
// optional per-rank serial write -> resolve shared entities -> parallel
// write -> timing report -> cleanup -> MPI_Finalize.
// NOTE(review): this excerpt shows only selected fragments of main();
// braces, early returns, and many statements between the numbered
// fragments are not visible, so the comments below describe only what
// the visible code establishes.
78 int main(
int argc,
char* argv[] )
// --- MPI setup: init, then query rank and size; each failure path
// prints the MPI error code (error handling between the prints is not
// visible here).
80 int ierr = MPI_Init( &argc, &argv );
83 std::cerr <<
"MPI_Init failed with error code: " <<
ierr << std::endl;
90 std::cerr <<
"MPI_Comm_rank failed with error code: " <<
ierr << std::endl;
97 std::cerr <<
"MPI_Comm_size failed with error code: " <<
ierr << std::endl;
// --- Command-line state.  Null/zero means "not specified on the
// command line".
102 const char* output_file_name = 0;
103 const char* indiv_file_name = 0;
104 int intervals = 0, debug_level = 0;
// expect_* flags: set when a flag (-i/-o/-L/-g) was seen and its value
// is expected in the NEXT argv entry.
106 bool expect_intervals =
false;
107 bool expect_file_name =
false;
108 bool expect_indiv_file =
false;
109 bool skip_resolve_shared =
false;
110 bool expect_debug_level =
false;
// --- Argument loop: consume pending flag values first, then match
// flags themselves.
112 for(
int i = 1; i < argc; ++i )
114 if( expect_intervals )
// strtol with endptr: reject trailing junk and non-positive values.
117 intervals = (int)strtol( argv[i], &endptr, 0 );
118 if( *endptr || intervals < 1 )
120 std::cerr <<
"Invalid block interval value: " << argv[i] << std::endl;
123 expect_intervals =
false;
125 else if( expect_indiv_file )
127 indiv_file_name = argv[i];
128 expect_indiv_file =
false;
130 else if( expect_file_name )
132 output_file_name = argv[i];
133 expect_file_name =
false;
135 else if( expect_debug_level )
// atoi returns 0 on non-numeric input, so any value < 1 is rejected.
137 debug_level = atoi( argv[i] );
138 if( debug_level < 1 )
140 std::cerr <<
"Invalid argument following -g flag: \"" << argv[i] <<
'"' << std::endl;
143 expect_debug_level =
false;
// Flag matching: each -X flag arms the corresponding expect_* state,
// except -R (immediate boolean) and -h (help; handler not visible).
145 else if( !strcmp(
"-i", argv[i] ) )
146 expect_intervals =
true;
147 else if( !strcmp(
"-o", argv[i] ) )
148 expect_file_name =
true;
149 else if( !strcmp(
"-L", argv[i] ) )
150 expect_indiv_file =
true;
151 else if( !strcmp(
"-R", argv[i] ) )
152 skip_resolve_shared =
true;
153 else if( !strcmp(
"-g", argv[i] ) )
154 expect_debug_level =
true;
155 else if( !strcmp(
"-h", argv[i] ) )
// Unrecognized argument: print usage summary and suggest -h.
162 std::cerr <<
"Unexpected argument: " << argv[i] << std::endl
163 <<
"Usage: " << argv[0] <<
" " <<
args << std::endl
164 <<
" " << argv[0] <<
" -h" << std::endl
165 <<
" Try '-h' for help." << std::endl;
// A flag at the end of argv with no value following it.
// NOTE(review): expect_debug_level is not included in this check —
// a trailing '-g' would presumably not be reported here; confirm.
170 if( expect_file_name || expect_intervals || expect_indiv_file )
172 std::cerr <<
"Missing argument for '" << argv[argc - 1] <<
"'" << std::endl;
// If no -o was given, write to a (presumably temporary/default) name
// and delete the file afterwards.
183 bool keep_output_file =
true;
184 if( !output_file_name )
187 keep_output_file =
false;
// --- Phase 1: generate this rank's portion of the mesh; timed with
// MPI_Wtime (wall clock).
191 TPRINT(
"Generating mesh" );
192 double gen_time = MPI_Wtime();
198 std::cerr <<
"Mesh creation failed with error code: " << rval << std::endl;
201 gen_time = MPI_Wtime() - gen_time;
// --- Optional phase: serial per-rank dump.  File name is the -L value
// prefixed by the rank, zero-padded to the width of the largest rank
// (buffer formatting between lines 208 and 210 is not visible here).
204 if( indiv_file_name )
206 TPRINT(
"Writing individual file" );
208 int width = (int)ceil( log10(
size ) );
210 std::string name(
buffer );
211 name += indiv_file_name;
212 rval =
moab.write_file( name.c_str() );
215 std::cerr <<
"Failed to write file: " << name << std::endl;
// --- Phase 2: resolve entities shared across processor boundaries
// (skipped with -R; the help text warns interface entities are then
// duplicated in the output file).
220 double res_time = MPI_Wtime();
222 moab.get_entities_by_type( 0,
MBHEX, hexes );
223 if( !skip_resolve_shared )
225 TPRINT(
"Resolving shared entities" );
231 std::cerr <<
"ParallelComm::resolve_shared_ents failed" << std::endl;
235 res_time = MPI_Wtime() - res_time;
// --- Phase 3: the actual test — parallel write through MOAB's "MOAB"
// (HDF5) writer, with optional DEBUG_IO level appended to the options.
237 TPRINT(
"Beginning parallel write" );
238 double write_time = MPI_Wtime();
241 std::ostringstream opts;
242 opts <<
"PARALLEL=WRITE_PART";
243 if( debug_level > 0 ) opts <<
";DEBUG_IO=" << debug_level;
244 rval =
moab.write_file( output_file_name,
"MOAB", opts.str().c_str() );
// On failure, print both the symbolic error string and MOAB's
// last-error message.
249 moab.get_last_error( msg );
250 std::cerr <<
"File creation failed with error code: " <<
moab.get_error_string( rval ) << std::endl;
251 std::cerr <<
"\t\"" << msg <<
'"' << std::endl;
254 write_time = MPI_Wtime() - write_time;
// --- Timing report: reduce per-phase maxima to rank 0.
256 double times[3] = { gen_time, res_time, write_time };
257 double max[3] = { 0, 0, 0 };
258 MPI_Reduce( times, max, 3, MPI_DOUBLE, MPI_MAX, 0,
MPI_COMM_WORLD );
// 'sec' is derived from clock ticks ('t' is declared on a line not
// visible here) — CPU time, unlike the MPI_Wtime wall times above.
// NOTE(review): confirm the mixed clock()/MPI_Wtime reporting is
// intentional.
263 double sec = (double)
t / CLOCKS_PER_SEC;
// Reports hexes.size() * size as the global element count — this
// assumes every rank owns the same number of hexes; presumably true
// for this generator, but verify.
264 std::cout <<
"Wrote " << hexes.
size() *
size <<
" hexes in " << sec <<
" seconds." << std::endl;
// Clean up the output file unless -o asked to keep it.
266 if( !keep_output_file )
268 TPRINT(
"Removing written file" );
269 remove( output_file_name );
// NOTE(review): "resovle" is a typo ("resolve") in this user-visible
// string; left untouched because fixing runtime output is a behavior
// change in this fragmentary view.
272 std::cout <<
"Wall time: generate: " << max[0] <<
", resovle shared: " << max[1] <<
", write_file: " << max[2]
276 TPRINT(
"Finalizing MPI" );
277 return MPI_Finalize();
// Map lattice coordinates (i,j,k), each in [0, num_interval], to a linear
// index into the 'vertices' vector below: i varies fastest, k slowest.
280 #define IDX( i, j, k ) ( ( num_interval + 1 ) * ( ( num_interval + 1 ) * ( k ) + ( j ) ) + ( i ) )
// --- Mesh-generation routine (signature not visible in this excerpt;
// the function also continues past the last line shown).
// Global IDs are written so that a vertex shared by adjacent ranks gets
// the same ID on every rank — this is what resolve_shared_ents relies on.
289 Tag global_id =
moab.globalId_tag();
// Find root = ceil(cbrt(size)): smallest integer whose cube covers all
// ranks.  (The loop body incrementing 'root' is on a line not shown.)
295 while( root * root * root <
size )
// Block grid: start from root x (root-1) x (root-1) and grow the y then
// z dimension only if needed to reach 'size' blocks, so the grid is the
// smallest x-major box containing one block per rank.
297 int num_x_blocks = root;
298 int num_y_blocks = root - 1;
299 int num_z_blocks = root - 1;
300 if( num_x_blocks * num_y_blocks * num_z_blocks <
size ) ++num_y_blocks;
301 if( num_x_blocks * num_y_blocks * num_z_blocks <
size ) ++num_z_blocks;
// Decompose rank into (x,y,z) block coordinates: x increases most
// rapidly, z least rapidly (matches the program's help text).
304 int my_z_block =
rank / ( num_x_blocks * num_y_blocks );
305 int rem =
rank % ( num_x_blocks * num_y_blocks );
306 int my_y_block = rem / num_x_blocks;
307 int my_x_block = rem % num_x_blocks;
// Global vertex-lattice dimensions and this rank's offset into it,
// used solely to compute globally consistent vertex IDs below.
313 const int num_x_vtx = num_interval * num_x_blocks + 1;
314 const int num_y_vtx = num_interval * num_y_blocks + 1;
315 const int x_offset = my_x_block * num_interval;
316 const int y_offset = my_y_block * num_interval;
317 const int z_offset = my_z_block * num_interval;
// Each block spans a unit cube, so the vertex spacing is 1/num_interval.
318 double step = 1.0 / num_interval;
// Local (num_interval+1)^3 vertex lattice, indexed via IDX().
319 std::vector< EntityHandle > vertices( ( num_interval + 1 ) * ( num_interval + 1 ) * ( num_interval + 1 ) );
320 std::vector< EntityHandle >::iterator v = vertices.begin();
// --- Create vertices: position = block corner + lattice offset;
// tag each with its 1-based global lattice ID (x fastest).
321 for(
int k = 0; k <= num_interval; ++k )
323 for(
int j = 0; j <= num_interval; ++j )
325 for(
int i = 0; i <= num_interval; ++i )
327 double coords[] = { my_x_block + i * step, my_y_block + j * step, my_z_block + k * step };
329 rval =
moab.create_vertex( coords, h );
332 int id = 1 + x_offset + i + ( y_offset + j ) * num_x_vtx + ( z_offset + k ) * num_x_vtx * num_y_vtx;
333 rval =
moab.tag_set_data( global_id, &h, 1, &
id );
// 'v' presumably stores h into the lattice slot here (assignment line
// not visible); the assert guards against overrunning the vector.
336 assert( v != vertices.end() );
// --- Create hex elements: one per lattice cell, connectivity taken
// from the 8 surrounding lattice vertices.
343 for(
int k = 0; k < num_interval; ++k )
345 for(
int j = 0; j < num_interval; ++j )
347 for(
int i = 0; i < num_interval; ++i )
// Largest index used below must be inside 'vertices'.
349 assert(
IDX( i + 1, j + 1, k + 1 ) < (
int)vertices.size() );
// conn[] entries 2..8 of the hex (the first entry, presumably
// IDX(i,j,k) on line 350, is not visible in this excerpt); ordering
// follows MOAB's MBHEX canonical corner numbering — verify.
351 vertices[
IDX( i + 1, j, k )],
352 vertices[
IDX( i + 1, j + 1, k )],
353 vertices[
IDX( i, j + 1, k )],
354 vertices[
IDX( i, j, k + 1 )],
355 vertices[
IDX( i + 1, j, k + 1 )],
356 vertices[
IDX( i + 1, j + 1, k + 1 )],
357 vertices[
IDX( i, j + 1, k + 1 )] };
359 rval =
moab.create_element(
MBHEX, conn, 8, elem );