    moab::HypreParMatrix& A,
    moab::HypreParVector& sol,
    moab::HypreParVector& rhs,
    moab::HypreParVector& exactsol );
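
// Driver: builds a distributed 27-point stencil test system through MOAB's HYPRE wrappers
// and solves it with PCG or GMRES, optionally preconditioned by BoomerAMG or ParaSails.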
int main( int argc, char* argv[] )
    int nx = 5, ny = 5, nz = 5;

    MPI_Init( &argc, &argv );
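    // The operator and the solution/right-hand-side vectors are all built on the same
    // parallel communicator (pcomm).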
    moab::HypreParMatrix A( pcomm );
    moab::HypreParVector x( pcomm ), b( pcomm ), xexact( pcomm );
        cout << "Press enter to continue" << endl;
        cerr << "Usage:" << endl
             << "\t" << argv[0] << " nx ny nz" << endl
             << " where nx, ny and nz are the local sub-block dimensions, or" << endl;
    cout << "Using command:"
         << "\t" << argv[0] << " nx ny nz" << endl;
    times[0] = MPI_Wtime();

    times[1] = MPI_Wtime() - times[0];
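    // Hard-coded switches: useCG selects HYPRE PCG (otherwise GMRES) as the Krylov method,
    // and useAMG selects BoomerAMG (otherwise ParaSails) as the preconditioner.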
    const bool useCG  = true;
    const bool useAMG = false;
    times[2] = MPI_Wtime();
    moab::HypreSolver *lsSolver, *psSolver;
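    // The generic HypreSolver pointers keep the rest of the driver independent of the
    // concrete solver and preconditioner types chosen below.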
    if( useCG )
    {
        lsSolver                 = new moab::HyprePCG( A );
        moab::HyprePCG* cgSolver = dynamic_cast< moab::HyprePCG* >( lsSolver );
        cgSolver->SetMaxIter( max_iter );
        cgSolver->SetLogging( 1 );
        cgSolver->Verbosity( 1 );
        cgSolver->SetResidualConvergenceOptions( 3, 0.0 );
    }
    else
    {
        lsSolver                      = new moab::HypreGMRES( A );
        moab::HypreGMRES* gmresSolver = dynamic_cast< moab::HypreGMRES* >( lsSolver );
        gmresSolver->SetMaxIter( max_iter );
        gmresSolver->SetLogging( 1 );
        gmresSolver->Verbosity( 5 );
        gmresSolver->SetKDim( 30 );
    }
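    // Preconditioner: BoomerAMG (algebraic multigrid) when useAMG is set,
    // ParaSails (sparse approximate inverse) otherwise.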
    if( useAMG )
    {
        psSolver = new moab::HypreBoomerAMG( A );
        dynamic_cast< moab::HypreBoomerAMG* >( psSolver )->SetSystemsOptions( 3 );
    }
    else
    {
        psSolver = new moab::HypreParaSails( A );
        dynamic_cast< moab::HypreParaSails* >( psSolver )->SetSymmetry( 1 );
    }
    if( psSolver ) lsSolver->SetPreconditioner( *psSolver );

    times[2] = MPI_Wtime() - times[2];

    times[3] = MPI_Wtime();
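    // Solve A x = b and record the iteration count and final residual norm.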
    ierr = lsSolver->Solve( b, x );

    if( ierr ) cerr << "Error in call to CG/GMRES HYPRE Solver: " << ierr << ".\n" << endl;

    times[3] = MPI_Wtime() - times[3];
    lsSolver->GetNumIterations( niters );
    lsSolver->GetFinalResidualNorm( normr );
    times[4] = MPI_Wtime() - times[0];
    double t4 = times[3];
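    // Reduce the per-rank solve time to its minimum, maximum, and average over all ranks.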
    MPI_Allreduce( &t4, &t4min, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
    MPI_Allreduce( &t4, &t4max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
    MPI_Allreduce( &t4, &t4avg, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
    t4avg = t4avg / ( (double)size );
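    // Report problem size, iteration count, final residual, and a timing breakdown.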
    std::cout << "Testing Laplace problem solver with HYPRE interface\n";
    std::cout << "\tParallelism\n";
    std::cout << "Number of MPI ranks: \t" << size << std::endl;
    std::cout << "MPI not enabled\n";
    std::cout << "Dimensions (nx*ny*nz): \t(" << nx << "*" << ny << "*" << nz << ")\n";
    std::cout << "Number of iterations: \t" << niters << std::endl;
    std::cout << "Final residual: \t" << normr << std::endl;
    std::cout << "************ Performance Summary (times in sec) *************" << std::endl;
    std::cout << "\nTime Summary" << std::endl;
    std::cout << "Total \t" << times[4] << std::endl;
    std::cout << "Operator Creation \t" << times[1] << std::endl;
    std::cout << "Solver Setup \t" << times[2] << std::endl;
    std::cout << "HYPRE Solver \t" << times[3] << std::endl;
    std::cout << "Avg. Solver \t" << t4avg << std::endl;
    moab::HypreParMatrix& A,
    moab::HypreParVector& sol,
    moab::HypreParVector& rhs,
    moab::HypreParVector& exactsol )
    bool use_7pt_stencil = false;  // if true, keep only the 7-point subset of the 27-point stencil
    const int NNZPERROW  = 27;     // upper bound on nonzeros per row (full 27-point stencil)
    int local_nrow = nx * ny * nz;
    assert( local_nrow > 0 );
    int local_nnz = NNZPERROW * local_nrow;
    MPI_Allreduce( &local_nrow, &total_nrow, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );  // row counts are ints, so reduce with MPI_INT
    long long total_nnz = NNZPERROW * (long long)total_nrow;

    int start_row = local_nrow * rank;
    int stop_row  = start_row + local_nrow - 1;
    HYPRE_Int col[2] = { start_row, stop_row };
    A.resize( total_nrow, col );

    sol.resize( total_nrow, start_row, stop_row );
    rhs.resize( total_nrow, start_row, stop_row );
    exactsol.resize( total_nrow, start_row, stop_row );
    long long nnzglobal = 0;
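    // Assemble the operator one row at a time: the diagonal entry is 27 and every neighbour
    // of the grid point inside the 3x3x3 stencil (and inside the domain) contributes -1.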
    for( int iz = 0; iz < nz; iz++ )
    {
        for( int iy = 0; iy < ny; iy++ )
        {
            for( int ix = 0; ix < nx; ix++ )
            {
                const int curlocalrow = iz * nx * ny + iy * nx + ix;
                const int currow      = start_row + curlocalrow;

                std::vector< double > colvals;
                std::vector< int > indices;

                for( int sz = -1; sz <= 1; sz++ )
                {
                    for( int sy = -1; sy <= 1; sy++ )
                    {
                        for( int sx = -1; sx <= 1; sx++ )
                        {
                            const int curlocalcol = sz * nx * ny + sy * nx + sx;
                            const int curcol      = currow + curlocalcol;

                            if( ( ix + sx >= 0 ) && ( ix + sx < nx ) && ( iy + sy >= 0 ) && ( iy + sy < ny ) &&
                                ( curcol >= 0 && curcol < total_nrow ) )
                            {
                                if( !use_7pt_stencil || ( sz * sz + sy * sy + sx * sx <= 1 ) )
                                {
                                    if( curcol == currow )
                                        colvals.push_back( 27.0 );
                                    else
                                        colvals.push_back( -1.0 );
                                    indices.push_back( curcol );
                                }
                            }
                        }
                    }
                }

                int ncols = indices.size();
                A.SetValues( 1, &ncols, &currow, indices.data(), colvals.data() );

                sol.SetValue( currow, 0.0 );
                rhs.SetValue( currow, 27.0 - ( (double)( nnzrow - 1 ) ) );  // with nnzrow = stored entries in the row, rhs equals A times the all-ones solution
                exactsol.SetValue( currow, 1.0 );
            }
        }
    }
    cout << "Global size of the matrix: " << total_nrow << " and NNZ (estimate) = " << total_nnz
         << " NNZ (actual) = " << nnzglobal << endl;
    if( debug )
        cout << "Process " << rank << " of " << size << " has " << local_nrow << " rows. Global rows " << start_row
             << " through " << stop_row << endl;
    if( debug ) cout << "Process " << rank << " of " << size << " has " << local_nnz << " nonzeros." << endl;
    A.FinalizeAssembly();
    sol.FinalizeAssembly();
    rhs.FinalizeAssembly();
    exactsol.FinalizeAssembly();