Actual source code: ex18.c
static const char help[] = "Solves a (permuted) linear system in parallel with KSP.\n\
Input parameters include:\n\
  -permute <natural,rcm,nd,...> : solve system in permuted indexing\n\
  -random_exact_sol : use a random exact solution vector\n\
  -view_exact_sol   : write exact solution vector to stdout\n\
  -m <mesh_x>       : number of mesh points in x-direction\n\
  -n <mesh_y>       : number of mesh points in y-direction\n\n";
/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: n
T*/
/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscmat.h    - matrices
     petscis.h     - index sets
     petscksp.h    - Krylov subspace methods
     petscviewer.h - viewers
     petscpc.h     - preconditioners
*/
#include <petscksp.h>
int main(int argc,char **args)
{
  Vec            x,b,u;      /* approx solution, RHS, exact solution */
  Mat            A;          /* linear system matrix */
  KSP            ksp;        /* linear solver context */
  PetscRandom    rctx;       /* random number generator context */
  PetscReal      norm;       /* norm of solution error */
  PetscInt       i,j,Ii,J,Istart,Iend,m,n,its;
  PetscErrorCode ierr;       /* error code returned by PETSc routines */
  PetscBool      random_exact_sol,view_exact_sol,permute;
  char           ordering[256] = MATORDERINGRCM;
  IS             rowperm = NULL,colperm = NULL;
  PetscScalar    v;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stage;
#endif
  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  PetscOptionsBegin(PETSC_COMM_WORLD,NULL,"Poisson example options","");
  {
    m = 8;
    PetscOptionsInt("-m","Number of grid points in x direction","",m,&m,NULL);
    n = m-1;
    PetscOptionsInt("-n","Number of grid points in y direction","",n,&n,NULL);
    random_exact_sol = PETSC_FALSE;
    PetscOptionsBool("-random_exact_sol","Choose a random exact solution","",random_exact_sol,&random_exact_sol,NULL);
    view_exact_sol = PETSC_FALSE;
    PetscOptionsBool("-view_exact_sol","View exact solution","",view_exact_sol,&view_exact_sol,NULL);
    permute = PETSC_FALSE;
    PetscOptionsFList("-permute","Permute matrix and vector to solve in a new ordering","",MatOrderingList,ordering,ordering,sizeof(ordering),&permute);
  }
  PetscOptionsEnd();
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime.  Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.

     Performance tuning note:  For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance.  See the matrix chapter of the users manual for details.
  */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(A);
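  /*
     The 5-point stencil set below creates at most 5 nonzeros per row, so 5 is
     a safe per-row preallocation estimate for both the diagonal and
     off-diagonal blocks of the parallel (MPIAIJ) format.
  */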
  MatMPIAIJSetPreallocation(A,5,NULL,5,NULL);
  MatSeqAIJSetPreallocation(A,5,NULL);
  MatSetUp(A);
  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);
  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.

     Note: this uses the less common natural ordering that orders first
     all the unknowns for x = h, then for x = 2h, etc.; hence you see
     J = Ii +- n instead of J = Ii +- m as you might expect.  The more
     standard ordering would first do all variables for y = h, then y = 2h, etc.
  */
  PetscLogStageRegister("Assembly", &stage);
  PetscLogStagePush(stage);
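  /*
     Each interior row receives the standard 5-point Laplacian stencil:
     -1 for the four grid neighbors and 4 on the diagonal; boundary rows
     simply omit the neighbors that fall outside the mesh.
  */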
  for (Ii=Istart; Ii<Iend; Ii++) {
    v = -1.0; i = Ii/n; j = Ii - i*n;
    if (i>0)   {J = Ii - n; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    if (i<m-1) {J = Ii + n; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    if (j>0)   {J = Ii - 1; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    if (j<n-1) {J = Ii + 1; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    v = 4.0; MatSetValues(A,1,&Ii,1,&Ii,&v,INSERT_VALUES);
  }
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
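  /* (Any purely local work that does not touch A could be overlapped with the
     assembly communication here; this example has none, so MatAssemblyEnd()
     follows immediately.) */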
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  PetscLogStagePop();
  /* A is symmetric. Set symmetric flag to enable ICC/Cholesky preconditioner */
  MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
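  /* For example, the symmetric Krylov method and preconditioner exercised in
     the tests below can be selected at runtime with -ksp_type cg -pc_type icc. */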
  /*
     Create parallel vectors.
      - We form 1 vector from scratch and then duplicate as needed.
      - When using VecCreate(), VecSetSizes() and VecSetFromOptions()
        in this example, we specify only the vector's global dimension;
        the parallel partitioning is determined at runtime.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly.  PETSc automatically generates
        appropriately partitioned matrices and vectors when MatCreate()
        and VecCreate() are used with the same communicator.
      - The user can alternatively specify the local vector and matrix
        dimensions when more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE argument in the VecSetSizes() statement
        below).
  */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);
  /*
     Set exact solution; then compute right-hand-side vector.
     By default we use an exact solution of a vector with all
     elements equal to 1.0; alternatively, the runtime option
     -random_exact_sol forms a solution vector with random components.
  */
  if (random_exact_sol) {
    PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
    PetscRandomSetFromOptions(rctx);
    VecSetRandom(u,rctx);
    PetscRandomDestroy(&rctx);
  } else {
    VecSet(u,1.0);
  }
  MatMult(A,u,b);
  /*
     View the exact solution vector if desired
  */
  if (view_exact_sol) {VecView(u,PETSC_VIEWER_STDOUT_WORLD);}

  if (permute) {
    Mat Aperm;
    MatGetOrdering(A,ordering,&rowperm,&colperm);
    MatPermute(A,rowperm,colperm,&Aperm);
    VecPermute(b,colperm,PETSC_FALSE);
    MatDestroy(&A);
    A = Aperm;                /* Replace original operator with permuted version */
  }
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A);

  /*
     Set linear solver defaults for this problem (optional).
     - By extracting the KSP and PC contexts from the KSP context,
       we can then directly call any KSP and PC routines to set
       various options.
     - The following statement is optional; all of these
       parameters could alternatively be specified at runtime via
       KSPSetFromOptions().  All of these defaults can be
       overridden at runtime, as indicated below.
  */
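  /* The relative convergence tolerance below is scaled by 1/((m+1)*(n+1)),
     i.e. roughly the square of the mesh width on a unit square, so the
     tolerance tightens as the grid is refined. */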
  KSPSetTolerances(ksp,1.e-2/((m+1)*(n+1)),PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT);
  /*
     Set runtime options, e.g.,
         -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.
  */
  KSPSetFromOptions(ksp);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  KSPSolve(ksp,b,x);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Check solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
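  /* If the system was solved in a permuted ordering, map the computed solution
     back to the original (natural) ordering before comparing it with the exact
     solution u. */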
  if (permute) {VecPermute(x,rowperm,PETSC_TRUE);}

  /*
     Check the error
  */
  VecAXPY(x,-1.0,u);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);

  /*
     Print convergence information.  PetscPrintf() produces a single
     print statement from all processes that share a communicator.
     An alternative is PetscFPrintf(), which prints to a file.
  */
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g iterations %D\n",(double)norm,its);
  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u);  VecDestroy(&x);
  VecDestroy(&b);  MatDestroy(&A);
  ISDestroy(&rowperm);  ISDestroy(&colperm);
  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  ierr = PetscFinalize();
  return ierr;
}
/*TEST

   test:
      nsize: 3
      args: -m 39 -n 18 -ksp_monitor_short -permute nd
      requires: !single

   test:
      suffix: 2
      nsize: 3
      args: -m 39 -n 18 -ksp_monitor_short -permute rcm
      requires: !single

   test:
      suffix: 3
      nsize: 3
      args: -m 13 -n 17 -ksp_monitor_short -ksp_type cg -ksp_cg_single_reduction
      requires: !single

   test:
      suffix: bas
      args: -m 13 -n 17 -ksp_monitor_short -ksp_type cg -pc_type icc -pc_factor_mat_solver_type bas -ksp_view -pc_factor_levels 1
      filter: grep -v "variant "
      requires: !single

TEST*/
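
/*
   Example usage (a sketch, assuming PETSc's standard makefile targets and an
   MPI launcher named mpiexec; adjust to your installation):

       make ex18
       mpiexec -n 3 ./ex18 -m 39 -n 18 -ksp_monitor_short -permute rcm

   This mirrors the second test above: three MPI ranks, a 39x18 grid, and the
   system solved in reverse Cuthill-McKee (RCM) ordering.
*/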