Actual source code: ex9.c (petsc-3.7.7, 2017-09-25)
static char help[] = "Solves 2 different linear systems with different linear solvers.\n\
Also, this example illustrates the repeated\n\
solution of linear systems, while reusing matrix, vector, and solver data\n\
structures throughout the process. Note the various stages of event logging.\n\n";
/*T
   Concepts: KSP^repeatedly solving linear systems;
   Concepts: PetscLog^profiling multiple stages of code;
   Concepts: PetscLog^user-defined event profiling;
   Processors: n
T*/
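/*
   Example usage (a sketch, not an exhaustive list: -m and -t are the
   options read by this example; the remaining ones are standard PETSc
   runtime options):
       mpiexec -n 2 ./ex9 -m 4 -t 2 -ksp_monitor_short -s2_ksp_type bcgs
   The -s2_ prefix targets the second solver (set via
   KSPAppendOptionsPrefix() below), and -my_ksp_monitor activates the
   user-defined monitor MyKSPMonitor().
*/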
/*
   Include "petscksp.h" so that we can use KSP solvers.  Note that this file
   automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscmat.h    - matrices
     petscis.h     - index sets
     petscksp.h    - Krylov subspace methods
     petscviewer.h - viewers
     petscpc.h     - preconditioners
*/
#include <petscksp.h>
/*
   Declare user-defined routines
*/
extern PetscErrorCode CheckError(Vec,Vec,Vec,PetscInt,PetscReal,PetscLogEvent);
extern PetscErrorCode MyKSPMonitor(KSP,PetscInt,PetscReal,void*);
int main(int argc,char **args)
{
  Vec           x1,b1,x2,b2; /* solution and RHS vectors for systems #1 and #2 */
  Vec           u;           /* exact solution vector */
  Mat           C1,C2;       /* matrices for systems #1 and #2 */
  KSP           ksp1,ksp2;   /* KSP contexts for systems #1 and #2 */
  PetscInt      ntimes = 3;  /* number of times to solve the linear systems */
  PetscLogEvent CHECK_ERROR; /* event number for error checking */
  PetscInt      ldim,low,high,iglobal,Istart,Iend,Istart2,Iend2;
  PetscInt      Ii,J,i,j,m = 3,n = 2,its,t;
  PetscBool     flg = PETSC_FALSE;
  PetscScalar   v;
  PetscMPIInt   rank,size;
#if defined(PETSC_USE_LOG)
  PetscLogStage stages[3];
#endif
  PetscInitialize(&argc,&args,(char*)0,help);
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  PetscOptionsGetInt(NULL,NULL,"-t",&ntimes,NULL);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  n = 2*size;
  /*
     Register various stages for profiling
  */
  PetscLogStageRegister("Prelim setup",&stages[0]);
  PetscLogStageRegister("Linear System 1",&stages[1]);
  PetscLogStageRegister("Linear System 2",&stages[2]);

  /*
     Register a user-defined event for profiling (error checking).
  */
  CHECK_ERROR = 0;
  PetscLogEventRegister("Check Error",KSP_CLASSID,&CHECK_ERROR);
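  /*
     Once registered, these stages and the "Check Error" event appear as
     separate sections of the profiling report. With a logging-enabled
     PETSc build, that report can be requested with the standard runtime
     option -log_view, e.g.:
         mpiexec -n 2 ./ex9 -log_view
  */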
  /* - - - - - - - - - - - - Stage 0: - - - - - - - - - - - - - -
                        Preliminary Setup
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscLogStagePush(stages[0]);
  /*
     Create data structures for first linear system.
      - Create parallel matrix, specifying only its global dimensions.
        When using MatCreate(), the matrix format can be specified at
        runtime. Also, the parallel partitioning of the matrix is
        determined by PETSc at runtime.
      - Create parallel vectors.
      - When using VecSetSizes(), we specify only the vector's global
        dimension; the parallel partitioning is determined at runtime.
      - Note: We form 1 vector from scratch and then duplicate as needed.
  */
  MatCreate(PETSC_COMM_WORLD,&C1);
  MatSetSizes(C1,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C1);
  MatSetUp(C1);
  MatGetOwnershipRange(C1,&Istart,&Iend);
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b1);
  VecDuplicate(u,&x1);
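  /*
     Because MatSetFromOptions() and VecSetFromOptions() are used above,
     the storage formats can also be chosen on the command line, for
     example (standard PETSc type names, shown purely as an illustration):
         ./ex9 -mat_type mpiaij -vec_type mpi
  */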
  /*
     Create first linear solver context.
     Set runtime options (e.g., -pc_type <type>).
     Note that the first linear system uses the default option
     names, while the second linear system uses a different
     options prefix.
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp1);
  KSPSetFromOptions(ksp1);
  /*
     Set user-defined monitoring routine for first linear system.
  */
  PetscOptionsGetBool(NULL,NULL,"-my_ksp_monitor",&flg,NULL);
  if (flg) {KSPMonitorSet(ksp1,MyKSPMonitor,NULL,NULL);}
  /*
     Create data structures for second linear system.
  */
  MatCreate(PETSC_COMM_WORLD,&C2);
  MatSetSizes(C2,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C2);
  MatSetUp(C2);
  MatGetOwnershipRange(C2,&Istart2,&Iend2);
  VecDuplicate(u,&b2);
  VecDuplicate(u,&x2);

  /*
     Create second linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp2);

  /*
     Set different options prefix for second linear system.
     Set runtime options (e.g., -s2_pc_type <type>)
  */
  KSPAppendOptionsPrefix(ksp2,"s2_");
  KSPSetFromOptions(ksp2);
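  /*
     With this prefix the two solvers can be configured independently at
     runtime, for example (the solver/preconditioner names are standard
     PETSc ones, chosen here only for illustration):
         ./ex9 -ksp_type gmres -pc_type jacobi -s2_ksp_type bcgs -s2_pc_type jacobi
  */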
  /*
     Assemble exact solution vector in parallel. Note that each
     processor needs to set only its local part of the vector.
  */
  VecGetLocalSize(u,&ldim);
  VecGetOwnershipRange(u,&low,&high);
  for (i=0; i<ldim; i++) {
    iglobal = i + low;
    v       = (PetscScalar)(i + 100*rank);
    VecSetValues(u,1,&iglobal,&v,ADD_VALUES);
  }
  VecAssemblyBegin(u);
  VecAssemblyEnd(u);

  /*
     Log the number of flops for computing vector entries
  */
  PetscLogFlops(2.0*ldim);
  /*
     End current profiling stage
  */
  PetscLogStagePop();
  /* --------------------------------------------------------------
                        Linear solver loop:
      Solve 2 different linear systems several times in succession
     -------------------------------------------------------------- */

  for (t=0; t<ntimes; t++) {

    /* - - - - - - - - - - - - Stage 1: - - - - - - - - - - - - - -
                 Assemble and solve first linear system
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

    /*
       Begin profiling stage #1
    */
    PetscLogStagePush(stages[1]);

    /*
       Initialize all matrix entries to zero. MatZeroEntries() retains
       the nonzero structure of the matrix for sparse formats.
    */
    if (t > 0) {MatZeroEntries(C1);}

    /*
       Set matrix entries in parallel. Also, log the number of flops
       for computing matrix entries.
        - Each processor needs to insert only elements that it owns
          locally (but any non-local elements will be sent to the
          appropriate processor during matrix assembly).
        - Always specify global rows and columns of matrix entries.
    */
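    /*
       The loops below fill in a 5-point finite-difference stencil on an
       m x n grid: grid point (i,j) corresponds to global row Ii = i*n + j,
       so Ii-n and Ii+n are its vertical neighbors and Ii-1 and Ii+1 its
       horizontal neighbors.
    */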
    for (Ii=Istart; Ii<Iend; Ii++) {
      v = -1.0; i = Ii/n; j = Ii - i*n;
      if (i>0)   {J = Ii - n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (i<m-1) {J = Ii + n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (j>0)   {J = Ii - 1; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (j<n-1) {J = Ii + 1; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      v = 4.0; MatSetValues(C1,1,&Ii,1,&Ii,&v,ADD_VALUES);
    }
    for (Ii=Istart; Ii<Iend; Ii++) { /* Make matrix nonsymmetric */
      v = -1.0*(t+0.5); i = Ii/n;
      if (i>0) {J = Ii - n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
    }
    PetscLogFlops(2.0*(Iend-Istart));
    /*
       Assemble matrix, using the 2-step process:
         MatAssemblyBegin(), MatAssemblyEnd()
       Computations can be done while messages are in transition
       by placing code between these two statements.
    */
    MatAssemblyBegin(C1,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(C1,MAT_FINAL_ASSEMBLY);

    /*
       Allow new nonzero locations in this matrix on later assemblies
       (contrast with the PETSC_FALSE setting used for the second system
       below, which declares the nonzero structure fixed).
    */
    MatSetOption(C1,MAT_NEW_NONZERO_LOCATIONS,PETSC_TRUE);
    /*
       Compute right-hand-side vector
    */
    MatMult(C1,u,b1);

    /*
       Set operators. Here the matrix that defines the linear system
       also serves as the preconditioning matrix.
    */
    KSPSetOperators(ksp1,C1,C1);
    /*
       Use the previous solution of linear system #1 as the initial
       guess for the next solve of linear system #1. The user MUST
       call KSPSetInitialGuessNonzero() to indicate use of an initial
       guess vector; otherwise, an initial guess of zero is used.
    */
    if (t>0) {
      KSPSetInitialGuessNonzero(ksp1,PETSC_TRUE);
    }
    /*
       Solve the first linear system. Here we explicitly call
       KSPSetUp() for more detailed performance monitoring of
       certain preconditioners, such as ICC and ILU. This call
       is optional, as KSPSetUp() will automatically be called
       within KSPSolve() if it hasn't been called already.
    */
    KSPSetUp(ksp1);
    KSPSolve(ksp1,b1,x1);
    KSPGetIterationNumber(ksp1,&its);

    /*
       Check error of solution to first linear system
    */
    CheckError(u,x1,b1,its,1.e-4,CHECK_ERROR);
    /* - - - - - - - - - - - - Stage 2: - - - - - - - - - - - - - -
                 Assemble and solve second linear system
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

    /*
       Conclude profiling stage #1; begin profiling stage #2
    */
    PetscLogStagePop();
    PetscLogStagePush(stages[2]);

    /*
       Initialize all matrix entries to zero
    */
    if (t > 0) {MatZeroEntries(C2);}
    /*
       Assemble matrix in parallel. Also, log the number of flops
       for computing matrix entries.
        - To illustrate the features of parallel matrix assembly, we
          intentionally set the values differently from the way in
          which the matrix is distributed across the processors. Each
          entry that is not owned locally will be sent to the appropriate
          processor during MatAssemblyBegin() and MatAssemblyEnd().
        - For best efficiency the user should strive to set as many
          entries locally as possible.
    */
    for (i=0; i<m; i++) {
      for (j=2*rank; j<2*rank+2; j++) {
        v = -1.0; Ii = j + n*i;
        if (i>0)   {J = Ii - n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (i<m-1) {J = Ii + n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (j>0)   {J = Ii - 1; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (j<n-1) {J = Ii + 1; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        v = 6.0 + t*0.5; MatSetValues(C2,1,&Ii,1,&Ii,&v,ADD_VALUES);
      }
    }
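    /*
       Note that each process has just set the entries of grid columns
       2*rank and 2*rank+1 for every grid row i, while the matrix rows
       themselves are distributed across processes in contiguous blocks;
       most of these values therefore travel to their owning process
       during the assembly calls below.
    */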
    for (Ii=Istart2; Ii<Iend2; Ii++) { /* Make matrix nonsymmetric */
      v = -1.0*(t+0.5); i = Ii/n;
      if (i>0) {J = Ii - n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
    }
    MatAssemblyBegin(C2,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(C2,MAT_FINAL_ASSEMBLY);
    PetscLogFlops(2.0*(Iend2-Istart2));
    /*
       Indicate same nonzero structure of successive linear system matrices
    */
    MatSetOption(C2,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);
    /*
       Compute right-hand-side vector
    */
    MatMult(C2,u,b2);

    /*
       Set operators. Here the matrix that defines the linear system
       also serves as the preconditioning matrix.
    */
    KSPSetOperators(ksp2,C2,C2);
    /*
       Solve the second linear system
    */
    KSPSetUp(ksp2);
    KSPSolve(ksp2,b2,x2);
    KSPGetIterationNumber(ksp2,&its);

    /*
       Check error of solution to second linear system
    */
    CheckError(u,x2,b2,its,1.e-4,CHECK_ERROR);

    /*
       Conclude profiling stage #2
    */
    PetscLogStagePop();
  }
  /* --------------------------------------------------------------
                       End of linear solver loop
     -------------------------------------------------------------- */

  /*
     Free work space. All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp1); KSPDestroy(&ksp2);
  VecDestroy(&x1);   VecDestroy(&x2);
  VecDestroy(&b1);   VecDestroy(&b2);
  MatDestroy(&C1);   MatDestroy(&C2);
  VecDestroy(&u);

  PetscFinalize();
  return 0;
}
/* ------------------------------------------------------------- */
/*
   CheckError - Checks the error of the solution.

   Input Parameters:
     u           - exact solution
     x           - approximate solution
     b           - work vector
     its         - number of iterations for convergence
     tol         - tolerance
     CHECK_ERROR - the event number for error checking
                   (for use with profiling)

   Notes:
   In order to profile this section of code separately from the
   rest of the program, we register it as an "event" with
   PetscLogEventRegister() in the main program. Then, we indicate
   the start and end of this event by respectively calling
       PetscLogEventBegin(CHECK_ERROR,u,x,b,0);
       PetscLogEventEnd(CHECK_ERROR,u,x,b,0);
   Here, we specify the objects most closely associated with
   the event (the vectors u,x,b). Such information is optional;
   we could instead just use 0 for all objects.
*/
PetscErrorCode CheckError(Vec u,Vec x,Vec b,PetscInt its,PetscReal tol,PetscLogEvent CHECK_ERROR)
{
  PetscScalar none = -1.0;
  PetscReal   norm;

  PetscLogEventBegin(CHECK_ERROR,u,x,b,0);

  /*
     Compute error of the solution, using b as a work vector.
  */
  VecCopy(x,b);
  VecAXPY(b,none,u);
  VecNorm(b,NORM_2,&norm);
  if (norm > tol) {
    PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g, Iterations %D\n",(double)norm,its);
  }
  PetscLogEventEnd(CHECK_ERROR,u,x,b,0);
  return 0;
}
/* ------------------------------------------------------------- */
/*
   MyKSPMonitor - This is a user-defined routine for monitoring
   the KSP iterative solvers.

   Input Parameters:
     ksp   - iterative context
     n     - iteration number
     rnorm - 2-norm (preconditioned) residual value (may be estimated)
     dummy - optional user-defined monitor context (unused here)
*/
PetscErrorCode MyKSPMonitor(KSP ksp,PetscInt n,PetscReal rnorm,void *dummy)
{
  Vec x;

  /*
     Build the solution vector
  */
  KSPBuildSolution(ksp,NULL,&x);

  /*
     Write the solution vector and residual norm to stdout.
      - PetscPrintf() handles output for multiprocessor jobs
        by printing from only one processor in the communicator.
      - The parallel viewer PETSC_VIEWER_STDOUT_WORLD handles
        data from multiple processors so that the output
        is not jumbled.
  */
  PetscPrintf(PETSC_COMM_WORLD,"iteration %D solution vector:\n",n);
  VecView(x,PETSC_VIEWER_STDOUT_WORLD);
  PetscPrintf(PETSC_COMM_WORLD,"iteration %D KSP Residual norm %14.12e \n",n,rnorm);
  return 0;
}