Actual source code: ex2.c
petsc-3.8.4 2018-03-24
static char help[] = "Solves a linear system in parallel with KSP.\n\
Input parameters include:\n\
  -random_exact_sol : use a random exact solution vector\n\
  -view_exact_sol   : write exact solution vector to stdout\n\
  -m <mesh_x>       : number of mesh points in x-direction\n\
  -n <mesh_y>       : number of mesh points in y-direction\n\n";

/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: n
T*/

/*
  Include "petscksp.h" so that we can use KSP solvers. Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscmat.h    - matrices
     petscis.h     - index sets
     petscksp.h    - Krylov subspace methods
     petscviewer.h - viewers
     petscpc.h     - preconditioners
*/
#include <petscksp.h>

int main(int argc,char **args)
{
  Vec            x,b,u;    /* approx solution, RHS, exact solution */
  Mat            A;        /* linear system matrix */
  KSP            ksp;      /* linear solver context */
  PetscRandom    rctx;     /* random number generator context */
  PetscReal      norm;     /* norm of solution error */
  PetscInt       i,j,Ii,J,Istart,Iend,m = 8,n = 7,its;
  PetscErrorCode ierr;     /* error code; checked after PetscInitialize() */
  PetscBool      flg = PETSC_FALSE;
  PetscScalar    v;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stage;
#endif

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  PetscOptionsGetInt(NULL,NULL,"-n",&n,NULL);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime. Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.

     Performance tuning note: For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance. See the matrix chapter of the users manual for details.
  */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(A);
  MatMPIAIJSetPreallocation(A,5,NULL,5,NULL);
  MatSeqAIJSetPreallocation(A,5,NULL);
  MatSeqSBAIJSetPreallocation(A,1,5,NULL);
  MatMPISBAIJSetPreallocation(A,1,5,NULL,5,NULL);
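
  /*
     The "5" in the preallocation calls above reflects the five-point
     stencil: each row has at most 5 nonzeros (the diagonal plus up to 4
     neighbors). Because MatSetFromOptions() was called, the format can be
     selected at runtime (e.g. -mat_type aij or -mat_type sbaij); only the
     preallocation call matching the active format takes effect, the
     others are no-ops.
  */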
  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors. Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);
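
  /*
     Illustration: print each rank's contiguous row range. The option name
     -view_ownership is an addition for this sketch (it is not a standard
     PETSc option); PetscSynchronizedPrintf() keeps the output ordered by
     rank.
  */
  {
    PetscBool prnt = PETSC_FALSE;
    PetscOptionsGetBool(NULL,NULL,"-view_ownership",&prnt,NULL);
    if (prnt) {
      PetscMPIInt rank;
      MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
      PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] owns rows %D through %D\n",rank,Istart,Iend-1);
      PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
    }
  }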
  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.

     Note: this uses the less common natural ordering that orders first
     all the unknowns for x = h, then for x = 2h, etc.; hence you see
     J = Ii +- n instead of J = Ii +- m as you might expect. The more
     standard ordering would first do all variables for y = h, then
     y = 2h, etc.
  */
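
  /*
     Worked example with the defaults m = 8, n = 7: global row Ii = 10
     gives i = 10/7 = 1 and j = 10 - 7 = 3; its off-diagonal entries go
     in columns J = 3 and 17 (i -+ 1) and J = 9 and 11 (j -+ 1), and the
     diagonal entry is (10,10).
  */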
  PetscLogStageRegister("Assembly", &stage);
  PetscLogStagePush(stage);
  for (Ii=Istart; Ii<Iend; Ii++) {
    v = -1.0; i = Ii/n; j = Ii - i*n;
    if (i>0)   {J = Ii - n; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (i<m-1) {J = Ii + n; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (j>0)   {J = Ii - 1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (j<n-1) {J = Ii + 1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    v = 4.0; MatSetValues(A,1,&Ii,1,&Ii,&v,ADD_VALUES);
  }

  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
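  /*
     (Independent local computations could be placed here, overlapping
     with the communication of off-process matrix entries.)
  */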
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  PetscLogStagePop();

  /* A is symmetric. Set symmetric flag to enable ICC/Cholesky preconditioner */
  MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
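
  /*
     With the symmetry flag set, symmetric methods can be requested at
     runtime, e.g. -ksp_type cg -pc_type icc (ICC runs on one process;
     in parallel, combine it with block Jacobi:
     -pc_type bjacobi -sub_pc_type icc).
  */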
  /*
     Create parallel vectors.
      - We form 1 vector from scratch and then duplicate as needed.
      - When using VecCreate(), VecSetSizes() and VecSetFromOptions()
        in this example, we specify only the vector's global dimension;
        the parallel partitioning is determined at runtime.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly. PETSc automatically generates
        appropriately partitioned matrices and vectors when MatCreate()
        and VecCreate() are used with the same communicator.
      - The user can alternatively specify the local vector and matrix
        dimensions when more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE argument in the VecSetSizes() statement
        below).
  */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);
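
  /*
     A minimal sanity check (added sketch): since u was created with
     PETSC_DECIDE on the same communicator as A, its row layout should
     coincide with the matrix's ownership range computed earlier.
  */
  {
    PetscInt vlow,vhigh;
    VecGetOwnershipRange(u,&vlow,&vhigh);
    if (vlow != Istart || vhigh != Iend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Vector layout does not match matrix rows");
  }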
  /*
     Set exact solution; then compute right-hand-side vector.
     By default we use an exact solution vector with all elements
     equal to 1.0; alternatively, the runtime option -random_exact_sol
     forms a solution vector with random components.
  */
  PetscOptionsGetBool(NULL,NULL,"-random_exact_sol",&flg,NULL);
  if (flg) {
    PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
    PetscRandomSetFromOptions(rctx);
    VecSetRandom(u,rctx);
    PetscRandomDestroy(&rctx);
  } else {
    VecSet(u,1.0);
  }
  MatMult(A,u,b);

  /*
     View the exact solution vector if desired
  */
  flg = PETSC_FALSE;
  PetscOptionsGetBool(NULL,NULL,"-view_exact_sol",&flg,NULL);
  if (flg) {VecView(u,PETSC_VIEWER_STDOUT_WORLD);}

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A);

  /*
     Set linear solver defaults for this problem (optional).
     - By extracting the KSP and PC contexts from the KSP context,
       we can then directly call any KSP and PC routines to set
       various options.
     - The following statements are optional; all of these
       parameters could alternatively be specified at runtime via
       KSPSetFromOptions(). All of these defaults can be
       overridden at runtime, as indicated below.
  */
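
  /*
     For instance, extracting the PC context as described above and
     presetting Jacobi (a sketch; any PC routine could be called here,
     and -pc_type on the command line still overrides this choice):
  */
  {
    PC pc;
    KSPGetPC(ksp,&pc);
    PCSetType(pc,PCJACOBI);
  }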
  KSPSetTolerances(ksp,1.e-2/((m+1)*(n+1)),1.e-50,PETSC_DEFAULT,PETSC_DEFAULT);

  /*
     Set runtime options, e.g.,
         -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.
  */
  KSPSetFromOptions(ksp);
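
  /*
     Example invocations (the option values here are illustrative only):
         mpiexec -n 4 ./ex2 -m 40 -n 40 -ksp_type cg -pc_type bjacobi -ksp_monitor
         mpiexec -n 2 ./ex2 -random_exact_sol -view_exact_sol -ksp_rtol 1.e-8
  */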
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                        Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  KSPSolve(ksp,b,x);
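
  /*
     Illustrative check (an addition to this example): inspect why the
     iteration stopped; a negative KSPConvergedReason signals divergence
     or a breakdown.
  */
  {
    KSPConvergedReason reason;
    KSPGetConvergedReason(ksp,&reason);
    if (reason < 0) PetscPrintf(PETSC_COMM_WORLD,"Warning: solver did not converge (reason %d)\n",(int)reason);
  }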

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                        Check solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Check the error
  */
  VecAXPY(x,-1.0,u);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);

  /*
     Print convergence information. PetscPrintf() produces a single
     print statement from all processes that share a communicator.
     An alternative is PetscFPrintf(), which prints to a file.
  */
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g iterations %D\n",(double)norm,its);

  /*
     Free work space. All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u); VecDestroy(&x);
  VecDestroy(&b); MatDestroy(&A);

  /*
     Always call PetscFinalize() before exiting a program. This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  ierr = PetscFinalize();
  return ierr;
}