Actual source code: ex18.c
petsc-3.8.4 2018-03-24
static const char help[] = "Solves a (permuted) linear system in parallel with KSP.\n\
Input parameters include:\n\
  -permute <natural,rcm,nd,...> : solve system in permuted indexing\n\
  -random_exact_sol : use a random exact solution vector\n\
  -view_exact_sol   : write exact solution vector to stdout\n\
  -m <mesh_x>       : number of mesh points in x-direction\n\
  -n <mesh_y>       : number of mesh points in y-direction\n\n";
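/*
   An illustrative invocation (the executable name ex18 is assumed; any
   standard PETSc option such as -ksp_monitor may be appended):

       mpiexec -n 4 ./ex18 -m 16 -n 16 -permute rcm -random_exact_sol
*/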
/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: n
T*/
/*
   Include "petscksp.h" so that we can use KSP solvers.  Note that this file
   automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscmat.h    - matrices
     petscis.h     - index sets
     petscksp.h    - Krylov subspace methods
     petscviewer.h - viewers
     petscpc.h     - preconditioners
*/
#include <petscksp.h>
int main(int argc,char **args)
{
  Vec            x,b,u;    /* approx solution, RHS, exact solution */
  Mat            A;        /* linear system matrix */
  KSP            ksp;      /* linear solver context */
  PetscRandom    rctx;     /* random number generator context */
  PetscReal      norm;     /* norm of solution error */
  PetscInt       i,j,Ii,J,Istart,Iend,m,n,its;
  PetscErrorCode ierr;     /* error code returned by PETSc calls */
  PetscBool      random_exact_sol,view_exact_sol,permute;
  char           ordering[256] = MATORDERINGRCM;
  IS             rowperm = NULL,colperm = NULL;
  PetscScalar    v;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stage;
#endif
  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  PetscOptionsBegin(PETSC_COMM_WORLD,NULL,"Poisson example options","");
  {
    m = 8;
    PetscOptionsInt("-m","Number of grid points in x direction","",m,&m,NULL);
    n = m-1;
    PetscOptionsInt("-n","Number of grid points in y direction","",n,&n,NULL);
    random_exact_sol = PETSC_FALSE;
    PetscOptionsBool("-random_exact_sol","Choose a random exact solution","",random_exact_sol,&random_exact_sol,NULL);
    view_exact_sol = PETSC_FALSE;
    PetscOptionsBool("-view_exact_sol","View exact solution","",view_exact_sol,&view_exact_sol,NULL);
    permute = PETSC_FALSE;
    PetscOptionsFList("-permute","Permute the matrix and vectors so the system is solved in a new ordering","",MatOrderingList,ordering,ordering,sizeof(ordering),&permute);
  }
  PetscOptionsEnd();
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime.  Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.

     Performance tuning note: for problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance.  See the matrix chapter of the users manual for details.
  */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(A);
  MatMPIAIJSetPreallocation(A,5,NULL,5,NULL);
  MatSeqAIJSetPreallocation(A,5,NULL);
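  /*
     The preallocation above uses 5 nonzeros per row, the exact interior
     count for the five-point stencil assembled below; in the MPIAIJ case,
     passing (5,NULL,5,NULL) treats 5 as an upper bound for both the
     diagonal and off-process blocks of each local row.
  */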
  MatSetUp(A);
  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);
  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.

     Note: this uses the less common natural ordering that orders first
     all the unknowns for x = h, then for x = 2h, etc.; hence you see
     J = Ii +- n instead of J = Ii +- m as you might expect.  The more
     standard ordering would first do all variables for y = h, then y = 2h, etc.
  */
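  /*
     A concrete instance of this indexing (illustration): with n = 4, the
     global row Ii = 5 corresponds to i = 1, j = 1, and its five-point
     stencil neighbors are the columns Ii-n = 1, Ii+n = 9, Ii-1 = 4 and
     Ii+1 = 6, matching the J values set in the loop below.
  */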
  PetscLogStageRegister("Assembly",&stage);
  PetscLogStagePush(stage);
  for (Ii=Istart; Ii<Iend; Ii++) {
    v = -1.0; i = Ii/n; j = Ii - i*n;
    if (i>0)   {J = Ii - n; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    if (i<m-1) {J = Ii + n; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    if (j>0)   {J = Ii - 1; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    if (j<n-1) {J = Ii + 1; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
    v = 4.0; MatSetValues(A,1,&Ii,1,&Ii,&v,INSERT_VALUES);
  }
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
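  /* (purely local work that does not touch A could be placed here to
      overlap with the communication started by MatAssemblyBegin()) */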
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  PetscLogStagePop();
  /* A is symmetric. Set symmetric flag to enable ICC/Cholesky preconditioner */
  MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
  /*
     Create parallel vectors.
      - We form 1 vector from scratch and then duplicate as needed.
      - When using VecCreate(), VecSetSizes() and VecSetFromOptions()
        in this example, we specify only the vector's global dimension;
        the parallel partitioning is determined at runtime.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly.  PETSc automatically generates
        appropriately partitioned matrices and vectors when MatCreate()
        and VecCreate() are used with the same communicator.
      - The user can alternatively specify the local vector and matrix
        dimensions when more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE argument in the VecSetSizes() statement
        below).
  */
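  /*
     A minimal sketch of that alternative (not used in this example): compute
     an explicit local size with PetscSplitOwnership() and pass it in place
     of PETSC_DECIDE:

        PetscInt nlocal = PETSC_DECIDE, N = m*n;
        PetscSplitOwnership(PETSC_COMM_WORLD,&nlocal,&N);
        VecSetSizes(u,nlocal,N);
  */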
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);
  /*
     Set exact solution; then compute right-hand-side vector.
     By default the exact solution is a vector with all elements
     set to 1.0; alternatively, the runtime option -random_exact_sol
     forms a solution vector with random components.
  */
  if (random_exact_sol) {
    PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
    PetscRandomSetFromOptions(rctx);
    VecSetRandom(u,rctx);
    PetscRandomDestroy(&rctx);
  } else {
    VecSet(u,1.0);
  }
  MatMult(A,u,b);
  /*
     View the exact solution vector if desired
  */
  if (view_exact_sol) {VecView(u,PETSC_VIEWER_STDOUT_WORLD);}
  if (permute) {
    Mat Aperm;
    MatGetOrdering(A,ordering,&rowperm,&colperm);
    MatPermute(A,rowperm,colperm,&Aperm);
    VecPermute(b,colperm,PETSC_FALSE);
    MatDestroy(&A);
    A = Aperm;                /* Replace original operator with permuted version */
  }
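  /*
     Note: the orderings offered by -permute (e.g. rcm, nd) are symmetric,
     in which case MatGetOrdering() returns identical row and column
     permutations; permuting b by colperm above and un-permuting x by
     rowperm after the solve are then consistent with A(rowperm,colperm).
  */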
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);
  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A);
  /*
     Set linear solver defaults for this problem (optional).
      - By extracting the KSP and PC contexts from the KSP context,
        we can then directly call any KSP and PC routines to set
        various options.
      - The following call is optional; all of these parameters could
        alternatively be specified at runtime via KSPSetFromOptions().
        All of these defaults can be overridden at runtime, as
        indicated below.
  */
  KSPSetTolerances(ksp,1.e-2/((m+1)*(n+1)),1.e-50,PETSC_DEFAULT,PETSC_DEFAULT);
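  /*
     The relative tolerance above shrinks as the grid is refined (roughly
     like h^2), the intent being to drive the algebraic error below the
     discretization error of the five-point scheme.
  */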
  /*
     Set runtime options, e.g.,
       -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.
  */
  KSPSetFromOptions(ksp);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  KSPSolve(ksp,b,x);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Check solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  if (permute) {VecPermute(x,rowperm,PETSC_TRUE);}
  /*
     Check the error
  */
  VecAXPY(x,-1.0,u);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);
  /*
     Print convergence information.  PetscPrintf() produces a single
     print statement from all processes that share a communicator.
     An alternative is PetscFPrintf(), which prints to a file.
  */
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g iterations %D\n",(double)norm,its);
  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u);  VecDestroy(&x);
  VecDestroy(&b);  MatDestroy(&A);
  ISDestroy(&rowperm);  ISDestroy(&colperm);
  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  ierr = PetscFinalize();
  return ierr;
}