Actual source code: ex11.c
petsc-3.12.5 2020-03-29
static char help[] = "Solves a linear system in parallel with KSP.\n\n";
/*T
   Concepts: KSP^solving a Helmholtz equation
   Concepts: complex numbers;
   Concepts: Helmholtz equation
   Processors: n
T*/
/*
   Description: Solves a complex linear system in parallel with KSP.

   The model problem:
      Solve the Helmholtz equation on the unit square (0,1) x (0,1):
          -delta u - sigma1*u + i*sigma2*u = f,
      where delta is the Laplace operator, with Dirichlet boundary
      conditions on all sides, discretized with the 2-D, five-point
      finite difference stencil.

   Compiling the code:
      This code uses the complex-numbers version of PETSc, so PETSc must
      be configured with complex scalars (see the configure example below).
*/
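/*
   A typical configure line for a complex build looks like the following
   (a sketch only; the exact options vary by installation):

       ./configure --with-scalar-type=complex

   PETSc built this way makes PetscScalar a complex type and provides
   PETSC_i as the imaginary unit, both of which this example relies on.
*/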
/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscmat.h    - matrices
     petscis.h     - index sets
     petscksp.h    - Krylov subspace methods
     petscviewer.h - viewers
     petscpc.h     - preconditioners
*/
#include <petscksp.h>
int main(int argc,char **args)
{
  Vec            x,b,u;      /* approx solution, RHS, exact solution */
  Mat            A;          /* linear system matrix */
  KSP            ksp;        /* linear solver context */
  PetscReal      norm;       /* norm of solution error */
  PetscInt       dim,i,j,Ii,J,Istart,Iend,n = 6,its,use_random;
  PetscErrorCode ierr;       /* error code returned by PETSc calls */
  PetscScalar    v,none = -1.0,sigma2,pfive = 0.5,*xa;
  PetscRandom    rctx;
  PetscReal      h2,sigma1 = 100.0;
  PetscBool      flg = PETSC_FALSE;
  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  PetscOptionsGetReal(NULL,NULL,"-sigma1",&sigma1,NULL);
  PetscOptionsGetInt(NULL,NULL,"-n",&n,NULL);
  dim  = n*n;
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime.  Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.
  */
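  /*
     For example, the standard runtime option -mat_type aij (which
     resolves to seqaij on one process and mpiaij in parallel) selects the
     storage format through MatSetFromOptions() below; the option is
     mentioned here only as an illustration.
  */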
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,dim,dim);
  MatSetFromOptions(A);
  MatSetUp(A);
  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);
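  /*
     For instance, with the default n = 6 (dim = 36) on 3 processes,
     PETSC_DECIDE assigns each rank 12 contiguous rows:
     [Istart,Iend) = [0,12), [12,24), [24,36).
  */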
  /*
     Set matrix elements in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
  */
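  /*
     Each MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES) call below adds v to
     the single entry (Ii,J): the first count/array pair gives the rows,
     the second the columns, and both use global indices.
  */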
  PetscOptionsGetBool(NULL,NULL,"-norandom",&flg,NULL);
  if (flg) use_random = 0;
  else use_random = 1;
  if (use_random) {
    PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
    PetscRandomSetFromOptions(rctx);
    PetscRandomSetInterval(rctx,0.0,PETSC_i);
  } else {
    sigma2 = 10.0*PETSC_i;
  }
  h2 = 1.0/((n+1)*(n+1));
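  /*
     h = 1/(n+1) is the mesh width, so h2 is h^2.  The discrete equation
     has been multiplied through by h^2: the off-diagonal stencil entries
     are -1 and the diagonal entry is 4 - sigma1*h^2 + sigma2*h^2, where
     sigma2 already carries the imaginary factor i.
  */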
  for (Ii=Istart; Ii<Iend; Ii++) {
    v = -1.0; i = Ii/n; j = Ii - i*n;
    if (i>0) {
      J = Ii-n; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);
    }
    if (i<n-1) {
      J = Ii+n; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);
    }
    if (j>0) {
      J = Ii-1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);
    }
    if (j<n-1) {
      J = Ii+1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);
    }
    if (use_random) {PetscRandomGetValue(rctx,&sigma2);}
    v = 4.0 - sigma1*h2 + sigma2*h2;
    MatSetValues(A,1,&Ii,1,&Ii,&v,ADD_VALUES);
  }
  if (use_random) {PetscRandomDestroy(&rctx);}
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  /*
     Create parallel vectors.
      - When using VecCreate(), VecSetSizes() and VecSetFromOptions(),
        we specify only the vector's global dimension; the parallel
        partitioning is determined at runtime.
      - Note: We form 1 vector from scratch and then duplicate as needed.
  */
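  /*
     As with the matrix, the runtime option -vec_type can override the
     default vector implementation through VecSetFromOptions(); again,
     this is mentioned only as an illustration of the options database.
  */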
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,dim);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);
  /*
     Set exact solution; then compute right-hand-side vector.
  */
  if (use_random) {
    PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
    PetscRandomSetFromOptions(rctx);
    VecSetRandom(u,rctx);
  } else {
    VecSet(u,pfive);
  }
  MatMult(A,u,b);
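  /*
     Since b = A*u by construction, the exact solution of Ax = b is u,
     which is what enables the error check at the end of the program.
  */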
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);
  /*
     Set operators.  Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A);
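  /*
     In general, KSPSetOperators(ksp,Amat,Pmat) accepts a different matrix
     as its third argument, from which the preconditioner is built.
  */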
  /*
     Set runtime options, e.g.,
         -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
  */
  KSPSetFromOptions(ksp);
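  /*
     A representative invocation (all of these are standard PETSc options;
     the particular command line is only an example):

         mpiexec -n 2 ./ex11 -ksp_type gmres -pc_type bjacobi -ksp_monitor_short
  */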
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  KSPSolve(ksp,b,x);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Check solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Print the first 3 entries of x; this demonstrates extraction of the
     real and imaginary components of the complex vector, x.
  */
  flg  = PETSC_FALSE;
  PetscOptionsGetBool(NULL,NULL,"-print_x3",&flg,NULL);
  if (flg) {
    VecGetArray(x,&xa);
    PetscPrintf(PETSC_COMM_WORLD,"The first three entries of x are:\n");
    for (i=0; i<3; i++) {
      PetscPrintf(PETSC_COMM_WORLD,"x[%D] = %g + %g i\n",i,(double)PetscRealPart(xa[i]),(double)PetscImaginaryPart(xa[i]));
    }
    VecRestoreArray(x,&xa);
  }
  /*
     Check the error
  */
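  /*
     VecAXPY(x,none,u) computes x <- x - u (none = -1.0), so x now holds
     the error; its 2-norm measures the accuracy of the computed solution.
  */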
  VecAXPY(x,none,u);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);
  if (norm < 1.e-12) {
    PetscPrintf(PETSC_COMM_WORLD,"Norm of error < 1.e-12 iterations %D\n",its);
  } else {
    PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g iterations %D\n",(double)norm,its);
  }
  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  if (use_random) {PetscRandomDestroy(&rctx);}
  VecDestroy(&u); VecDestroy(&x);
  VecDestroy(&b); MatDestroy(&A);
  ierr = PetscFinalize();
  return ierr;
}
/*TEST

   build:
      requires: complex

   test:
      args: -n 6 -norandom -pc_type none -ksp_monitor_short -ksp_gmres_cgs_refinement_type refine_always

   testset:
      suffix: deflation
      args: -n 6 -norandom -pc_type deflation -ksp_monitor_short
      test:
      test:
        requires: superlu_dist
        nsize: 3
        args: -pc_deflation_compute_space {{db2 aggregation}}

   test:
      suffix: pc_deflation_init_only-0
      requires: superlu_dist
      nsize: 4
      args: -ksp_type fgmres -pc_deflation_compute_space db4 -pc_deflation_compute_space_size 2 -pc_deflation_levels 2 -deflation_ksp_max_it 10
      #TODO remove suffix and next test when this works
      #args: -pc_deflation_init_only {{0 1}separate output}
      args: -pc_deflation_init_only 0

   test:
      suffix: pc_deflation_init_only-1
      requires: superlu_dist
      nsize: 4
      args: -ksp_type fgmres -pc_deflation_compute_space db4 -pc_deflation_compute_space_size 2 -pc_deflation_levels 2 -deflation_ksp_max_it 10
      args: -pc_deflation_init_only 1

TEST*/