Actual source code: ex46.c

petsc-3.8.4 2018-03-24

static char help[] = "Solves a linear system in parallel with KSP and DM.\n\
Compare this to ex2 which solves the same problem without a DM.\n\n";

/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: DM^using distributed arrays;
   Concepts: Laplacian, 2d
   Processors: n
T*/

/*
  Include "petscdmda.h" so that we can use distributed arrays (DMDAs).
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines      petscvec.h - vectors
     petscmat.h    - matrices
     petscis.h     - index sets               petscksp.h - Krylov subspace methods
     petscviewer.h - viewers                  petscpc.h  - preconditioners
*/
#include <petscdm.h>
#include <petscdmda.h>
#include <petscksp.h>

int main(int argc,char **argv)
{
  DM             da;            /* distributed array */
  Vec            x,b,u;         /* approx solution, RHS, exact solution */
  Mat            A;             /* linear system matrix */
  KSP            ksp;           /* linear solver context */
  PetscRandom    rctx;          /* random number generator context */
  PetscReal      norm;          /* norm of solution error */
  PetscInt       i,j,its;
  PetscErrorCode ierr;          /* error code returned by PETSc routines */
  PetscBool      flg = PETSC_FALSE;
  PetscLogStage  stage;
  DMDALocalInfo  info;

  ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
  /*
     Create distributed array to handle parallel distribution.
     The problem size will default to 8 by 7, but this can be
     changed using -da_grid_x M -da_grid_y N
  */
  DMDACreate2d(PETSC_COMM_WORLD, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE,DMDA_STENCIL_STAR,-8,-7,PETSC_DECIDE,PETSC_DECIDE,1,1,NULL,NULL,&da);
  DMSetFromOptions(da);
  DMSetUp(da);
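  /*
     Illustrative command line for the grid options mentioned above (the
     executable name "ex46" is assumed, not specified by this file):
       mpiexec -n 4 ./ex46 -da_grid_x 100 -da_grid_y 100
     solves the same problem on a 100 x 100 grid with 4 MPI processes.
  */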

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix preallocated according to the DMDA, format AIJ by default.
     To use symmetric storage, run with -dm_mat_type sbaij -mat_ignore_lower_triangular
  */
  DMCreateMatrix(da,&A);

  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Rows and columns are specified by the stencil
      - Entries are normalized for a domain [0,1]x[0,1]
   */
  PetscLogStageRegister("Assembly", &stage);
  PetscLogStagePush(stage);
  DMDAGetLocalInfo(da,&info);
  for (j=info.ys; j<info.ys+info.ym; j++) {
    for (i=info.xs; i<info.xs+info.xm; i++) {
      PetscReal   hx  = 1./info.mx,hy = 1./info.my;
      MatStencil  row = {0},col[5] = {{0}};
      PetscScalar v[5];
      PetscInt    ncols = 0;
      row.j        = j; row.i = i;
      col[ncols].j = j; col[ncols].i = i; v[ncols++] = 2*(hx/hy + hy/hx);
      /* boundaries */
      if (i>0)         {col[ncols].j = j;   col[ncols].i = i-1; v[ncols++] = -hy/hx;}
      if (i<info.mx-1) {col[ncols].j = j;   col[ncols].i = i+1; v[ncols++] = -hy/hx;}
      if (j>0)         {col[ncols].j = j-1; col[ncols].i = i;   v[ncols++] = -hx/hy;}
      if (j<info.my-1) {col[ncols].j = j+1; col[ncols].i = i;   v[ncols++] = -hx/hy;}
      MatSetValuesStencil(A,1,&row,ncols,col,v,INSERT_VALUES);
    }
  }
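  /*
     Note on the coefficients above: they are the standard 5-point Laplacian
     (diag 2/hx^2 + 2/hy^2, x-neighbors -1/hx^2, y-neighbors -1/hy^2) scaled by
     the cell area hx*hy, giving 2*(hx/hy + hy/hx), -hy/hx and -hx/hy.  This is
     the normalization for the unit square mentioned in the comment above.
  */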

  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
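  /* (Independent local work could be placed here to overlap with the
     communication started by MatAssemblyBegin(); none is needed in this example.) */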
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  PetscLogStagePop();

  /*
     Create parallel vectors compatible with the DMDA.
  */
  DMCreateGlobalVector(da,&u);
  VecDuplicate(u,&b);
  VecDuplicate(u,&x);

  /*
     Set exact solution; then compute right-hand-side vector.
     By default we use an exact solution vector with all elements
     equal to 1.0.  Alternatively, the runtime option -random_exact_sol
     forms a solution vector with random components.
  */
  PetscOptionsGetBool(NULL,NULL,"-random_exact_sol",&flg,NULL);
  if (flg) {
    PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
    PetscRandomSetFromOptions(rctx);
    VecSetRandom(u,rctx);
    PetscRandomDestroy(&rctx);
  } else {
    VecSet(u,1.);
  }
  MatMult(A,u,b);
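  /*
     Because b is formed as A*u, the vector u is the exact solution of the
     discrete system; the error measured below is therefore purely the
     algebraic (solver) error, with no discretization error included.
  */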

  /*
     View the exact solution vector if desired
  */
  flg  = PETSC_FALSE;
  PetscOptionsGetBool(NULL,NULL,"-view_exact_sol",&flg,NULL);
  if (flg) {VecView(u,PETSC_VIEWER_STDOUT_WORLD);}

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A);

  /*
    Set runtime options, e.g.,
        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
    These options will override those specified above as long as
    KSPSetFromOptions() is called _after_ any other customization
    routines.
  */
  KSPSetFromOptions(ksp);
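  /*
     Illustrative command line for the runtime options mentioned above
     (standard PETSc option names; the executable name is assumed):
       mpiexec -n 4 ./ex46 -ksp_type cg -pc_type gamg -ksp_monitor -ksp_rtol 1e-8
  */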

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  KSPSolve(ksp,b,x);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Check solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Check the error
  */
  VecAXPY(x,-1.,u);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);

  /*
     Print convergence information.  PetscPrintf() produces a single
     print statement from all processes that share a communicator.
     An alternative is PetscFPrintf(), which prints to a file.
  */
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g iterations %D\n",(double)norm,its);
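  /*
     A minimal sketch of the PetscFPrintf() alternative mentioned above (not
     enabled here); it writes the same message to a given FILE pointer:
       PetscFPrintf(PETSC_COMM_WORLD,stdout,"Norm of error %g iterations %D\n",(double)norm,its);
  */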

  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u);
  VecDestroy(&x);
  VecDestroy(&b);
  MatDestroy(&A);
  DMDestroy(&da);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  ierr = PetscFinalize();
  return ierr;
}