Actual source code: ex13f90.F90

petsc-3.8.4 2018-03-24
!
!
!/*T
!   Concepts: KSP^basic sequential example
!   Concepts: KSP^Laplacian, 2d
!   Concepts: Laplacian, 2d
!   Processors: 1
!T*/
! -----------------------------------------------------------------------

      module UserModule
#include <petsc/finclude/petscksp.h>
        use petscksp
        type User
          Vec x
          Vec b
          Mat A
          KSP ksp
          PetscInt m
          PetscInt n
        end type User
      end module

      program main
      use UserModule
      implicit none

!    User-defined context that contains all the data structures used
!    in the linear solution process.

!   Vec       x,b     /* solution vector and right-hand-side vector */
!   Mat       A       /* sparse matrix */
!   KSP       ksp     /* linear solver context */
!   PetscInt  m,n     /* grid dimensions */
!
!   Since we cannot store Scalars and integers in the same context,
!   we store the integers/pointers in the user-defined context, and
!   the scalar values are carried in the common block.
!   The scalar values in this simplistic example could easily
!   be recalculated in each routine, where they are needed.
!
!   PetscReal hx2,hy2  /* (m+1)*(m+1) and (n+1)*(n+1), i.e. 1/hx^2 and 1/hy^2 */
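!
!   The /param/ common block holding hx2 and hy2 is declared again in
!   UserInitializeLinearSolver() and UserDoLinearSolver(); that is how these
!   scalar values are shared with the user-defined routines below.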

!  Note: Any user-defined Fortran routines MUST be declared as external.

      external UserInitializeLinearSolver
      external UserFinalizeLinearSolver
      external UserDoLinearSolver

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                   Variable declarations
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      PetscScalar  hx,hy,x,y
      type(User) userctx
      PetscErrorCode ierr
      PetscInt m,n,t,tmax,i,j
      PetscBool  flg
      PetscMPIInt size,rank
      PetscReal  enorm
      PetscScalar cnorm
      PetscScalar,ALLOCATABLE :: userx(:,:)
      PetscScalar,ALLOCATABLE :: userb(:,:)
      PetscScalar,ALLOCATABLE :: solution(:,:)
      PetscScalar,ALLOCATABLE :: rho(:,:)

      PetscReal hx2,hy2
      common /param/ hx2,hy2

      tmax = 2
      m = 6
      n = 7

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                 Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      if (ierr .ne. 0) then
        print*,'Unable to initialize PETSc'
        stop
      endif
      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      if (size .ne. 1) then
         call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
         if (rank .eq. 0) then
            write(6,*) 'This is a uniprocessor example only!'
         endif
         SETERRA(PETSC_COMM_WORLD,1,' ')
      endif

!  The next two lines are for testing only; they allow the user to
!  set the grid size at runtime.
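!  For example, running the program with -m 20 -n 20 selects a 20 x 20 grid.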

      call PetscOptionsGetInt(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-m',m,flg,ierr);CHKERRA(ierr)
      call PetscOptionsGetInt(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-n',n,flg,ierr);CHKERRA(ierr)

!  Create the empty sparse matrix and linear solver data structures

      call UserInitializeLinearSolver(m,n,userctx,ierr);CHKERRA(ierr)

!  Allocate arrays to hold the solution to the linear system.  This
!  approach is not normally done in PETSc programs, but in this case,
!  since we are calling these routines from a non-PETSc program, we
!  would like to reuse the data structures from another code. So in
!  the context of a larger application these would be provided by
!  other (non-PETSc) parts of the application code.

      ALLOCATE (userx(m,n),userb(m,n),solution(m,n))

!  Allocate an array to hold the coefficients in the elliptic operator

      ALLOCATE (rho(m,n))

!  Fill up the array rho[] with the function rho(x,y) = x; fill the
!  right-hand-side b[] and the solution with a known problem for testing.
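!  The exact solution used for testing is psi(x,y) = sin(2*pi*x)*sin(2*pi*y),
!  so the right-hand side filled in below is
!     F = -div(x grad psi)
!       = -2*pi*cos(2*pi*x)*sin(2*pi*y) + 8*pi*pi*x*sin(2*pi*x)*sin(2*pi*y)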

      hx = 1.0/real(m+1)
      hy = 1.0/real(n+1)
      y  = hy
      do 20 j=1,n
         x = hx
         do 10 i=1,m
            rho(i,j)      = x
            solution(i,j) = sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            userb(i,j)    = -2.*PETSC_PI*cos(2.*PETSC_PI*x)*              &
     &                sin(2.*PETSC_PI*y) +                                &
     &                8*PETSC_PI*PETSC_PI*x*                              &
     &                sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            x = x + hx
 10      continue
         y = y + hy
 20   continue

!  Loop over a bunch of timesteps, setting up and solving the linear
!  system for each time step.
!  Note that this loop is somewhat artificial. It is intended to
!  demonstrate how one may reuse the linear solvers in each time step.

      do 100 t=1,tmax
         call UserDoLinearSolver(rho,userctx,userb,userx,ierr);CHKERRA(ierr)

!        Compute error: Note that this could (and usually should) all be done
!        using the PETSc vector operations. Here we demonstrate using more
!        standard programming practices to show how they may be mixed with
!        PETSc.
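!        cnorm accumulates the sum of |solution - userx|^2 over the grid;
!        scaling by hx*hy below gives a mesh-weighted (squared) L2 measure
!        of the error, which is printed as the error norm.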
         cnorm = 0.0
         do 90 j=1,n
            do 80 i=1,m
              cnorm = cnorm +                                              &
     &    PetscConj(solution(i,j)-userx(i,j))*(solution(i,j)-userx(i,j))
 80         continue
 90      continue
         enorm =  PetscRealPart(cnorm*hx*hy)
         write(6,115) m,n,enorm
 115     format ('m = ',I2,' n = ',I2,' error norm = ',1PE11.4)
 100  continue

!  We are finished solving linear systems, so we clean up the
!  data structures.

      DEALLOCATE (userx,userb,solution,rho)

      call UserFinalizeLinearSolver(userctx,ierr);CHKERRA(ierr)
      call PetscFinalize(ierr)
      end

! ----------------------------------------------------------------
      subroutine UserInitializeLinearSolver(m,n,userctx,ierr)
      use UserModule
      implicit none

      PetscInt m,n
      PetscErrorCode ierr
      type(User) userctx

      common /param/ hx2,hy2
      PetscReal hx2,hy2

!  Local variable declarations
      Mat      A
      Vec      b,x
      KSP      ksp
      PetscInt Ntot,five,one


!  Here we assume use of a grid of size m x n, with all points on the
!  interior of the domain, i.e., we do not include the points corresponding
!  to homogeneous Dirichlet boundary conditions.  We assume that the domain
!  is [0,1]x[0,1].
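!  The interior mesh spacings are then hx = 1/(m+1) and hy = 1/(n+1), so
!  hx2 and hy2 below hold 1/hx^2 and 1/hy^2 for use in the difference stencil.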

      hx2 = (m+1)*(m+1)
      hy2 = (n+1)*(n+1)
      Ntot = m*n

      five = 5
      one = 1

!  Create the sparse matrix. Preallocate 5 nonzeros per row.
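!  (The 5 corresponds to the 5-point stencil: the diagonal plus the four
!  nearest neighbors.)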

      call MatCreateSeqAIJ(PETSC_COMM_SELF,Ntot,Ntot,five,PETSC_NULL_INTEGER,A,ierr);CHKERRQ(ierr)
!
!  Create vectors. Here we create vectors with no memory allocated.
!  This way, we can use the data structures already in the program
!  by using the VecPlaceArray() subroutine at a later stage.
!
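!  Passing PETSC_NULL_SCALAR means no array is attached yet; the user's
!  storage is supplied later with VecPlaceArray() in UserDoLinearSolver().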
      call VecCreateSeqWithArray(PETSC_COMM_SELF,one,Ntot,PETSC_NULL_SCALAR,b,ierr);CHKERRQ(ierr)
      call VecDuplicate(b,x,ierr);CHKERRQ(ierr)

!  Create linear solver context. This will be used repeatedly for all
!  the linear solves needed.

      call KSPCreate(PETSC_COMM_SELF,ksp,ierr);CHKERRQ(ierr)

      userctx%x = x
      userctx%b = b
      userctx%A = A
      userctx%ksp = ksp
      userctx%m = m
      userctx%n = n

      return
      end
! -----------------------------------------------------------------------

!   Solves -div (rho grad psi) = F using finite differences.
!   rho is a 2-dimensional array of size m by n, stored in Fortran
!   style by columns. userb is a standard one-dimensional array.
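!   Inside this routine rho, userb and userx are received as assumed-size
!   1-d arrays, so grid point (i,j) is addressed with the column-major
!   index i + (j-1)*m (the zero-based counter II below, plus one).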

      subroutine UserDoLinearSolver(rho,userctx,userb,userx,ierr)
      use UserModule
      implicit none

      PetscErrorCode ierr
      type(User) userctx
      PetscScalar rho(*),userb(*),userx(*)

      common /param/ hx2,hy2
      PetscReal hx2,hy2

      PC   pc
      KSP  ksp
      Vec  b,x
      Mat  A
      PetscInt m,n,one
      PetscInt i,j,II,JJ
      PetscScalar  v

      one  = 1
      x    = userctx%x
      b    = userctx%b
      A    = userctx%A
      ksp  = userctx%ksp
      m    = userctx%m
      n    = userctx%n

!  This is not the most efficient way of generating the matrix,
!  but let's not worry about it.  We should have separate code for
!  the four corners, each edge and then the interior. Then we won't
!  have the slow if-tests inside the loop.
!
!  Compute the operator
!          -div rho grad
!  on an m by n grid with zero Dirichlet boundary conditions. The rho
!  is assumed to be given on the same grid on which the finite difference
!  stencil is applied.  For a staggered grid, one would have to change
!  things slightly.
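!  For the grid point with zero-based index II, the 5-point stencil below
!  couples II to its y-neighbors II-m and II+m with value
!  -0.5*(rho(II+1)+rho(JJ+1))*hy2, and to its x-neighbors II-1 and II+1
!  with value -0.5*(rho(II+1)+rho(JJ+1))*hx2; the diagonal entry is
!  2*rho(II+1)*(hx2+hy2).  MatSetValues() receives these zero-based indices.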

      II = 0
      do 110 j=1,n
         do 100 i=1,m
            if (j .gt. 1) then
               JJ = II - m
               v = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            if (j .lt. n) then
               JJ = II + m
               v = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            if (i .gt. 1) then
               JJ = II - 1
               v = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            if (i .lt. m) then
               JJ = II + 1
               v = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            v = 2*rho(II+1)*(hx2+hy2)
            call MatSetValues(A,one,II,one,II,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            II = II+1
 100     continue
 110  continue
!
!     Assemble matrix
!
      call MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr);CHKERRQ(ierr)
      call MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr);CHKERRQ(ierr)

!
!     Set operators. Here the matrix that defines the linear system
!     also serves as the preconditioning matrix. Since the matrix keeps
!     the same nonzero pattern throughout the run, the linear solvers
!     can take advantage of this.
!
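!     The same KSP, created once in UserInitializeLinearSolver(), is reused
!     here on every time step; the matrix values are re-inserted above each time.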
      call KSPSetOperators(ksp,A,A,ierr);CHKERRQ(ierr)

!
!     Set linear solver defaults for this problem (optional).
!     - Here we set it to use direct LU factorization for the solution
!
      call KSPGetPC(ksp,pc,ierr);CHKERRQ(ierr)
      call PCSetType(pc,PCLU,ierr);CHKERRQ(ierr)

!
!     Set runtime options, e.g.,
!        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
!     These options will override those specified above as long as
!     KSPSetFromOptions() is called _after_ any other customization
!     routines.
!
!     Run the program with the option -help to see all the possible
!     linear solver options.
!
      call KSPSetFromOptions(ksp,ierr);CHKERRQ(ierr)

!
!     This allows the PETSc linear solvers to compute the solution
!     directly in the user's array rather than in the PETSc vector.
!
!     This is essentially a hack and not highly recommended unless you
!     are quite comfortable with using PETSc. In general, users should
!     write their entire application using PETSc vectors rather than
!     arrays.
!
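!     VecPlaceArray() points b and x at the user-supplied storage for the
!     duration of the solve; VecResetArray() below detaches it again.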
      call VecPlaceArray(x,userx,ierr);CHKERRQ(ierr)
      call VecPlaceArray(b,userb,ierr);CHKERRQ(ierr)

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                      Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call KSPSolve(ksp,b,x,ierr);CHKERRQ(ierr)

      call VecResetArray(x,ierr);CHKERRQ(ierr)
      call VecResetArray(b,ierr);CHKERRQ(ierr)

      return
      end

! ------------------------------------------------------------------------

      subroutine UserFinalizeLinearSolver(userctx,ierr)
      use UserModule
      implicit none

      PetscErrorCode ierr
      type(User) userctx

!
!     We are all done and don't need to solve any more linear systems, so
!     we free the work space.  All PETSc objects should be destroyed when
!     they are no longer needed.
!
      call VecDestroy(userctx%x,ierr);CHKERRQ(ierr)
      call VecDestroy(userctx%b,ierr);CHKERRQ(ierr)
      call MatDestroy(userctx%A,ierr);CHKERRQ(ierr)
      call KSPDestroy(userctx%ksp,ierr);CHKERRQ(ierr)

      return
      end