!
!
!/*T
! Concepts: KSP^basic sequential example
! Concepts: KSP^Laplacian, 2d
! Concepts: Laplacian, 2d
! Processors: 1
!T*/
! -----------------------------------------------------------------------
      module UserModule
#include <petsc/finclude/petscksp.h>
        use petscksp
        type User
          Vec x
          Vec b
          Mat A
          KSP ksp
          PetscInt m
          PetscInt n
        end type User
      end module
      program main
      use UserModule
      implicit none

! User-defined context that contains all the data structures used
! in the linear solution process.
!
! Vec x,b           /* solution vector and right-hand-side vector */
! Mat A             /* sparse matrix */
! KSP ksp           /* linear solver context */
! PetscInt m,n      /* grid dimensions */
!
! In this example the PETSc objects and the integer grid dimensions are
! stored in the user-defined context, while the real grid parameters are
! carried in a common block. The scalar values in this simplistic example
! could easily be recalculated in each routine where they are needed.
!
! PetscReal hx2,hy2 /* (m+1)^2 and (n+1)^2, i.e., 1/hx^2 and 1/hy^2 */
! Note: Any user-defined Fortran routines MUST be declared as external.

      external UserInitializeLinearSolver
      external UserFinalizeLinearSolver
      external UserDoLinearSolver

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
! Variable declarations
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      PetscScalar hx,hy,x,y
      type(User) userctx
      PetscErrorCode ierr
      PetscInt m,n,t,tmax,i,j
      PetscBool flg
      PetscMPIInt size
      PetscReal enorm
      PetscScalar cnorm
      PetscScalar, ALLOCATABLE :: userx(:,:)
      PetscScalar, ALLOCATABLE :: userb(:,:)
      PetscScalar, ALLOCATABLE :: solution(:,:)
      PetscScalar, ALLOCATABLE :: rho(:,:)

      PetscReal hx2,hy2
      common /param/ hx2,hy2
      tmax = 2
      m = 6
      n = 7

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
! Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      if (ierr .ne. 0) then
        print*,'Unable to initialize PETSc'
        stop
      endif
      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      if (size .ne. 1) then; SETERRA(PETSC_COMM_WORLD,PETSC_ERR_WRONG_MPI_SIZE,'This is a uniprocessor example only'); endif
! The next two lines are for testing only; these allow the user to
! decide the grid size at runtime.

      call PetscOptionsGetInt(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-m',m,flg,ierr);CHKERRA(ierr)
      call PetscOptionsGetInt(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-n',n,flg,ierr);CHKERRA(ierr)
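! For example, the grid size can be changed at runtime with something like
!     -m 19 -n 20
! (the same options used by the test stanza at the bottom of this file).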

! Create the empty sparse matrix and linear solver data structures

      call UserInitializeLinearSolver(m,n,userctx,ierr);CHKERRA(ierr)

! Allocate arrays to hold the solution to the linear system. This
! approach is not normally done in PETSc programs, but in this case,
! since we are calling these routines from a non-PETSc program, we
! would like to reuse the data structures from another code. So in
! the context of a larger application these would be provided by
! other (non-PETSc) parts of the application code.

      ALLOCATE (userx(m,n),userb(m,n),solution(m,n))

! Allocate an array to hold the coefficients in the elliptic operator

      ALLOCATE (rho(m,n))

! Fill up the array rho() with the function rho(x,y) = x; fill the
! right-hand side b() and the solution with a known problem for testing.
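! With rho(x,y) = x and the exact solution psi(x,y) = sin(2*pi*x)*sin(2*pi*y),
! the corresponding forcing term is
!     F = -div(rho grad psi)
!       = -2*pi*cos(2*pi*x)*sin(2*pi*y) + 8*pi*pi*x*sin(2*pi*x)*sin(2*pi*y),
! which is exactly what is stored in userb below.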

      hx = 1.0/real(m+1)
      hy = 1.0/real(n+1)
      y  = hy
      do 20 j=1,n
         x = hx
         do 10 i=1,m
            rho(i,j)      = x
            solution(i,j) = sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            userb(i,j)    = -2.*PETSC_PI*cos(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y) + &
     &                      8*PETSC_PI*PETSC_PI*x*sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            x = x + hx
 10      continue
         y = y + hy
 20   continue

! Loop over a number of timesteps, setting up and solving the linear
! system for each timestep.
! Note that this loop is somewhat artificial. It is intended to
! demonstrate how one may reuse the linear solver in each timestep.

      do 100 t=1,tmax

        call UserDoLinearSolver(rho,userctx,userb,userx,ierr);CHKERRA(ierr)

! Compute the error. Note that this could (and usually should) all be
! done using the PETSc vector operations. Here we demonstrate using more
! standard programming practices to show how they may be mixed with
! PETSc.
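! The quantity accumulated below is the discrete (squared) L2 norm of the
! error: the sum of |solution(i,j)-userx(i,j)|^2 over all grid points,
! scaled by the cell area hx*hy.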
        cnorm = 0.0
        do 90 j=1,n
          do 80 i=1,m
            cnorm = cnorm + PetscConj(solution(i,j)-userx(i,j))*(solution(i,j)-userx(i,j))
 80       continue
 90     continue
        enorm = PetscRealPart(cnorm*hx*hy)
        write(6,115) m,n,enorm
 115    format ('m = ',I2,' n = ',I2,' error norm = ',1PE11.4)

 100  continue

! We are finished solving linear systems, so we clean up the
! data structures.

      DEALLOCATE (userx,userb,solution,rho)

      call UserFinalizeLinearSolver(userctx,ierr);CHKERRA(ierr)
      call PetscFinalize(ierr)
      end

! ----------------------------------------------------------------
      subroutine UserInitializeLinearSolver(m,n,userctx,ierr)
      use UserModule
      implicit none

      PetscInt m,n
      PetscErrorCode ierr
      type(User) userctx

      common /param/ hx2,hy2
      PetscReal hx2,hy2

! Local variable declarations
      Mat A
      Vec b,x
      KSP ksp
      PetscInt Ntot,five,one

! Here we assume use of a grid of size m x n, with all points on the
! interior of the domain, i.e., we do not include the points corresponding
! to homogeneous Dirichlet boundary conditions. We assume that the domain
! is [0,1]x[0,1].
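! With hx = 1/(m+1) and hy = 1/(n+1), the quantities stored in the common
! block are hx2 = 1/hx^2 = (m+1)^2 and hy2 = 1/hy^2 = (n+1)^2.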

      hx2  = (m+1)*(m+1)
      hy2  = (n+1)*(n+1)
      Ntot = m*n

      five = 5
      one  = 1

! Create the sparse matrix. Preallocate 5 nonzeros per row.

      call MatCreateSeqAIJ(PETSC_COMM_SELF,Ntot,Ntot,five,PETSC_NULL_INTEGER,A,ierr);CHKERRQ(ierr)
!
! Create vectors. Here we create vectors with no memory allocated.
! This way, we can use the data structures already in the program
! by using the VecPlaceArray() subroutine at a later stage.
!
      call VecCreateSeqWithArray(PETSC_COMM_SELF,one,Ntot,PETSC_NULL_SCALAR,b,ierr);CHKERRQ(ierr)
      call VecDuplicate(b,x,ierr);CHKERRQ(ierr)

! Create linear solver context. This will be used repeatedly for all
! the linear solves needed.

      call KSPCreate(PETSC_COMM_SELF,ksp,ierr);CHKERRQ(ierr)

      userctx%x   = x
      userctx%b   = b
      userctx%A   = A
      userctx%ksp = ksp
      userctx%m   = m
      userctx%n   = n

      return
      end

! -----------------------------------------------------------------------

! Solves -div (rho grad psi) = F using finite differences.
! rho, userb, and userx are m-by-n arrays stored in Fortran column-major
! order; inside this routine they are addressed as one-dimensional
! assumed-size arrays.

      subroutine UserDoLinearSolver(rho,userctx,userb,userx,ierr)
      use UserModule
      implicit none

      PetscErrorCode ierr
      type(User) userctx
      PetscScalar rho(*),userb(*),userx(*)

      common /param/ hx2,hy2
      PetscReal hx2,hy2

      PC pc
      KSP ksp
      Vec b,x
      Mat A
      PetscInt m,n,one
      PetscInt i,j,II,JJ
      PetscScalar v

      one = 1
      x   = userctx%x
      b   = userctx%b
      A   = userctx%A
      ksp = userctx%ksp
      m   = userctx%m
      n   = userctx%n

! This is not the most efficient way of generating the matrix, but let's
! not worry about that here. Ideally one would have separate code for the
! four corners, each edge, and the interior, so that the if-tests below
! would not be executed inside the loop.
!
! Compute the operator
!          -div rho grad
! on an m by n grid with zero Dirichlet boundary conditions. The
! coefficient rho is assumed to be given at the same grid points where the
! finite difference stencil is applied. For a staggered grid, things would
! have to change slightly.
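! Grid point (i,j) (1-based) corresponds to PETSc row/column number
!     II = (i-1) + (j-1)*m        (0-based),
! so the south/north neighbors of II are II -/+ m and the west/east
! neighbors are II -/+ 1. The coupling to a neighbor JJ uses the average
! of rho at the two points, -0.5*(rho(II+1)+rho(JJ+1)), scaled by 1/hy^2
! or 1/hx^2 (hy2 or hx2).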

      II = 0
      do 110 j=1,n
         do 100 i=1,m
            if (j .gt. 1) then
               JJ = II - m
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            if (j .lt. n) then
               JJ = II + m
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            if (i .gt. 1) then
               JJ = II - 1
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            if (i .lt. m) then
               JJ = II + 1
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            endif
            v = 2*rho(II+1)*(hx2+hy2)
            call MatSetValues(A,one,II,one,II,v,INSERT_VALUES,ierr);CHKERRQ(ierr)
            II = II + 1
 100     continue
 110  continue
!
! Assemble matrix
!
      call MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr);CHKERRQ(ierr)
      call MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr);CHKERRQ(ierr)

!
! Set operators. Here the matrix that defines the linear system
! also serves as the matrix from which the preconditioner is
! constructed.
!
      call KSPSetOperators(ksp,A,A,ierr);CHKERRQ(ierr)

!
! Set linear solver defaults for this problem (optional).
! - Here we set it to use direct LU factorization for the solution.
!
      call KSPGetPC(ksp,pc,ierr);CHKERRQ(ierr)
      call PCSetType(pc,PCLU,ierr);CHKERRQ(ierr)

!
! Set runtime options, e.g.,
!     -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
! These options will override those specified above as long as
! KSPSetFromOptions() is called _after_ any other customization
! routines.
!
! Run the program with the option -help to see all the possible
! linear solver options.
!
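! For instance, running with
!     -ksp_type gmres -pc_type ilu -ksp_monitor
! replaces the direct LU factorization chosen above with ILU-preconditioned
! GMRES and prints the residual norm at each iteration.
!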
      call KSPSetFromOptions(ksp,ierr);CHKERRQ(ierr)

!
! This allows the PETSc linear solvers to compute the solution
! directly in the user's array rather than in the PETSc vector.
!
! This is essentially a hack and is not highly recommended unless you
! are quite comfortable with using PETSc. In general, users should
! write their entire application using PETSc vectors rather than
! arrays.
!
      call VecPlaceArray(x,userx,ierr);CHKERRQ(ierr)
      call VecPlaceArray(b,userb,ierr);CHKERRQ(ierr)

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
! Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call KSPSolve(ksp,b,x,ierr);CHKERRQ(ierr)
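
! Unhook the user-provided arrays from the PETSc vectors so that they can
! be safely reused (or deallocated) by the calling program.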
      call VecResetArray(x,ierr);CHKERRQ(ierr)
      call VecResetArray(b,ierr);CHKERRQ(ierr)

      return
      end

! ------------------------------------------------------------------------

      subroutine UserFinalizeLinearSolver(userctx,ierr)
      use UserModule
      implicit none

      PetscErrorCode ierr
      type(User) userctx

!
! We are all done and don't need to solve any more linear systems, so
! we free the work space. All PETSc objects should be destroyed when
! they are no longer needed.
!
      call VecDestroy(userctx%x,ierr);CHKERRQ(ierr)
      call VecDestroy(userctx%b,ierr);CHKERRQ(ierr)
      call MatDestroy(userctx%A,ierr);CHKERRQ(ierr)
      call KSPDestroy(userctx%ksp,ierr);CHKERRQ(ierr)

      return
      end

!
!/*TEST
!
!   test:
!     args: -m 19 -n 20 -ksp_gmres_cgs_refinement_type refine_always
!     output_file: output/ex13f90_1.out
!
!TEST*/