Actual source code: ex2f.F
petsc-3.14.6 2021-03-30
!
!
!  Description: Builds a parallel vector with 1 component on the first
!               processor, 2 on the second, etc.  Then each processor adds
!               one to all elements except the last 'rank' entries of the
!               global vector.
!
!/*T
!  Concepts: vectors^assembling
!  Processors: n
!T*/
! -----------------------------------------------------------------------
      program main
#include <petsc/finclude/petscvec.h>
      use petscvec
      implicit none
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                 Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      Vec              x
      PetscInt         N,i,ione
      PetscErrorCode   ierr
      PetscMPIInt      rank
      PetscScalar      one
      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      if (ierr .ne. 0) then
        print*,'PetscInitialize failed'
        stop
      endif
      one = 1.0
      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
!  Create a parallel vector.
!   - In this case, we specify the size of the local portion on each
!     processor, and PETSc computes the global size.  Alternatively, if
!     we pass the global size and use PETSC_DECIDE for the local size,
!     PETSc chooses a reasonable partition that puts nearly the same
!     number of elements on each processor (a commented sketch of that
!     alternative follows the call below).
      N = rank + 1
      call VecCreateMPI(PETSC_COMM_WORLD,N,PETSC_DECIDE,x,ierr)
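!  A commented sketch (not executed here) of the alternative described
!  above: pass the global size and PETSC_DECIDE for the local size; the
!  global size of 20 is an arbitrary value chosen only for illustration.
!      N = 20
!      call VecCreateMPI(PETSC_COMM_WORLD,PETSC_DECIDE,N,x,ierr)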
      call VecGetSize(x,N,ierr)
      call VecSet(x,one,ierr)
!  Set the vector elements.
!   - Note that VecSetValues() uses 0-based indices in Fortran as well
!     as in C.
!   - Always specify global locations of vector entries.
!   - Each processor can contribute any vector entries, regardless of
!     which processor "owns" them; any nonlocal contributions will be
!     transferred to the appropriate processor during the assembly
!     process.
!   - In this example, the flag ADD_VALUES indicates that all
!     contributions will be added together.
      ione = 1
      do 100 i=0,N-rank-1
         call VecSetValues(x,ione,i,one,ADD_VALUES,ierr)
 100  continue
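!  A commented sketch (not executed here): several entries can also be
!  added in a single VecSetValues() call by passing index and value
!  arrays; the locals idx, vals and itwo below are hypothetical and are
!  not declared in this example.
!      idx(1)  = 0
!      idx(2)  = 1
!      vals(1) = 1.0
!      vals(2) = 1.0
!      itwo    = 2
!      call VecSetValues(x,itwo,idx,vals,ADD_VALUES,ierr)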
!  Assemble the vector, using the 2-step process:
!    VecAssemblyBegin(), VecAssemblyEnd()
!  Computations can be done while messages are in transit by placing
!  code between these two statements.
      call VecAssemblyBegin(x,ierr)
      call VecAssemblyEnd(x,ierr)
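!  A sketch of the overlap mentioned above (assumption: the work placed
!  between the two calls is purely local and does not read the vector
!  still being assembled):
!      call VecAssemblyBegin(x,ierr)
!      ... independent local computation here ...
!      call VecAssemblyEnd(x,ierr)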
!  Test VecGetValues() with scalar (non-array) arguments; with ione set
!  to 0 the call requests no entries, so it only exercises the Fortran
!  interface.
      if (rank .eq. 0) then
        ione = 0
        call VecGetValues(x,ione,i,one,ierr)
      endif
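!  A commented sketch (not executed here): to read an actual entry, pass
!  a locally owned global index, since VecGetValues() can only access
!  local entries; istart and iend below are hypothetical undeclared
!  locals obtained from VecGetOwnershipRange().
!      call VecGetOwnershipRange(x,istart,iend,ierr)
!      ione = 1
!      i    = istart
!      call VecGetValues(x,ione,i,one,ierr)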
!  View the vector; then destroy it.

      call VecView(x,PETSC_VIEWER_STDOUT_WORLD,ierr)
      call VecDestroy(x,ierr)
      call PetscFinalize(ierr)
      end
!/*TEST
!
!   test:
!      nsize: 2
!      filter: grep -v "MPI processes"
!
!TEST*/