Actual source code: ex2f.F
!
!
!  Description: Builds a parallel vector with 1 component on the first
!               processor, 2 on the second, etc.  Then each process adds
!               one to all elements except the last 'rank' entries, so
!               process 0 updates every entry, process 1 skips the last
!               one, and so on.
!
! -----------------------------------------------------------------------

      program main
#include <petsc/finclude/petscvec.h>
      use petscvec
      implicit none

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                 Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      Vec x
      PetscInt N,i,ione
      PetscErrorCode ierr
      PetscMPIInt rank
      PetscScalar one

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      if (ierr .ne. 0) then
        print*,'PetscInitialize failed'
        stop
      endif
      one = 1.0
      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)

!  Create a parallel vector.
!   - In this case, we specify the size of the local portion on
!     each processor, and PETSc computes the global size.  Alternatively,
!     if we pass the global size and use PETSC_DECIDE for the
!     local size, PETSc will choose a reasonable partition, trying
!     to put nearly an equal number of elements on each processor.
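!   - As a sketch of that alternative (assuming the desired global size
!     were already stored in N), the call would look like:
!        call VecCreateMPI(PETSC_COMM_WORLD,PETSC_DECIDE,N,x,ierr)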

      N = rank + 1
      call VecCreateMPI(PETSC_COMM_WORLD,N,PETSC_DECIDE,x,ierr)
      call VecGetSize(x,N,ierr)
      call VecSet(x,one,ierr)
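
!  At this point VecGetSize() has overwritten N with the global size of
!  the vector (the sum of the local sizes chosen above), and VecSet()
!  has initialized every entry to 1.0.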

!  Set the vector elements.
!   - Note that VecSetValues() uses 0-based row and column numbers
!     in Fortran as well as in C.
!   - Always specify global locations of vector entries.
!   - Each processor can contribute any vector entries,
!     regardless of which processor "owns" them; any nonlocal
!     contributions will be transferred to the appropriate processor
!     during the assembly process.
!   - In this example, the flag ADD_VALUES indicates that all
!     contributions will be added together.

      ione = 1
      do 100 i=0,N-rank-1
        call VecSetValues(x,ione,i,one,ADD_VALUES,ierr)
 100  continue
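
!  Each process has now added 1.0 to the global entries 0 .. N-rank-1,
!  so entries near the end of the vector accumulate contributions from
!  fewer processes.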

!  Assemble vector, using the 2-step process:
!    VecAssemblyBegin(), VecAssemblyEnd()
!  Computations can be done while messages are in transition
!  by placing code between these two statements.
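!   - In this example most of the contributions made above target
!     entries owned by other processes, so those off-process values are
!     exchanged between the two calls.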

      call VecAssemblyBegin(x,ierr)
      call VecAssemblyEnd(x,ierr)

!  Test VecGetValues() with scalar entries
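!   - VecGetValues() can only retrieve entries owned by the calling
!     process, so process 0 reads back global entry 0, the one entry
!     it owns.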
      if (rank .eq. 0) then
        ione = 1
        i = 0
        call VecGetValues(x,ione,i,one,ierr)
      endif

!  View the vector; then destroy it.
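!   - VecView() with the PETSC_VIEWER_STDOUT_WORLD viewer prints the
!     entries of the entire parallel vector to standard output.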

      call VecView(x,PETSC_VIEWER_STDOUT_WORLD,ierr)
      call VecDestroy(x,ierr)

      call PetscFinalize(ierr)
      end

!/*TEST
!
!   test:
!      nsize: 2
!      filter: grep -v "MPI processes"
!
!TEST*/