Actual source code: ex4f.F
petsc-3.7.3 2016-08-01
!
!  This introductory example illustrates running PETSc on a subset
!  of processes
!
!/*T
!  Concepts: introduction to PETSc;
!  Concepts: process^subset set PETSC_COMM_WORLD
!  Processors: 2
!T*/
! -----------------------------------------------------------------------
      program main
      implicit none
#include <petsc/finclude/petscsys.h>
      PetscErrorCode ierr
      PetscMPIInt    rank, size
!     We must call MPI_Init() first, making us, not PETSc, responsible
!     for MPI

      call MPI_Init(ierr)
!     We can now change the communicator universe for PETSc

      call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)
      call MPI_Comm_split(MPI_COMM_WORLD,mod(rank,2),0,                 &
     &                    PETSC_COMM_WORLD,ierr)
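!     (Note: the "color" argument mod(rank,2) places even- and odd-ranked
!     processes in separate communicators, and the "key" 0 preserves the
!     original rank ordering within each group.  PETSC_COMM_WORLD must be
!     assigned before PetscInitialize() for PETSc to use this
!     sub-communicator instead of MPI_COMM_WORLD.)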
!     Every PETSc program should begin with the PetscInitialize()
!     routine.

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
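!     (Here PETSC_NULL_CHARACTER indicates that no extra options file is
!     read.  As in any PETSc code, ierr should be checked after the call;
!     a nonzero value means initialization failed.)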
!     The following MPI calls return the number of processes being used
!     and the rank of this process in the group.

      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
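!     (Note that size and rank refer to the split PETSC_COMM_WORLD, not to
!     MPI_COMM_WORLD: when run on 2 processes, each sub-communicator
!     contains a single process, so size is 1 and rank is 0 in both groups.)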
!     Here we would like to print only one message that represents all
!     the processes in the group.

      if (rank .eq. 0) write(6,100) size,rank
 100  format('No of Procs = ',i4,' rank = ',i4)
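!     (Because each group has its own rank 0, one line is printed per
!     group.  PetscPrintf(PETSC_COMM_WORLD,...) would behave similarly,
!     printing only from the first process of its communicator.)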
!     Always call PetscFinalize() before exiting a program.  This
!     routine
!       - finalizes the PETSc libraries (and MPI, if PETSc initialized it)
!       - provides summary and diagnostic information if certain runtime
!         options are chosen (e.g., -log_summary).  See the PetscFinalize()
!         manpage for more information.

      call PetscFinalize(ierr)
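!     Because MPI was initialized by the user above, PetscFinalize() does
!     not call MPI_Finalize(), and the user-created communicator must be
!     freed explicitly.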
      call MPI_Comm_free(PETSC_COMM_WORLD,ierr)
!     Since we initialized MPI, we must call MPI_Finalize()

      call MPI_Finalize(ierr)
      end
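!     Example run (assuming the executable is built as ex4f, as with the
!     standard PETSc example makefiles):
!
!        mpiexec -n 2 ./ex4f
!
!     Each of the two one-process groups should then print a line of the
!     form "No of Procs =    1 rank =    0".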