Actual source code: ex4f.F
!
!     This introductory example illustrates running PETSc on a subset
!     of processes
!
!/*T
!   Concepts: introduction to PETSc;
!   Concepts: process^subset set PETSC_COMM_WORLD
!   Processors: 2
!T*/
! -----------------------------------------------------------------------
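!
!     A typical way to run this example by hand (assuming the executable
!     is built as ex4f and a standard MPI launcher such as mpiexec is
!     available; the exact command depends on the local MPI installation
!     and on how PETSc was configured):
!
!         mpiexec -n 2 ./ex4f
!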
      program main
#include <petsc/finclude/petscsys.h>
      use petscmpi        ! or mpi or mpi_f08
      use petscsys
      implicit none

      PetscErrorCode ierr
      PetscMPIInt    rank, size, grank, zero, two
      PetscReal      globalrank
!     We must call MPI_Init() first, making us, not PETSc, responsible
!     for MPI

      call MPI_Init(ierr)
#if defined(PETSC_HAVE_ELEMENTAL)
      call PetscElementalInitializePackage(ierr)
#endif
!     We can now change the communicator universe for PETSc

      zero = 0
      two  = 2
      call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)
      call MPI_Comm_split(MPI_COMM_WORLD,mod(rank,two),zero,            &
     &                    PETSC_COMM_WORLD,ierr)
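!     mod(rank,two) is used as the MPI_Comm_split() color, so the
!     even-numbered and odd-numbered ranks of MPI_COMM_WORLD end up in
!     two disjoint sub-communicators; setting PETSC_COMM_WORLD to the
!     sub-communicator of this rank before PetscInitialize() restricts
!     PETSc to that subset of processes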
!     Every PETSc program should begin by initializing PETSc; since the
!     command-line arguments are not needed here, we call
!     PetscInitializeNoArguments()

      call PetscInitializeNoArguments(ierr)
      if (ierr .ne. 0) then
        print*,'Unable to initialize PETSc'
        stop
      endif
!     The following MPI calls return the number of processes being used
!     and the rank of this process in the group.

      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
!     Here we would like to print only one message that represents all
!     the processes in the group.  Each process sleeps for a number of
!     seconds equal to its global rank so that output from different
!     ranks is not interleaved.  Note that this is not an ideal solution

      call MPI_Comm_rank(MPI_COMM_WORLD,grank,ierr)
      globalrank = grank
      call PetscSleep(globalrank,ierr)
      if (rank .eq. 0) write(6,100) size,rank
 100  format('No of Procs = ',i4,' rank = ',i4)
!     Always call PetscFinalize() before exiting a program.  This
!     routine finalizes the PETSc libraries and provides summary and
!     diagnostic information if certain runtime options are chosen
!     (e.g., -log_view).  Because we called MPI_Init() ourselves,
!     PetscFinalize() does not finalize MPI; see the PetscFinalize()
!     manpage for more information.

      call PetscFinalize(ierr)
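!     The sub-communicator stored in PETSC_COMM_WORLD was created by our
!     call to MPI_Comm_split(), so we are responsible for freeing it; it
!     is freed only after PetscFinalize(), which still uses it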
      call MPI_Comm_free(PETSC_COMM_WORLD,ierr)
#if defined(PETSC_HAVE_ELEMENTAL)
      call PetscElementalFinalizePackage(ierr)
#endif

!     Since we initialized MPI, we must call MPI_Finalize()

      call MPI_Finalize(ierr)
      end
!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda
!
!TEST*/
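!
!     The block above drives the PETSc test harness; roughly: nsize runs
!     the test on 5 MPI ranks, the sort -b filters make the comparison
!     with the expected output independent of the order in which ranks
!     print, and requires: !cuda skips the test for CUDA-enabled builds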