Actual source code: ex4f.F

petsc-3.13.6 2020-09-29
!
!     This introductory example illustrates running PETSc on a subset
!     of processes
!
!/*T
!   Concepts: introduction to PETSc;
!   Concepts: process^subset set PETSC_COMM_WORLD
!   Processors: 2
!T*/
! -----------------------------------------------------------------------

      program main
#include <petsc/finclude/petscsys.h>
      use petscsys
      implicit none
      PetscErrorCode ierr
      PetscMPIInt    rank,size,grank,two
      PetscReal      globalrank

!     We must call MPI_Init() first, making us, not PETSc, responsible
!     for MPI

      call MPI_Init(ierr)
#if defined(PETSC_HAVE_ELEMENTAL)
      call PetscElementalInitializePackage(ierr)
#endif
!     We can now change the communicator universe for PETSc

      two = 2
      call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)
      call MPI_Comm_split(MPI_COMM_WORLD,mod(rank,two),0,               &
     &     PETSC_COMM_WORLD,ierr)
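!     MPI_Comm_split() partitions MPI_COMM_WORLD by "color": here
!     mod(rank,two) places even-numbered ranks in one communicator and
!     odd-numbered ranks in another; the key argument 0 keeps ranks in
!     their original relative order.  Assigning the result to
!     PETSC_COMM_WORLD before PetscInitialize() makes PETSc run
!     independently on each subgroup.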

!     Every PETSc program should begin with a call to the
!     PetscInitialize() routine.
      call PetscInitializeNoArguments(ierr)
      if (ierr .ne. 0) then
         print*,'Unable to initialize PETSc'
         stop
      endif

!     The following MPI calls return the number of processes in the
!     subgroup associated with PETSC_COMM_WORLD and the rank of this
!     process within it (not within MPI_COMM_WORLD).

      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)


!     Here we would like to print only one message that represents all
!     the processes in the group.  Sleep so that IO from different
!     ranks doesn't get mixed up.  Note this is not an ideal solution.
      call MPI_Comm_rank(MPI_COMM_WORLD,grank,ierr)
      globalrank = grank
      call PetscSleep(globalrank,ierr)
      if (rank .eq. 0) write(6,100) size,rank
 100  format('No of Procs = ',i4,' rank = ',i4)
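!     Each process sleeps for a number of seconds equal to its global
!     rank, so the two subgroup leaders (global ranks 0 and 1) write at
!     different times.  With 5 ranks and mod(rank,2) as the color, the
!     expected output is one line per subgroup, e.g.
!        No of Procs =    3 rank =    0
!        No of Procs =    2 rank =    0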

!     Always call PetscFinalize() before exiting a program.  This
!     routine
!       - finalizes the PETSc libraries (but not MPI here, since we,
!         not PETSc, called MPI_Init())
!       - provides summary and diagnostic information if certain
!         runtime options are chosen (e.g., -log_view).
!     See the PetscFinalize() manpage for more information.

      call PetscFinalize(ierr)
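!     Free the communicator created by MPI_Comm_split() above; PETSc
!     does not free communicators it did not create.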
      call MPI_Comm_free(PETSC_COMM_WORLD,ierr)
#if defined(PETSC_HAVE_ELEMENTAL)
      call PetscElementalFinalizePackage(ierr)
#endif


!     Since we initialized MPI, we must call MPI_Finalize()

      call MPI_Finalize(ierr)
      end
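!     To run this example by hand (the executable name is illustrative;
!     adjust to your build):
!        mpiexec -n 2 ./ex4f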

!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda
!
!TEST*/