Actual source code: subcomm.c

#define PETSC_DLL
/*
     Provides utility routines for split MPI communicators.
*/
#include "petscsys.h"

PetscErrorCode  PetscSubcommDestroy(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_free(&psubcomm->dupparent);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&psubcomm->comm);CHKERRQ(ierr);
  ierr = PetscFree(psubcomm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
  PetscSubcommCreate - Create a PetscSubcomm context.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
-  nsubcomm - the number of subcommunicators to be created

   Output Parameter:
.  psubcomm - location to store the PetscSubcomm context

   Notes:
   To avoid having to scatter data from the subcommunicators back to the original
   communicator, the subcommunicators are formed by dealing the processes out
   round-robin: the process with rank i is assigned color i mod nsubcomm.
   Example: size=4, nsubcomm=(*psubcomm)->n=3

     comm=(*psubcomm)->parent:
      rank:     [0]  [1]  [2]  [3]
      color:     0    1    2    0

     subcomm=(*psubcomm)->comm:
      subrank:  [0]  [0]  [0]  [1]

     dupcomm=(*psubcomm)->dupparent:
      duprank:  [0]  [2]  [3]  [1]

     Here, subcomm[color = 0] has subsize=2 and owns processes [0] and [3],
           subcomm[color = 1] has subsize=1 and owns process [1],
           subcomm[color = 2] has subsize=1 and owns process [2].
           dupcomm has the same number of processes as comm; its rank, duprank,
           maps the processes of the subcommunicators contiguously into a 1d array:
            duprank: [0] [1]      [2]         [3]
            rank:    [0] [3]      [1]         [2]
                    subcomm[0] subcomm[1]  subcomm[2]

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommDestroy()
@*/
PetscErrorCode  PetscSubcommCreate(MPI_Comm comm,PetscInt nsubcomm,PetscSubcomm *psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank,subrank;
  PetscInt       np_subcomm,nleftover,i,j,color;
  MPI_Comm       subcomm=0,dupcomm=0;
  PetscSubcomm   psubcomm_tmp;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (nsubcomm < 1 || nsubcomm > size) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Num of subcommunicators %D cannot be < 1 or > input comm size %D",nsubcomm,size);

  /* get size of each subcommunicator: each gets size/nsubcomm processes,
     and the first size mod nsubcomm subcommunicators get one extra process */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);
  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++){
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }
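  /* e.g. size=4, nsubcomm=3 gives np_subcomm=1, nleftover=1, hence subsize = {2,1,1} */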

  /* find the color of this process: processes are dealt out round-robin, rank -> color = rank mod nsubcomm */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);
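  /* subcomm now groups the processes that share this color, ordered by subrank */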

  /* compute duprank: the sum of the sizes of all subcommunicators with a smaller color, plus this process's subrank */
  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++){
    if (j == color){
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }
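  /* with the example from the Notes (size=4, nsubcomm=3, subsize={2,1,1}):
     rank 0: color 0, subrank 0 -> duprank 0     rank 1: color 1, subrank 0 -> duprank 2
     rank 3: color 0, subrank 1 -> duprank 1     rank 2: color 2, subrank 0 -> duprank 3 */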
  ierr = PetscFree(subsize);CHKERRQ(ierr);

  /* create dupcomm with the same size as comm, but whose rank, duprank, maps the processes of the subcommunicators contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);

  ierr = PetscNew(struct _n_PetscSubcomm,&psubcomm_tmp);CHKERRQ(ierr);
  psubcomm_tmp->parent    = comm;
  psubcomm_tmp->dupparent = dupcomm;
  psubcomm_tmp->comm      = subcomm;
  psubcomm_tmp->n         = nsubcomm;
  psubcomm_tmp->color     = color;
  *psubcomm = psubcomm_tmp;
  PetscFunctionReturn(0);
}
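
Usage sketch (not part of subcomm.c): a minimal caller showing the create/use/destroy
cycle. Only PetscSubcommCreate() and PetscSubcommDestroy() come from this file; the
initialization boilerplate and PetscPrintf() are standard PETSc calls, and the fields
read from the struct (comm, n, color) are the ones assigned above. Run with at least
2 MPI processes, since nsubcomm may not exceed the size of the input communicator.

#include "petscsys.h"

int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscSubcomm   psubcomm;
  PetscMPIInt    subrank;

  ierr = PetscInitialize(&argc,&argv,(char*)0,(char*)0);CHKERRQ(ierr);

  /* split PETSC_COMM_WORLD into 2 subcommunicators */
  ierr = PetscSubcommCreate(PETSC_COMM_WORLD,2,&psubcomm);CHKERRQ(ierr);

  /* each process works inside the subcommunicator it was assigned to */
  ierr = MPI_Comm_rank(psubcomm->comm,&subrank);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"color %D of %D, subrank %d\n",psubcomm->color,psubcomm->n,subrank);CHKERRQ(ierr);

  ierr = PetscSubcommDestroy(psubcomm);CHKERRQ(ierr);
  ierr = PetscFinalize();CHKERRQ(ierr);
  return 0;
}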