Actual source code: subcomm.c

petsc-3.3-p7 2013-05-11
/*
     Provides utility routines for splitting an MPI communicator into subcommunicators.
*/
#include <petscsys.h>    /*I   "petscsys.h"    I*/

extern PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm);
extern PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm);

/*@C
  PetscSubcommSetNumber - Set the total number of subcommunicators.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
-  nsubcomm - the total number of subcommunicators in psubcomm

   Level: advanced

.keywords: communicator

.seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetType(),PetscSubcommSetTypeGeneral()
@*/
PetscErrorCode  PetscSubcommSetNumber(PetscSubcomm psubcomm,PetscInt nsubcomm)
{
  PetscErrorCode ierr;
  MPI_Comm       comm;
  PetscMPIInt    rank,size;

  PetscFunctionBegin;
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate() first");
  comm = psubcomm->parent; /* dereference psubcomm only after the null check above */
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (nsubcomm < 1 || nsubcomm > size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Num of subcommunicators %D cannot be < 1 or > input comm size %D",nsubcomm,size);

  psubcomm->n = nsubcomm;
  PetscFunctionReturn(0);
}
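
/*
   A minimal usage sketch (assuming a PETSc program with the usual ierr/CHKERRQ
   error handling already set up): create the context, then set the count.

     PetscSubcomm psubcomm;
     ierr = PetscSubcommCreate(PETSC_COMM_WORLD,&psubcomm);CHKERRQ(ierr);
     ierr = PetscSubcommSetNumber(psubcomm,4);CHKERRQ(ierr);
*/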

/*@C
  PetscSubcommSetType - Set the type of subcommunicators.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
-  subcommtype - subcommunicator type, PETSC_SUBCOMM_CONTIGUOUS or PETSC_SUBCOMM_INTERLACED

   Level: advanced

.keywords: communicator

.seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetNumber(),PetscSubcommSetTypeGeneral()
@*/
PetscErrorCode  PetscSubcommSetType(PetscSubcomm psubcomm,PetscSubcommType subcommtype)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
  if (psubcomm->n < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %D is incorrect. Call PetscSubcommSetNumber()",psubcomm->n);

  if (subcommtype == PETSC_SUBCOMM_CONTIGUOUS) {
    ierr = PetscSubcommCreate_contiguous(psubcomm);CHKERRQ(ierr);
  } else if (subcommtype == PETSC_SUBCOMM_INTERLACED) {
    ierr = PetscSubcommCreate_interlaced(psubcomm);CHKERRQ(ierr);
  } else SETERRQ1(psubcomm->parent,PETSC_ERR_SUP,"PetscSubcommType %D is not supported yet",subcommtype);
  PetscFunctionReturn(0);
}
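
/*
   A minimal sketch of selecting a type (assuming PetscSubcommSetNumber() has
   already been called on psubcomm):

     ierr = PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_INTERLACED);CHKERRQ(ierr);

   For example, on 8 processes with n=2, PETSC_SUBCOMM_CONTIGUOUS groups ranks
   {0,1,2,3} and {4,5,6,7}, while PETSC_SUBCOMM_INTERLACED groups {0,2,4,6}
   and {1,3,5,7}.
*/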

/*@C
  PetscSubcommSetTypeGeneral - Set the type of subcommunicators from the user's specifications.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
.  color   - control of subset assignment (nonnegative integer); processes with the same color are in the same subcommunicator
.  subrank - rank in the subcommunicator
-  duprank - rank in the dupparent (see PetscSubcomm)

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetNumber(),PetscSubcommSetType()
@*/
PetscErrorCode  PetscSubcommSetTypeGeneral(PetscSubcomm psubcomm,PetscMPIInt color,PetscMPIInt subrank,PetscMPIInt duprank)
{
  PetscErrorCode ierr;
  MPI_Comm       subcomm=0,dupcomm=0,comm;
  PetscMPIInt    size;

  PetscFunctionBegin;
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
  if (psubcomm->n < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %D is incorrect. Call PetscSubcommSetNumber()",psubcomm->n);
  comm = psubcomm->parent; /* dereference psubcomm only after the null check above */

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with the same size as comm, but its rank, duprank, maps the subcomms contiguously into dupcomm;
     if duprank is not a valid rank in comm, dupcomm is not created - not all applications require dupcomm! */
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (duprank == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"duprank==PETSC_DECIDE is not supported yet");
  else if (duprank >= 0 && duprank < size) {
    ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  }
  if (dupcomm) { /* duplicate and free dupcomm only if it was actually created above */
    ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,PETSC_NULL);CHKERRQ(ierr);
    ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  }
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);
  psubcomm->color = color;
  PetscFunctionReturn(0);
}
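
/*
   A minimal sketch (assuming psubcomm was created with n=2 on 4 processes):
   reproduce the contiguous layout by hand, with ranks 0-1 in one
   subcommunicator and ranks 2-3 in the other.

     PetscMPIInt rank;
     ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
     ierr = PetscSubcommSetTypeGeneral(psubcomm,rank/2,rank%2,rank);CHKERRQ(ierr);

   Here color=rank/2 and subrank=rank%2, and duprank=rank already orders the
   subcommunicators' processes contiguously in dupparent.
*/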

PetscErrorCode  PetscSubcommDestroy(PetscSubcomm *psubcomm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!*psubcomm) PetscFunctionReturn(0);
  ierr = PetscCommDestroy(&(*psubcomm)->dupparent);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&(*psubcomm)->comm);CHKERRQ(ierr);
  ierr = PetscFree((*psubcomm));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
  PetscSubcommCreate - Create a PetscSubcomm context.

   Collective on MPI_Comm

   Input Parameter:
.  comm - MPI communicator

   Output Parameter:
.  psubcomm - location to store the PetscSubcomm context

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommDestroy()
@*/
PetscErrorCode  PetscSubcommCreate(MPI_Comm comm,PetscSubcomm *psubcomm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscNew(struct _n_PetscSubcomm,psubcomm);CHKERRQ(ierr);
  (*psubcomm)->parent = comm;
  PetscFunctionReturn(0);
}
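
/*
   A minimal end-to-end sketch of the PetscSubcomm lifecycle (assuming a
   standard PETSc main program): split PETSC_COMM_WORLD into 3 contiguous
   subcommunicators, use them, then destroy the context.

     PetscSubcomm psubcomm;
     ierr = PetscSubcommCreate(PETSC_COMM_WORLD,&psubcomm);CHKERRQ(ierr);
     ierr = PetscSubcommSetNumber(psubcomm,3);CHKERRQ(ierr);
     ierr = PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);CHKERRQ(ierr);
     (use psubcomm->comm, this process's subcommunicator, and
      psubcomm->dupparent, the parent-sized communicator with contiguous rank order)
     ierr = PetscSubcommDestroy(&psubcomm);CHKERRQ(ierr);
*/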

PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank=-1,subrank=-1;
  PetscInt       np_subcomm,nleftover,i,color=-1,rankstart,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get the size of each subcommunicator */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);
  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* get the color and subrank of this process */
  rankstart = 0;
  for (i=0; i<nsubcomm; i++) {
    if (rank >= rankstart && rank < rankstart+subsize[i]) {
      color   = i;
      subrank = rank - rankstart;
      duprank = rank;
      break;
    } else {
      rankstart += subsize[i];
    }
  }
  ierr = PetscFree(subsize);CHKERRQ(ierr);

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with the same size as comm, but its rank, duprank, maps the subcomms contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);

  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);
  psubcomm->color = color;
  PetscFunctionReturn(0);
}
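
/*
   Worked example for the contiguous layout above (analogous to the interlaced
   note below): size=4, nsubcomm=3 gives subsize={2,1,1}, so

      rank:     [0]  [1]  [2]  [3]
      color:     0    0    1    2
      subrank:  [0]  [1]  [0]  [0]
      duprank:  [0]  [1]  [2]  [3]

   Ranks are already contiguous within each subcommunicator, hence duprank==rank.
*/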

/*
   Note:
   In PCREDUNDANT, to avoid scattering data from the subcommunicators back to the original comm,
   we create the subcommunicators by assigning processes to them one at a time in round-robin order.
   Example: size=4, nsubcomm=(*psubcomm)->n=3
     comm=(*psubcomm)->parent:
      rank:     [0]  [1]  [2]  [3]
      color:     0    1    2    0

     subcomm=(*psubcomm)->comm:
      subrank:  [0]  [0]  [0]  [1]

     dupcomm=(*psubcomm)->dupparent:
      duprank:  [0]  [2]  [3]  [1]

     Here, subcomm[color = 0] has subsize=2, owns processes [0] and [3]
           subcomm[color = 1] has subsize=1, owns process [1]
           subcomm[color = 2] has subsize=1, owns process [2]
           dupcomm has the same number of processes as comm, and its duprank maps
           processes in subcomm contiguously into a 1d array:
            duprank: [0] [1]      [2]         [3]
            rank:    [0] [3]      [1]         [2]
                    subcomm[0] subcomm[1]  subcomm[2]
*/

PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank,subrank;
  PetscInt       np_subcomm,nleftover,i,j,color,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get the size of each subcommunicator */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);
  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* find the color and subrank of this process */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* duprank = (sum of the sizes of the subcommunicators preceding this one) + subrank */
  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++) {
    if (j == color) {
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }
  ierr = PetscFree(subsize);CHKERRQ(ierr);

  /* create dupcomm with the same size as comm, but its rank, duprank, maps the subcomms contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);

  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);
  psubcomm->color = color;
  PetscFunctionReturn(0);
}