Actual source code: subcomm.c

petsc-3.5.4 2015-05-23
/*
     Provides utility routines for splitting an MPI communicator.
*/
#include <petscsys.h>    /*I   "petscsys.h"    I*/
#include <petsc-private/threadcommimpl.h> /* Petsc_ThreadComm_keyval */
#include <petscviewer.h>

const char *const PetscSubcommTypes[] = {"GENERAL","CONTIGUOUS","INTERLACED","PetscSubcommType","PETSC_SUBCOMM_",0};

extern PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm);
extern PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm);

PetscErrorCode PetscSubcommSetFromOptions(PetscSubcomm psubcomm)
{
  PetscSubcommType type;
  PetscBool        flg;

  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"Must call PetscSubcommCreate() first");
  type = psubcomm->type;
  PetscOptionsEnum("-psubcomm_type","PETSc subcommunicator","PetscSubcommSetType",PetscSubcommTypes,(PetscEnum)type,(PetscEnum*)&type,&flg);
  if (flg && psubcomm->type != type) {
    /* free old structures */
    PetscCommDestroy(&(psubcomm)->dupparent);
    PetscCommDestroy(&(psubcomm)->comm);
    PetscFree((psubcomm)->subsize);
    switch (type) {
    case PETSC_SUBCOMM_GENERAL:
      SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Runtime option PETSC_SUBCOMM_GENERAL is not supported, use PetscSubcommSetTypeGeneral()");
    case PETSC_SUBCOMM_CONTIGUOUS:
      PetscSubcommCreate_contiguous(psubcomm);
      break;
    case PETSC_SUBCOMM_INTERLACED:
      PetscSubcommCreate_interlaced(psubcomm);
      break;
    default:
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"PetscSubcommType %s is not supported yet",PetscSubcommTypes[type]);
    }
  }

  PetscOptionsHasName(NULL,"-psubcomm_view",&flg);
  if (flg) {
    PetscSubcommView(psubcomm,PETSC_VIEWER_STDOUT_WORLD);
  }
  return(0);
}
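/*
   Usage sketch (an added illustration, not part of the original source; "myapp" is a
   hypothetical executable): once a PetscSubcomm has been created and sized, the two
   options processed above select the type and print the layout at run time:

       mpiexec -n 8 ./myapp -psubcomm_type interlaced -psubcomm_view
*/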

PetscErrorCode PetscSubcommView(PetscSubcomm psubcomm,PetscViewer viewer)
{
  PetscBool         iascii;
  PetscViewerFormat format;

  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
  if (iascii) {
    PetscViewerGetFormat(viewer,&format);
    if (format == PETSC_VIEWER_DEFAULT) {
      MPI_Comm    comm=psubcomm->parent;
      PetscMPIInt rank,size,subsize,subrank,duprank;

      MPI_Comm_size(comm,&size);
      PetscViewerASCIIPrintf(viewer,"PetscSubcomm type %s with total %d MPI processes:\n",PetscSubcommTypes[psubcomm->type],size);
      MPI_Comm_rank(comm,&rank);
      MPI_Comm_size(psubcomm->comm,&subsize);
      MPI_Comm_rank(psubcomm->comm,&subrank);
      MPI_Comm_rank(psubcomm->dupparent,&duprank);
      PetscSynchronizedPrintf(comm,"  [%d], color %d, sub-size %d, sub-rank %d, duprank %d\n",rank,psubcomm->color,subsize,subrank,duprank);
      PetscSynchronizedFlush(comm,PETSC_STDOUT);
    }
  } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported yet");
  return(0);
}

/*@C
  PetscSubcommSetNumber - Set the total number of subcommunicators.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
-  nsubcomm - the total number of subcommunicators in psubcomm

   Level: advanced

.keywords: communicator

.seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetType(),PetscSubcommSetTypeGeneral()
@*/
PetscErrorCode  PetscSubcommSetNumber(PetscSubcomm psubcomm,PetscInt nsubcomm)
{
  MPI_Comm       comm;
  PetscMPIInt    msub,size;

  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate() first");
  comm = psubcomm->parent;
  MPI_Comm_size(comm,&size);
  PetscMPIIntCast(nsubcomm,&msub);
  if (msub < 1 || msub > size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Num of subcommunicators %d cannot be < 1 or > input comm size %d",msub,size);

  psubcomm->n = msub;
  return(0);
}

/*@C
  PetscSubcommSetType - Set the type of subcommunicators.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
-  subcommtype - subcommunicator type: PETSC_SUBCOMM_CONTIGUOUS or PETSC_SUBCOMM_INTERLACED

   Level: advanced

.keywords: communicator

.seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetNumber(),PetscSubcommSetTypeGeneral()
@*/
PetscErrorCode  PetscSubcommSetType(PetscSubcomm psubcomm,PetscSubcommType subcommtype)
{
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
  if (psubcomm->n < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %d is incorrect. Call PetscSubcommSetNumber()",psubcomm->n);

  if (subcommtype == PETSC_SUBCOMM_CONTIGUOUS) {
    PetscSubcommCreate_contiguous(psubcomm);
  } else if (subcommtype == PETSC_SUBCOMM_INTERLACED) {
    PetscSubcommCreate_interlaced(psubcomm);
  } else SETERRQ1(psubcomm->parent,PETSC_ERR_SUP,"PetscSubcommType %s is not supported yet",PetscSubcommTypes[subcommtype]);
  return(0);
}
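/*
   Usage sketch (an added illustration, not part of the original source): split
   PETSC_COMM_WORLD into 4 subcommunicators; error checking is omitted here, as
   elsewhere in this listing.

       PetscSubcomm psubcomm;
       PetscSubcommCreate(PETSC_COMM_WORLD,&psubcomm);
       PetscSubcommSetNumber(psubcomm,4);
       PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);
       ...  use psubcomm->comm as an ordinary MPI communicator ...
       PetscSubcommDestroy(&psubcomm);
*/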

/*@C
  PetscSubcommSetTypeGeneral - Set a PetscSubcomm from the user's specifications

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
.  color    - control of subset assignment (nonnegative integer). Processes with the same color are in the same subcommunicator.
-  subrank  - rank in the subcommunicator

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetNumber(),PetscSubcommSetType()
@*/
PetscErrorCode PetscSubcommSetTypeGeneral(PetscSubcomm psubcomm,PetscMPIInt color,PetscMPIInt subrank)
{
  MPI_Comm       subcomm=0,dupcomm=0,comm;
  PetscMPIInt    size,icolor,duprank,*recvbuf,sendbuf[3],mysubsize,rank,*subsize;
  PetscMPIInt    i,nsubcomm;

  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
  nsubcomm = psubcomm->n;
  if (nsubcomm < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %d is incorrect. Call PetscSubcommSetNumber()",nsubcomm);
  comm = psubcomm->parent;

  MPI_Comm_split(comm,color,subrank,&subcomm);

  /* create dupcomm with same size as comm; its rank, duprank, maps the subcommunicators contiguously into dupcomm */
  MPI_Comm_size(comm,&size);
  PetscMalloc1(2*size,&recvbuf);

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(subcomm,&mysubsize);

  sendbuf[0] = color;
  sendbuf[1] = mysubsize;
  MPI_Allgather(sendbuf,2,MPI_INT,recvbuf,2,MPI_INT,comm);

  PetscCalloc1(nsubcomm,&subsize);
  for (i=0; i<2*size; i+=2) {
    subsize[recvbuf[i]] = recvbuf[i+1];
  }
  PetscFree(recvbuf);

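  /* compute duprank: the subcommunicators are concatenated in increasing color order,
     so this process is offset by the sizes of all lower colors plus its subrank */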
  duprank = 0;
  for (icolor=0; icolor<nsubcomm; icolor++) {
    if (icolor != color) { /* not color of this process */
      duprank += subsize[icolor];
    } else {
      duprank += subrank;
      break;
    }
  }
  MPI_Comm_split(comm,0,duprank,&dupcomm);

  PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);
  PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);
  MPI_Comm_free(&dupcomm);
  MPI_Comm_free(&subcomm);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_GENERAL;
  return(0);
}
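/*
   Usage sketch (an added illustration, not part of the original source): put even and
   odd ranks of the parent communicator into two separate subcommunicators.

       PetscMPIInt  rank;
       PetscSubcomm psubcomm;
       MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
       PetscSubcommCreate(PETSC_COMM_WORLD,&psubcomm);
       PetscSubcommSetNumber(psubcomm,2);
       PetscSubcommSetTypeGeneral(psubcomm,rank%2,rank/2);
       PetscSubcommDestroy(&psubcomm);
*/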

PetscErrorCode  PetscSubcommDestroy(PetscSubcomm *psubcomm)
{
  if (!*psubcomm) return(0);
  PetscCommDestroy(&(*psubcomm)->dupparent);
  PetscCommDestroy(&(*psubcomm)->comm);
  PetscFree((*psubcomm)->subsize);
  PetscFree((*psubcomm));
  return(0);
}

/*@C
  PetscSubcommCreate - Create a PetscSubcomm context.

   Collective on MPI_Comm

   Input Parameter:
.  comm - MPI communicator

   Output Parameter:
.  psubcomm - location to store the PetscSubcomm context

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommDestroy()
@*/
PetscErrorCode  PetscSubcommCreate(MPI_Comm comm,PetscSubcomm *psubcomm)
{
  PetscMPIInt    rank,size;

  PetscNew(psubcomm);

  /* set defaults */
  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);

  (*psubcomm)->parent    = comm;
  (*psubcomm)->dupparent = comm;
  (*psubcomm)->comm      = PETSC_COMM_SELF;
  (*psubcomm)->n         = size;
  (*psubcomm)->color     = rank;
  (*psubcomm)->subsize   = NULL;
  (*psubcomm)->type      = PETSC_SUBCOMM_INTERLACED;
  return(0);
}
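/*
   Note (an added illustration, mirroring the interlaced example further below):
   the contiguous scheme assigns consecutive ranks of comm to each subcommunicator.
   Example: size=4, nsubcomm=3
      rank:     [0]  [1]  [2]  [3]
      color:     0    0    1    2
      subrank:  [0]  [1]  [0]  [0]
   Since the subcommunicators are already contiguous in comm, duprank simply equals rank.
*/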

PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm psubcomm)
{
  PetscMPIInt    rank,size,*subsize,duprank=-1,subrank=-1;
  PetscMPIInt    np_subcomm,nleftover,i,color=-1,rankstart,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);

  /* get size of each subcommunicator */
  PetscMalloc1((1+nsubcomm),&subsize);

  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* get color and subrank of this proc */
  rankstart = 0;
  for (i=0; i<nsubcomm; i++) {
    if (rank >= rankstart && rank < rankstart+subsize[i]) {
      color   = i;
      subrank = rank - rankstart;
      duprank = rank;
      break;
    } else rankstart += subsize[i];
  }

  MPI_Comm_split(comm,color,subrank,&subcomm);

  /* create dupcomm with same size as comm; its rank, duprank, maps the subcommunicators contiguously into dupcomm */
  MPI_Comm_split(comm,0,duprank,&dupcomm);
  {
    PetscThreadComm tcomm;
    PetscCommGetThreadComm(comm,&tcomm);
    MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);
    tcomm->refct++;
    MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);
    tcomm->refct++;
  }
  PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);
  PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);
  MPI_Comm_free(&dupcomm);
  MPI_Comm_free(&subcomm);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_CONTIGUOUS;
  return(0);
}

/*
   Note:
   In PCREDUNDANT, to avoid scattering data from the subcommunicators back to the original
   comm, the subcommunicators are created by assigning processes round-robin: process
   rank of comm gets color = rank % nsubcomm.
   Example: size=4, nsubcomm=(*psubcomm)->n=3
     comm=(*psubcomm)->parent:
      rank:     [0]  [1]  [2]  [3]
      color:     0    1    2    0

     subcomm=(*psubcomm)->comm:
      subrank:  [0]  [0]  [0]  [1]

     dupcomm=(*psubcomm)->dupparent:
      duprank:  [0]  [2]  [3]  [1]

     Here, subcomm[color = 0] has subsize=2, owns processes [0] and [3]
           subcomm[color = 1] has subsize=1, owns process [1]
           subcomm[color = 2] has subsize=1, owns process [2]
           dupcomm has the same number of processes as comm, and its duprank maps
           processes in subcomm contiguously into a 1d array:
            duprank: [0] [1]      [2]         [3]
            rank:    [0] [3]      [1]         [2]
                    subcomm[0] subcomm[1]  subcomm[2]
*/
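/*
   Equivalently (an added remark): duprank = subsize[0] + ... + subsize[color-1] + subrank,
   which is what the loop over the colors in the routine below computes.
*/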

PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm psubcomm)
{
  PetscMPIInt    rank,size,*subsize,duprank,subrank;
  PetscMPIInt    np_subcomm,nleftover,i,j,color,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);

  /* get size of each subcommunicator */
  PetscMalloc1((1+nsubcomm),&subsize);

  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* find color for this proc */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  MPI_Comm_split(comm,color,subrank,&subcomm);

  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++) {
    if (j == color) {
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }

  /* create dupcomm with same size as comm; its rank, duprank, maps the subcommunicators contiguously into dupcomm */
  MPI_Comm_split(comm,0,duprank,&dupcomm);
  {
    PetscThreadComm tcomm;
    PetscCommGetThreadComm(comm,&tcomm);
    MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);
    tcomm->refct++;
    MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);
    tcomm->refct++;
  }
  PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);
  PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);
  MPI_Comm_free(&dupcomm);
  MPI_Comm_free(&subcomm);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_INTERLACED;
  return(0);
}