Actual source code: subcomm.c
petsc-3.4.5 2014-06-29
/*
   Provides utility routines for splitting an MPI communicator.
*/
#include <petscsys.h>                        /*I "petscsys.h" I*/
#include <petsc-private/threadcommimpl.h>    /* Petsc_ThreadComm_keyval */

const char *const PetscSubcommTypes[] = {"GENERAL","CONTIGUOUS","INTERLACED","PetscSubcommType","PETSC_SUBCOMM_",0};

extern PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm);
extern PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm);
/*@C
   PetscSubcommSetNumber - Set the total number of subcommunicators.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
-  nsubcomm - the total number of subcommunicators in psubcomm

   Level: advanced

.keywords: communicator

.seealso: PetscSubcommCreate(), PetscSubcommDestroy(), PetscSubcommSetType(), PetscSubcommSetTypeGeneral()
@*/
PetscErrorCode PetscSubcommSetNumber(PetscSubcomm psubcomm,PetscInt nsubcomm)
{
  MPI_Comm    comm = psubcomm->parent;
  PetscMPIInt rank,size;

  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate() first");
  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  if (nsubcomm < 1 || nsubcomm > size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Num of subcommunicators %D cannot be < 1 or > input comm size %D",nsubcomm,size);

  psubcomm->n = nsubcomm;
  return(0);
}
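
/*
   Editor's note (illustration only, not part of the original PETSc source): the argument
   nsubcomm must lie between 1 and the size of the parent communicator, e.g.

     PetscSubcommCreate(PETSC_COMM_WORLD,&psubcomm);
     PetscSubcommSetNumber(psubcomm,2);    requires at least two processes in PETSC_COMM_WORLD
*/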
/*@C
   PetscSubcommSetType - Set the type of subcommunicators.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm    - PetscSubcomm context
-  subcommtype - subcommunicator type: PETSC_SUBCOMM_CONTIGUOUS or PETSC_SUBCOMM_INTERLACED

   Level: advanced

.keywords: communicator

.seealso: PetscSubcommCreate(), PetscSubcommDestroy(), PetscSubcommSetNumber(), PetscSubcommSetTypeGeneral()
@*/
PetscErrorCode PetscSubcommSetType(PetscSubcomm psubcomm,PetscSubcommType subcommtype)
{
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
  if (psubcomm->n < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %D is incorrect. Call PetscSubcommSetNumber()",psubcomm->n);

  if (subcommtype == PETSC_SUBCOMM_CONTIGUOUS) {
    PetscSubcommCreate_contiguous(psubcomm);
  } else if (subcommtype == PETSC_SUBCOMM_INTERLACED) {
    PetscSubcommCreate_interlaced(psubcomm);
  } else SETERRQ1(psubcomm->parent,PETSC_ERR_SUP,"PetscSubcommType %D is not supported yet",subcommtype);
  return(0);
}
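
/*
   Editor's note (illustration only, not part of the original PETSc source): after
   PetscSubcommSetNumber(), either built-in layout can be selected.  CONTIGUOUS places
   consecutive parent ranks in the same subcommunicator; INTERLACED places parent rank r in
   subcommunicator r mod n (see the Note before PetscSubcommCreate_interlaced() below).

     PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);
   or
     PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_INTERLACED);

   The resulting subcommunicator of this process is psubcomm->comm and the permuted parent
   communicator is psubcomm->dupparent.
*/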
/*@C
   PetscSubcommSetTypeGeneral - Set the subcommunicators from the user's specification.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
.  color    - control of subset assignment (nonnegative integer); processes with the same color are in the same subcommunicator
.  subrank  - rank within the subcommunicator
-  duprank  - rank within the dupparent (see PetscSubcomm)

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommCreate(), PetscSubcommDestroy(), PetscSubcommSetNumber(), PetscSubcommSetType()
@*/
PetscErrorCode PetscSubcommSetTypeGeneral(PetscSubcomm psubcomm,PetscMPIInt color,PetscMPIInt subrank,PetscMPIInt duprank)
{
  MPI_Comm    subcomm=0,dupcomm=0,comm=psubcomm->parent;
  PetscMPIInt size;

  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
  if (psubcomm->n < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %D is incorrect. Call PetscSubcommSetNumber()",psubcomm->n);

  MPI_Comm_split(comm,color,subrank,&subcomm);

  /* Create dupcomm with the same size as comm; its rank, duprank, maps the subcommunicators contiguously into dupcomm.
     If duprank is not a valid rank, dupcomm is not created - not all applications require dupcomm! */
  MPI_Comm_size(comm,&size);
  if (duprank == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"duprank==PETSC_DECIDE is not supported yet");
  else if (duprank >= 0 && duprank < size) {
    MPI_Comm_split(comm,0,duprank,&dupcomm);
  }
  PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);
  PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);
  MPI_Comm_free(&dupcomm);
  MPI_Comm_free(&subcomm);

  psubcomm->color = color;
  return(0);
}
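
/*
   Editor's sketch (illustration only, not part of the original PETSc source): a user-defined
   split through PetscSubcommSetTypeGeneral(), assuming psubcomm was obtained from
   PetscSubcommCreate().  Parent ranks are split by parity, and duprank is chosen so that each
   subcommunicator occupies a contiguous block of dupparent ranks.  The local variable names
   are illustrative only; error checking is omitted.

     PetscMPIInt rank,size,color,subrank,duprank,neven;

     MPI_Comm_rank(psubcomm->parent,&rank);
     MPI_Comm_size(psubcomm->parent,&size);
     neven   = (size+1)/2;                            number of even parent ranks
     color   = rank%2;                                even ranks form subcomm 0, odd ranks subcomm 1
     subrank = rank/2;                                position within the subcommunicator
     duprank = color ? neven + subrank : subrank;     even block first, then the odd block
     PetscSubcommSetNumber(psubcomm,2);
     PetscSubcommSetTypeGeneral(psubcomm,color,subrank,duprank);
*/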
PetscErrorCode PetscSubcommDestroy(PetscSubcomm *psubcomm)
{
  if (!*psubcomm) return(0);
  PetscCommDestroy(&(*psubcomm)->dupparent);
  PetscCommDestroy(&(*psubcomm)->comm);
  PetscFree((*psubcomm));
  return(0);
}
/*@C
   PetscSubcommCreate - Create a PetscSubcomm context.

   Collective on MPI_Comm

   Input Parameter:
.  comm - MPI communicator

   Output Parameter:
.  psubcomm - location to store the PetscSubcomm context

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommDestroy()
@*/
PetscErrorCode PetscSubcommCreate(MPI_Comm comm,PetscSubcomm *psubcomm)
{
  PetscNew(struct _n_PetscSubcomm,psubcomm);

  (*psubcomm)->parent = comm;
  return(0);
}
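
/*
   Editor's sketch (illustration only, not part of the original PETSc source): the typical
   life cycle of a PetscSubcomm, as used for example by PCREDUNDANT.  The number 4 is an
   arbitrary choice and must not exceed the size of PETSC_COMM_WORLD; error checking is
   omitted for brevity.

     PetscSubcomm psubcomm;

     PetscSubcommCreate(PETSC_COMM_WORLD,&psubcomm);
     PetscSubcommSetNumber(psubcomm,4);
     PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_INTERLACED);
     ... use psubcomm->comm (this process's subcommunicator) and
         psubcomm->dupparent (same size as the parent, ranks permuted) ...
     PetscSubcommDestroy(&psubcomm);
*/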
PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm psubcomm)
{
  PetscMPIInt rank,size,*subsize,duprank=-1,subrank=-1;
  PetscInt    np_subcomm,nleftover,i,color=-1,rankstart,nsubcomm=psubcomm->n;
  MPI_Comm    subcomm=0,dupcomm=0,comm=psubcomm->parent;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);

  /* get the size of each subcommunicator */
  PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);

  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;  /* the first nleftover subcommunicators get one extra process */
  }

  /* get the color and subrank of this process */
  rankstart = 0;
  for (i=0; i<nsubcomm; i++) {
    if (rank >= rankstart && rank < rankstart+subsize[i]) {
      color   = i;
      subrank = rank - rankstart;
      duprank = rank;
      break;
    } else rankstart += subsize[i];
  }
  PetscFree(subsize);

  MPI_Comm_split(comm,color,subrank,&subcomm);

  /* create dupcomm with the same size as comm; its rank, duprank, maps the subcommunicators contiguously into dupcomm */
  MPI_Comm_split(comm,0,duprank,&dupcomm);
  {
    /* attach the parent's thread communicator to the new communicators */
    PetscThreadComm tcomm;
    PetscCommGetThreadComm(comm,&tcomm);
    MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);
    tcomm->refct++;
    MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);
    tcomm->refct++;
  }
  PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);
  PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);
  MPI_Comm_free(&dupcomm);
  MPI_Comm_free(&subcomm);

  psubcomm->color = color;
  return(0);
}
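
/*
   Editor's note (illustration only, not part of the original source): with the contiguous
   layout the leftover processes are absorbed by the first subcommunicators and duprank is
   simply the parent rank.  Example: size=4, nsubcomm=3 gives subsize={2,1,1} and

      rank:    [0] [1] [2] [3]
      color:    0   0   1   2
      subrank: [0] [1] [0] [0]
      duprank: [0] [1] [2] [3]
*/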
/*
   Note:
   In PCREDUNDANT, to avoid scattering data from the subcommunicators back to the original comm, we create the
   subcommunicators by dealing processes out to them one at a time (round-robin).
   Example: size=4, nsubcomm=(*psubcomm)->n=3
     comm=(*psubcomm)->parent:
      rank:     [0] [1] [2] [3]
      color:     0   1   2   0

     subcomm=(*psubcomm)->comm:
      subrank:  [0] [0] [0] [1]

     dupcomm=(*psubcomm)->dupparent:
      duprank:  [0] [2] [3] [1]

     Here, subcomm[color = 0] has subsize=2 and owns processes [0] and [3],
           subcomm[color = 1] has subsize=1 and owns process [1],
           subcomm[color = 2] has subsize=1 and owns process [2].
           dupcomm has the same number of processes as comm, and its duprank maps
           the processes of the subcommunicators contiguously into a 1d array:
            duprank:  [0] [1]    [2]        [3]
            rank:     [0] [3]    [1]        [2]
                      subcomm[0] subcomm[1] subcomm[2]
*/
PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm psubcomm)
{
  PetscMPIInt rank,size,*subsize,duprank,subrank;
  PetscInt    np_subcomm,nleftover,i,j,color,nsubcomm=psubcomm->n;
  MPI_Comm    subcomm=0,dupcomm=0,comm=psubcomm->parent;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);

  /* get the size of each subcommunicator */
  PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);

  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* find the color and subrank of this process */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  MPI_Comm_split(comm,color,subrank,&subcomm);

  /* duprank = (sum of the sizes of all lower-colored subcommunicators) + subrank */
  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++) {
    if (j == color) {
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }
  PetscFree(subsize);

  /* create dupcomm with the same size as comm; its rank, duprank, maps the subcommunicators contiguously into dupcomm */
  MPI_Comm_split(comm,0,duprank,&dupcomm);
  {
    /* attach the parent's thread communicator to the new communicators */
    PetscThreadComm tcomm;
    PetscCommGetThreadComm(comm,&tcomm);
    MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);
    tcomm->refct++;
    MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);
    tcomm->refct++;
  }
  PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);
  PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);
  MPI_Comm_free(&dupcomm);
  MPI_Comm_free(&subcomm);

  psubcomm->color = color;
  return(0);
}
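
/*
   Editor's note (illustration only, not part of the original source): the duprank loop above
   computes duprank = subsize[0] + ... + subsize[color-1] + subrank, so each subcommunicator
   again occupies a contiguous block of dupcomm.  For the size=4, nsubcomm=3 example in the
   Note above (subsize={2,1,1}):

      rank 0: color 0, subrank 0, duprank 0
      rank 1: color 1, subrank 0, duprank 2
      rank 2: color 2, subrank 0, duprank 3
      rank 3: color 0, subrank 1, duprank 1
*/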