Actual source code: iscoloring.c
petsc-3.3-p7 2013-05-11
2: #include <petscis.h> /*I "petscis.h" I*/
4: const char *ISColoringTypes[] = {"global","ghosted","ISColoringType","IS_COLORING_",0};
8: /*@
9: ISColoringDestroy - Destroys a coloring context.
11: Collective on ISColoring
13: Input Parameter:
14: . iscoloring - the coloring context
16: Level: advanced
18: .seealso: ISColoringView(), MatGetColoring()
19: @*/
20: PetscErrorCode ISColoringDestroy(ISColoring *iscoloring)
21: {
22: PetscInt i;
26: if (!*iscoloring) return(0);
28: if (--(*iscoloring)->refct > 0) {*iscoloring = 0; return(0);}
30: if ((*iscoloring)->is) {
31: for (i=0; i<(*iscoloring)->n; i++) {
32: ISDestroy(&(*iscoloring)->is[i]);
33: }
34: PetscFree((*iscoloring)->is);
35: }
36: PetscFree((*iscoloring)->colors);
37: PetscCommDestroy(&(*iscoloring)->comm);
38: PetscFree((*iscoloring));
39: return(0);
40: }
44: /*@C
45: ISColoringView - Views a coloring context.
47: Collective on ISColoring
49: Input Parameters:
50: + iscoloring - the coloring context
51: - viewer - the viewer
53: Level: advanced
55: .seealso: ISColoringDestroy(), ISColoringGetIS(), MatGetColoring()
56: @*/
57: PetscErrorCode ISColoringView(ISColoring iscoloring,PetscViewer viewer)
58: {
59: PetscInt i;
61: PetscBool iascii;
62: IS *is;
66: if (!viewer) {
67: PetscViewerASCIIGetStdout(iscoloring->comm,&viewer);
68: }
71: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
72: if (iascii) {
73: MPI_Comm comm;
74: PetscMPIInt rank;
75: PetscObjectGetComm((PetscObject)viewer,&comm);
76: MPI_Comm_rank(comm,&rank);
77: PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
78: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Number of colors %D\n",rank,iscoloring->n);
79: PetscViewerFlush(viewer);
80: PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
81: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for ISColoring",((PetscObject)viewer)->type_name);
83: ISColoringGetIS(iscoloring,PETSC_IGNORE,&is);
84: for (i=0; i<iscoloring->n; i++) {
85: ISView(iscoloring->is[i],viewer);
86: }
87: ISColoringRestoreIS(iscoloring,&is);
88: return(0);
89: }
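/*
   Minimal usage sketch (editorial addition, not part of iscoloring.c): because of the
   check at the top of ISColoringView(), passing PETSC_NULL as the viewer makes the
   routine fall back to the standard ASCII viewer of the coloring's communicator.
   The variable "coloring" is assumed to be an existing ISColoring.
*/
   ISColoringView(coloring,PETSC_NULL);   /* prints to stdout of the coloring's communicator */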
93: /*@C
94: ISColoringGetIS - Extracts index sets from the coloring context
96: Collective on ISColoring
98: Input Parameter:
99: . iscoloring - the coloring context
101: Output Parameters:
102: + nn - number of index sets in the coloring context
103: - is - array of index sets
105: Level: advanced
107: .seealso: ISColoringRestoreIS(), ISColoringView()
108: @*/
109: PetscErrorCode ISColoringGetIS(ISColoring iscoloring,PetscInt *nn,IS *isis[])
110: {
116: if (nn) *nn = iscoloring->n;
117: if (isis) {
118: if (!iscoloring->is) {
119: PetscInt *mcolors,**ii,nc = iscoloring->n,i,base, n = iscoloring->N;
120: ISColoringValue *colors = iscoloring->colors;
121: IS *is;
123: #if defined(PETSC_USE_DEBUG)
124: for (i=0; i<n; i++) {
125: if (((PetscInt)colors[i]) >= nc) {
126: SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coloring is out of range: index %d value %d number of colors %d",(int)i,(int)colors[i],(int)nc);
127: }
128: }
129: #endif
130:
131: /* generate the lists of nodes for each color */
132: PetscMalloc(nc*sizeof(PetscInt),&mcolors);
133: PetscMemzero(mcolors,nc*sizeof(PetscInt));
134: for (i=0; i<n; i++) {
135: mcolors[colors[i]]++;
136: }
138: PetscMalloc(nc*sizeof(PetscInt*),&ii);
139: PetscMalloc(n*sizeof(PetscInt),&ii[0]);
140: for (i=1; i<nc; i++) {
141: ii[i] = ii[i-1] + mcolors[i-1];
142: }
143: PetscMemzero(mcolors,nc*sizeof(PetscInt));
145: if (iscoloring->ctype == IS_COLORING_GLOBAL){
146: MPI_Scan(&iscoloring->N,&base,1,MPIU_INT,MPI_SUM,iscoloring->comm);
147: base -= iscoloring->N;
148: for (i=0; i<n; i++) {
149: ii[colors[i]][mcolors[colors[i]]++] = i + base; /* global idx */
150: }
151: } else if (iscoloring->ctype == IS_COLORING_GHOSTED){
152: for (i=0; i<n; i++) {
153: ii[colors[i]][mcolors[colors[i]]++] = i; /* local idx */
154: }
155: } else {
156: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not provided for this ISColoringType");
157: }
158:
159: PetscMalloc(nc*sizeof(IS),&is);
160: for (i=0; i<nc; i++) {
161: ISCreateGeneral(iscoloring->comm,mcolors[i],ii[i],PETSC_COPY_VALUES,is+i);
162: }
164: iscoloring->is = is;
165: PetscFree(ii[0]);
166: PetscFree(ii);
167: PetscFree(mcolors);
168: }
169: *isis = iscoloring->is;
170: }
171: return(0);
172: }
176: /*@C
177: ISColoringRestoreIS - Restores the index sets extracted from the coloring context
179: Collective on ISColoring
181: Input Parameters:
182: + iscoloring - the coloring context
183: - is - array of index sets
185: Level: advanced
187: .seealso: ISColoringGetIS(), ISColoringView()
188: @*/
189: PetscErrorCode ISColoringRestoreIS(ISColoring iscoloring,IS *is[])
190: {
193:
194: /* currently nothing is done here */
196: return(0);
197: }
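/*
   Illustrative usage sketch (editorial addition, not part of iscoloring.c): extract the
   per-color index sets from an existing ISColoring, view each one, and hand the array
   back.  The helper name "ViewEachColor" is made up; the coloring is assumed to live on
   PETSC_COMM_WORLD so that PETSC_VIEWER_STDOUT_WORLD is a valid viewer for its ISs.
*/
static PetscErrorCode ViewEachColor(ISColoring coloring)
{
  PetscErrorCode ierr;
  PetscInt       ncolors,c;
  IS             *isa;

  PetscFunctionBegin;
  ierr = ISColoringGetIS(coloring,&ncolors,&isa);CHKERRQ(ierr);
  for (c=0; c<ncolors; c++) {                                  /* one IS per color */
    ierr = ISView(isa[c],PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }
  ierr = ISColoringRestoreIS(coloring,&isa);CHKERRQ(ierr);     /* do not ISDestroy() these yourself */
  PetscFunctionReturn(0);
}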
202: /*@C
203: ISColoringCreate - Generates an ISColoring context from lists (provided
204: by each processor) of colors for each node.
206: Collective on MPI_Comm
208: Input Parameters:
209: + comm - communicator for the processors creating the coloring
210: . ncolors - max color value
211: . n - number of nodes on this processor
212: - colors - array containing the colors for this processor; color
213: numbers begin at 0. In C/C++ this array must have been obtained with PetscMalloc()
214: and should NOT be freed (ISColoringDestroy() will free it).
216: Output Parameter:
217: . iscoloring - the resulting coloring data structure
219: Options Database Key:
220: . -is_coloring_view - Activates ISColoringView()
222: Level: advanced
223:
224: Notes: By default sets coloring type to IS_COLORING_GLOBAL
226: .seealso: MatColoringCreate(), ISColoringView(), ISColoringDestroy(), ISColoringSetType()
228: @*/
229: PetscErrorCode ISColoringCreate(MPI_Comm comm,PetscInt ncolors,PetscInt n,const ISColoringValue colors[],ISColoring *iscoloring)
230: {
232: PetscMPIInt size,rank,tag;
233: PetscInt base,top,i;
234: PetscInt nc,ncwork;
235: PetscBool flg = PETSC_FALSE;
236: MPI_Status status;
239: if (ncolors != PETSC_DECIDE && ncolors > IS_COLORING_MAX) {
240: if (ncolors > 65535) {
241: SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds 65535 limit. This number is unrealistic. Perhaps a bug in code?\nCurrent max: %d user requested: %d",IS_COLORING_MAX,ncolors);
242: } else {
243: SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds limit. Perhaps reconfigure PETSc with --with-is-color-value-type=short?\n Current max: %d user requested: %d",IS_COLORING_MAX,ncolors);
244: }
245: }
246: PetscNew(struct _n_ISColoring,iscoloring);
247: PetscCommDuplicate(comm,&(*iscoloring)->comm,&tag);
248: comm = (*iscoloring)->comm;
250: /* compute the number of the first node on my processor */
251: MPI_Comm_size(comm,&size);
253: /* should use MPI_Scan() */
254: MPI_Comm_rank(comm,&rank);
255: if (!rank) {
256: base = 0;
257: top = n;
258: } else {
259: MPI_Recv(&base,1,MPIU_INT,rank-1,tag,comm,&status);
260: top = base+n;
261: }
262: if (rank < size-1) {
263: MPI_Send(&top,1,MPIU_INT,rank+1,tag,comm);
264: }
266: /* compute the total number of colors */
267: ncwork = 0;
268: for (i=0; i<n; i++) {
269: if (ncwork < colors[i]) ncwork = colors[i];
270: }
271: ncwork++;
272: MPI_Allreduce(&ncwork,&nc,1,MPIU_INT,MPI_MAX,comm);
273: if (nc > ncolors) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of colors passed in %D is less than the actual number of colors in array %D",ncolors,nc);
274: (*iscoloring)->n = nc;
275: (*iscoloring)->is = 0;
276: (*iscoloring)->colors = (ISColoringValue *)colors;
277: (*iscoloring)->N = n;
278: (*iscoloring)->refct = 1;
279: (*iscoloring)->ctype = IS_COLORING_GLOBAL;
281: PetscOptionsGetBool(PETSC_NULL,"-is_coloring_view",&flg,PETSC_NULL);
282: if (flg) {
283: PetscViewer viewer;
284: PetscViewerASCIIGetStdout((*iscoloring)->comm,&viewer);
285: ISColoringView(*iscoloring,viewer);
286: }
287: PetscInfo1(0,"Number of colors %D\n",nc);
288: return(0);
289: }
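/*
   Illustrative usage sketch (editorial addition, not part of iscoloring.c): build a
   two-color coloring of the local nodes (even nodes get color 0, odd nodes color 1).
   The colors array must come from PetscMalloc() because ISColoringDestroy() takes
   ownership and frees it.  The helper name "CreateEvenOddColoring" is made up.
*/
static PetscErrorCode CreateEvenOddColoring(MPI_Comm comm,PetscInt n,ISColoring *coloring)
{
  PetscErrorCode  ierr;
  ISColoringValue *colors;
  PetscInt        i;

  PetscFunctionBegin;
  ierr = PetscMalloc(n*sizeof(ISColoringValue),&colors);CHKERRQ(ierr);
  for (i=0; i<n; i++) colors[i] = (ISColoringValue)(i % 2);
  ierr = ISColoringCreate(comm,2,n,colors,coloring);CHKERRQ(ierr);  /* the coloring now owns colors */
  PetscFunctionReturn(0);
}
/* ...and when it is no longer needed:  ISColoringDestroy(&coloring);  */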
293: /*@
294: ISPartitioningToNumbering - Takes an ISPartitioning and on each processor
295: generates an IS that contains a new global node number for each index based
296: on the partitioning.
298: Collective on IS
300: Input Parameter:
301: . partitioning - a partitioning as generated by MatPartitioningApply()
303: Output Parameter:
304: . is - on each processor the index set that defines the global numbers
305: (in the new numbering) for all the nodes currently (before the partitioning)
306: on that processor
308: Level: advanced
310: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningCount()
312: @*/
313: PetscErrorCode ISPartitioningToNumbering(IS part,IS *is)
314: {
315: MPI_Comm comm;
316: PetscInt i,np,npt,n,*starts = PETSC_NULL,*sums = PETSC_NULL,*lsizes = PETSC_NULL,*newi = PETSC_NULL;
317: const PetscInt *indices = PETSC_NULL;
321: PetscObjectGetComm((PetscObject)part,&comm);
323: /* count the number of partitions, i.e., virtual processors */
324: ISGetLocalSize(part,&n);
325: ISGetIndices(part,&indices);
326: np = 0;
327: for (i=0; i<n; i++) {
328: np = PetscMax(np,indices[i]);
329: }
330: MPI_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
331: np = npt+1; /* so that it looks like an MPI_Comm_size output */
333: /*
334: lsizes - number of elements of each partition on this particular processor
335: sums - total number of "previous" nodes for any particular partition
336: starts - global number of first element in each partition on this processor
337: */
338: PetscMalloc3(np,PetscInt,&lsizes,np,PetscInt,&starts,np,PetscInt,&sums);
339: PetscMemzero(lsizes,np*sizeof(PetscInt));
340: for (i=0; i<n; i++) {
341: lsizes[indices[i]]++;
342: }
343: MPI_Allreduce(lsizes,sums,np,MPIU_INT,MPI_SUM,comm);
344: MPI_Scan(lsizes,starts,np,MPIU_INT,MPI_SUM,comm);
345: for (i=0; i<np; i++) {
346: starts[i] -= lsizes[i];
347: }
348: for (i=1; i<np; i++) {
349: sums[i] += sums[i-1];
350: starts[i] += sums[i-1];
351: }
353: /*
354: For each local index give it the new global number
355: */
356: PetscMalloc(n*sizeof(PetscInt),&newi);
357: for (i=0; i<n; i++) {
358: newi[i] = starts[indices[i]]++;
359: }
360: PetscFree3(lsizes,starts,sums);
362: ISRestoreIndices(part,&indices);
363: ISCreateGeneral(comm,n,newi,PETSC_OWN_POINTER,is);
364: ISSetPermutation(*is);
365: return(0);
366: }
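/*
   Worked example (editorial addition): with two processes and two partitions, suppose
   rank 0 owns nodes with partition indices {0,1,0} and rank 1 owns {1,1,0}.  Then
   lsizes = [2,1] on rank 0 and [1,2] on rank 1, the global partition sizes are
   sums = [3,3], and after the scan and shift above starts = [0,3] on rank 0 and
   [2,4] on rank 1.  The new numbering is therefore [0,3,1] on rank 0 and [4,5,2] on
   rank 1: partition 0 receives global numbers 0..2 and partition 1 receives 3..5.

   Minimal usage sketch (variable names are made up; "part" comes from
   MatPartitioningApply()):
*/
   IS newnumbering;
   ISPartitioningToNumbering(part,&newnumbering);
   ISView(newnumbering,PETSC_VIEWER_STDOUT_WORLD);
   ISDestroy(&newnumbering);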
370: /*@
371: ISPartitioningCount - Takes an ISPartitioning and determines the number of
372: resulting elements on each (partition) process
374: Collective on IS
376: Input Parameters:
377: + partitioning - a partitioning as generated by MatPartitioningApply()
378: - len - length of the array count; this is the total number of partitions
380: Output Parameter:
381: . count - array of length size, to contain the number of elements assigned
382: to each partition, where size is the number of partitions generated
383: (see notes below).
385: Level: advanced
387: Notes:
388: By default the number of partitions generated (and thus the length
389: of count) is the size of the communicator associated with IS,
390: but it can be set by MatPartitioningSetNParts. The resulting array
391: of lengths can for instance serve as input of PCBJacobiSetTotalBlocks.
394: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningToNumbering(),
395: MatPartitioningSetNParts(), MatPartitioningApply()
397: @*/
398: PetscErrorCode ISPartitioningCount(IS part,PetscInt len,PetscInt count[])
399: {
400: MPI_Comm comm;
401: PetscInt i,n,*lsizes;
402: const PetscInt *indices;
404: PetscMPIInt npp;
407: PetscObjectGetComm((PetscObject)part,&comm);
408: if (len == PETSC_DEFAULT) {
409: PetscMPIInt size;
410: MPI_Comm_size(comm,&size);
411: len = (PetscInt) size;
412: }
414: /* count the number of partitions */
415: ISGetLocalSize(part,&n);
416: ISGetIndices(part,&indices);
417: #if defined(PETSC_USE_DEBUG)
418: {
419: PetscInt np = 0,npt;
420: for (i=0; i<n; i++) {
421: np = PetscMax(np,indices[i]);
422: }
423: MPI_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
424: np = npt+1; /* so that it looks like an MPI_Comm_size output */
425: if (np > len) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Length of count array %D is less than number of partitions %D",len,np);
426: }
427: #endif
429: /*
430: lsizes - number of elements of each partition on this particular processor
431: sums - total number of "previous" nodes for any particular partition
432: starts - global number of first element in each partition on this processor
433: */
434: PetscMalloc(len*sizeof(PetscInt),&lsizes);
435: PetscMemzero(lsizes,len*sizeof(PetscInt));
436: for (i=0; i<n; i++) {
437: lsizes[indices[i]]++;
438: }
439: ISRestoreIndices(part,&indices);
440: npp = PetscMPIIntCast(len);
441: MPI_Allreduce(lsizes,count,npp,MPIU_INT,MPI_SUM,comm);
442: PetscFree(lsizes);
443: return(0);
444: }
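/*
   Illustrative usage sketch (editorial addition): count how many elements each
   partition receives, using PETSC_DEFAULT so the number of partitions defaults to the
   communicator size.  "part" is assumed to come from MatPartitioningApply().
*/
   PetscErrorCode ierr;
   PetscMPIInt    size;
   PetscInt       *count;

   ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
   ierr = PetscMalloc(size*sizeof(PetscInt),&count);CHKERRQ(ierr);
   ierr = ISPartitioningCount(part,PETSC_DEFAULT,count);CHKERRQ(ierr);
   /* count[p] now holds the global number of elements assigned to partition p */
   ierr = PetscFree(count);CHKERRQ(ierr);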
448: /*@
449: ISAllGather - Given an index set (IS) on each processor, generates a large
450: index set (same on each processor) by concatenating together each
451: processor's index set.
453: Collective on IS
455: Input Parameter:
456: . is - the distributed index set
458: Output Parameter:
459: . isout - the concatenated index set (same on all processors)
461: Notes:
462: ISAllGather() is clearly not scalable for large index sets.
464: The IS created on each processor must be created with a common
465: communicator (e.g., PETSC_COMM_WORLD). If the index sets were created
466: with PETSC_COMM_SELF, this routine will not work as expected, since
467: each process will generate its own new IS that consists only of
468: itself.
470: The communicator for this new IS is PETSC_COMM_SELF
472: Level: intermediate
474: Concepts: gather^index sets
475: Concepts: index sets^gathering to all processors
476: Concepts: IS^gathering to all processors
478: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock()
479: @*/
480: PetscErrorCode ISAllGather(IS is,IS *isout)
481: {
483: PetscInt *indices,n,i,N,step,first;
484: const PetscInt *lindices;
485: MPI_Comm comm;
486: PetscMPIInt size,*sizes = PETSC_NULL,*offsets = PETSC_NULL,nn;
487: PetscBool stride;
493: PetscObjectGetComm((PetscObject)is,&comm);
494: MPI_Comm_size(comm,&size);
495: ISGetLocalSize(is,&n);
496: PetscObjectTypeCompare((PetscObject)is,ISSTRIDE,&stride);
497: if (size == 1 && stride) { /* should handle parallel ISStride also */
498: ISStrideGetInfo(is,&first,&step);
499: ISCreateStride(PETSC_COMM_SELF,n,first,step,isout);
500: } else {
501: PetscMalloc2(size,PetscMPIInt,&sizes,size,PetscMPIInt,&offsets);
502:
503: nn = PetscMPIIntCast(n);
504: MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
505: offsets[0] = 0;
506: for (i=1;i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
507: N = offsets[size-1] + sizes[size-1];
508:
509: PetscMalloc(N*sizeof(PetscInt),&indices);
510: ISGetIndices(is,&lindices);
511: MPI_Allgatherv((void*)lindices,nn,MPIU_INT,indices,sizes,offsets,MPIU_INT,comm);
512: ISRestoreIndices(is,&lindices);
513: PetscFree2(sizes,offsets);
515: ISCreateGeneral(PETSC_COMM_SELF,N,indices,PETSC_OWN_POINTER,isout);
516: }
517: return(0);
518: }
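/*
   Illustrative usage sketch (editorial addition, not part of iscoloring.c): each rank
   contributes a small stride IS on PETSC_COMM_WORLD; the gathered IS lives on
   PETSC_COMM_SELF and has the same content on every rank.  The helper name
   "GatherExample" is made up.
*/
static PetscErrorCode GatherExample(void)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  IS             local,all;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_WORLD,3,10*rank,1,&local);CHKERRQ(ierr); /* 10r, 10r+1, 10r+2 */
  ierr = ISAllGather(local,&all);CHKERRQ(ierr);
  ierr = ISView(all,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);                /* same content on every rank */
  ierr = ISDestroy(&all);CHKERRQ(ierr);
  ierr = ISDestroy(&local);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}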
522: /*@C
523: ISAllGatherColors - Given a set of colors on each processor, generates a large
524: set (same on each processor) by concatenating together each processor's colors
526: Collective on MPI_Comm
528: Input Parameters:
529: + comm - communicator to share the indices
530: . n - local size of set
531: - lindices - local colors
533: Output Parameters:
534: + outN - total number of indices
535: - outindices - all of the colors
537: Notes:
538: ISAllGatherColors() is clearly not scalable for large index sets.
541: Level: intermediate
543: Concepts: gather^index sets
544: Concepts: index sets^gathering to all processors
545: Concepts: IS^gathering to all processors
547: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
548: @*/
549: PetscErrorCode ISAllGatherColors(MPI_Comm comm,PetscInt n,ISColoringValue *lindices,PetscInt *outN,ISColoringValue *outindices[])
550: {
551: ISColoringValue *indices;
552: PetscErrorCode ierr;
553: PetscInt i,N;
554: PetscMPIInt size,*offsets = PETSC_NULL,*sizes = PETSC_NULL, nn = n;
557: MPI_Comm_size(comm,&size);
558: PetscMalloc2(size,PetscMPIInt,&sizes,size,PetscMPIInt,&offsets);
559:
560: MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
561: offsets[0] = 0;
562: for (i=1;i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
563: N = offsets[size-1] + sizes[size-1];
564: PetscMalloc((N+1)*sizeof(ISColoringValue),&indices);
566: MPI_Allgatherv(lindices,(PetscMPIInt)n,MPIU_COLORING_VALUE,indices,sizes,offsets,MPIU_COLORING_VALUE,comm);
567: PetscFree2(sizes,offsets);
569: *outindices = indices;
570: if (outN) *outN = N;
571: return(0);
572: }
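/*
   Illustrative usage sketch (editorial addition): gather every rank's local colors into
   one array.  The output array is allocated inside ISAllGatherColors() with
   PetscMalloc(), so the caller frees it with PetscFree().  "nlocal" and "localcolors"
   are assumed to exist on each rank.
*/
   PetscErrorCode  ierr;
   PetscInt        nglobal;
   ISColoringValue *allcolors;

   ierr = ISAllGatherColors(PETSC_COMM_WORLD,nlocal,localcolors,&nglobal,&allcolors);CHKERRQ(ierr);
   /* ... use allcolors[0..nglobal-1] ... */
   ierr = PetscFree(allcolors);CHKERRQ(ierr);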
576: /*@
577: ISComplement - Given an index set (IS), generates the complement index set, that is,
578: all indices that are NOT in the given set.
580: Collective on IS
582: Input Parameters:
583: + is - the index set
584: . nmin - the first index desired in the local part of the complement
585: - nmax - one more than the largest index desired in the local part of the complement (note that all indices in is must be greater than or equal to nmin and less than nmax)
587: Output Parameter:
588: . isout - the complement
590: Notes: The communicator for this new IS is the same as for the input IS
592: For a parallel IS, this will generate the local part of the complement on each process
594: To generate the entire complement (on each process) of a parallel IS, first call ISAllGather() and then
595: call this routine.
597: Level: intermediate
599: Concepts: gather^index sets
600: Concepts: index sets^gathering to all processors
601: Concepts: IS^gathering to all processors
603: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
604: @*/
605: PetscErrorCode ISComplement(IS is,PetscInt nmin,PetscInt nmax,IS *isout)
606: {
608: const PetscInt *indices;
609: PetscInt n,i,j,unique,cnt,*nindices;
610: PetscBool sorted;
615: if (nmin < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be negative",nmin);
616: if (nmin > nmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be greater than nmax %D",nmin,nmax);
617: ISSorted(is,&sorted);
618: if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Index set must be sorted");
620: ISGetLocalSize(is,&n);
621: ISGetIndices(is,&indices);
622: #if defined(PETSC_USE_DEBUG)
623: for (i=0; i<n; i++) {
624: if (indices[i] < nmin) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is smaller than minimum given %D",i,indices[i],nmin);
625: if (indices[i] >= nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is larger than maximum given %D",i,indices[i],nmax);
626: }
627: #endif
628: /* Count number of unique entries */
629: unique = (n>0);
630: for (i=0; i<n-1; i++) {
631: if (indices[i+1] != indices[i]) unique++;
632: }
633: PetscMalloc((nmax-nmin-unique)*sizeof(PetscInt),&nindices);
634: cnt = 0;
635: for (i=nmin,j=0; i<nmax; i++) {
636: if (j<n && i==indices[j]) do { j++; } while (j<n && i==indices[j]);
637: else nindices[cnt++] = i;
638: }
639: if (cnt != nmax-nmin-unique) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of entries found in complement %D does not match expected %D",cnt,nmax-nmin-unique);
640: ISCreateGeneral(((PetscObject)is)->comm,cnt,nindices,PETSC_OWN_POINTER,isout);
641: ISRestoreIndices(is,&indices);
642: return(0);
643: }
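/*
   Illustrative usage sketch (editorial addition, not part of iscoloring.c): the
   complement of the sorted index set {1,3,4} within the range [0,6) is {0,2,5}.
   The helper name "ComplementExample" is made up.
*/
static PetscErrorCode ComplementExample(void)
{
  PetscErrorCode ierr;
  const PetscInt idx[3] = {1,3,4};
  IS             is,comp;

  PetscFunctionBegin;
  ierr = ISCreateGeneral(PETSC_COMM_SELF,3,idx,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  ierr = ISComplement(is,0,6,&comp);CHKERRQ(ierr);             /* comp contains 0, 2, 5 */
  ierr = ISView(comp,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
  ierr = ISDestroy(&comp);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}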