Actual source code: iscoloring.c

petsc-3.5.4 2015-05-23
  2: #include <petsc-private/isimpl.h>    /*I "petscis.h"  I*/
  3: #include <petscviewer.h>

  5: const char *const ISColoringTypes[] = {"global","ghosted","ISColoringType","IS_COLORING_",0};

  9: PetscErrorCode ISColoringReference(ISColoring coloring)
 10: {
 12:   (coloring)->refct++;
 13:   return(0);
 14: }

 18: PetscErrorCode ISColoringSetType(ISColoring coloring,ISColoringType type)
 19: {
 21:   (coloring)->ctype = type;
 22:   return(0);
 23: }

 27: /*@
 28:    ISColoringDestroy - Destroys a coloring context.

 30:    Collective on ISColoring

 32:    Input Parameter:
 33: .  iscoloring - the coloring context

 35:    Level: advanced

 37: .seealso: ISColoringView(), MatColoring
 38: @*/
 39: PetscErrorCode  ISColoringDestroy(ISColoring *iscoloring)
 40: {
 41:   PetscInt       i;

 45:   if (!*iscoloring) return(0);
 47:   if (--(*iscoloring)->refct > 0) {*iscoloring = 0; return(0);}

 49:   if ((*iscoloring)->is) {
 50:     for (i=0; i<(*iscoloring)->n; i++) {
 51:       ISDestroy(&(*iscoloring)->is[i]);
 52:     }
 53:     PetscFree((*iscoloring)->is);
 54:   }
 55:   PetscFree((*iscoloring)->colors);
 56:   PetscCommDestroy(&(*iscoloring)->comm);
 57:   PetscFree((*iscoloring));
 58:   return(0);
 59: }

 63: /*
 64:   ISColoringViewFromOptions - Processes command line options to determine if/how an ISColoring object is to be viewed. 

 66:   Collective on ISColoring

 68:   Input Parameters:
 69: + obj   - the ISColoring object
 70: . prefix - prefix to use for viewing, or NULL to use prefix of 'mat'
 71: - optionname - option to activate viewing

 73:   Level: intermediate

 75:   Developer Note: This cannot use PetscObjectViewFromOptions() because ISColoring is not a PetscObject

 77: */
 78: PetscErrorCode ISColoringViewFromOptions(ISColoring obj,const char prefix[],const char optionname[])
 79: {
 80:   PetscErrorCode    ierr;
 81:   PetscViewer       viewer;
 82:   PetscBool         flg;
 83:   static PetscBool  incall = PETSC_FALSE;
 84:   PetscViewerFormat format;

 87:   if (incall) return(0);
 88:   incall = PETSC_TRUE;
 89:   PetscOptionsGetViewer(obj->comm,prefix,optionname,&viewer,&format,&flg);
 90:   if (flg) {
 91:     PetscViewerPushFormat(viewer,format);
 92:     ISColoringView(obj,viewer);
 93:     PetscViewerPopFormat(viewer);
 94:     PetscViewerDestroy(&viewer);
 95:   }
 96:   incall = PETSC_FALSE;
 97:   return(0);
 98: }

102: /*@C
103:    ISColoringView - Views a coloring context.

105:    Collective on ISColoring

107:    Input Parameters:
108: +  iscoloring - the coloring context
109: -  viewer - the viewer

111:    Level: advanced

113: .seealso: ISColoringDestroy(), ISColoringGetIS(), MatColoring
114: @*/
115: PetscErrorCode  ISColoringView(ISColoring iscoloring,PetscViewer viewer)
116: {
117:   PetscInt       i;
119:   PetscBool      iascii;
120:   IS             *is;

124:   if (!viewer) {
125:     PetscViewerASCIIGetStdout(iscoloring->comm,&viewer);
126:   }

129:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
130:   if (iascii) {
131:     MPI_Comm    comm;
132:     PetscMPIInt size,rank;

134:     PetscObjectGetComm((PetscObject)viewer,&comm);
135:     MPI_Comm_size(comm,&size);
136:     MPI_Comm_rank(comm,&rank);
137:     PetscViewerASCIIPrintf(viewer,"ISColoring Object: %d MPI processes\n",size);
138:     PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
139:     PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Number of colors %d\n",rank,iscoloring->n);
140:     PetscViewerFlush(viewer);
141:     PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
142:   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for ISColoring",((PetscObject)viewer)->type_name);

144:   ISColoringGetIS(iscoloring,PETSC_IGNORE,&is);
145:   for (i=0; i<iscoloring->n; i++) {
146:     ISView(iscoloring->is[i],viewer);
147:   }
148:   ISColoringRestoreIS(iscoloring,&is);
149:   return(0);
150: }

154: /*@C
155:    ISColoringGetIS - Extracts index sets from the coloring context

157:    Collective on ISColoring

159:    Input Parameter:
160: .  iscoloring - the coloring context

162:    Output Parameters:
163: +  nn - number of index sets in the coloring context
164: -  is - array of index sets

166:    Level: advanced

168: .seealso: ISColoringRestoreIS(), ISColoringView()
169: @*/
170: PetscErrorCode  ISColoringGetIS(ISColoring iscoloring,PetscInt *nn,IS *isis[])
171: {


177:   if (nn) *nn = iscoloring->n;
178:   if (isis) {
179:     if (!iscoloring->is) {
180:       PetscInt        *mcolors,**ii,nc = iscoloring->n,i,base, n = iscoloring->N;
181:       ISColoringValue *colors = iscoloring->colors;
182:       IS              *is;

184: #if defined(PETSC_USE_DEBUG)
185:       for (i=0; i<n; i++) {
186:         if (((PetscInt)colors[i]) >= nc) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coloring value out of range: index %d value %d number of colors %d",(int)i,(int)colors[i],(int)nc);
187:       }
188: #endif

190:       /* generate the lists of nodes for each color */
191:       PetscCalloc1(nc,&mcolors);
192:       for (i=0; i<n; i++) mcolors[colors[i]]++;

194:       PetscMalloc1(nc,&ii);
195:       PetscMalloc1(n,&ii[0]);
196:       for (i=1; i<nc; i++) ii[i] = ii[i-1] + mcolors[i-1];
197:       PetscMemzero(mcolors,nc*sizeof(PetscInt));

199:       if (iscoloring->ctype == IS_COLORING_GLOBAL) {
200:         MPI_Scan(&iscoloring->N,&base,1,MPIU_INT,MPI_SUM,iscoloring->comm);
201:         base -= iscoloring->N;
202:         for (i=0; i<n; i++) ii[colors[i]][mcolors[colors[i]]++] = i + base; /* global idx */
203:       } else if (iscoloring->ctype == IS_COLORING_GHOSTED) {
204:         for (i=0; i<n; i++) ii[colors[i]][mcolors[colors[i]]++] = i;   /* local idx */
205:       } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not provided for this ISColoringType");

207:       PetscMalloc1(nc,&is);
208:       for (i=0; i<nc; i++) {
209:         ISCreateGeneral(iscoloring->comm,mcolors[i],ii[i],PETSC_COPY_VALUES,is+i);
210:       }

212:       iscoloring->is = is;
213:       PetscFree(ii[0]);
214:       PetscFree(ii);
215:       PetscFree(mcolors);
216:     }
217:     *isis = iscoloring->is;
218:   }
219:   return(0);
220: }

224: /*@C
225:    ISColoringRestoreIS - Restores the index sets extracted from the coloring context

227:    Collective on ISColoring

229:    Input Parameter:
230: +  iscoloring - the coloring context
231: -  is - array of index sets

233:    Level: advanced

235: .seealso: ISColoringGetIS(), ISColoringView()
236: @*/
237: PetscErrorCode  ISColoringRestoreIS(ISColoring iscoloring,IS *is[])
238: {

242:   /* currently nothing is done here */
243:   return(0);
244: }
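
   A minimal usage sketch for the get/restore pair (illustrative, not part of iscoloring.c). It assumes a valid ISColoring named coloring, e.g. built with ISColoringCreate() further below, inside a program that has already called PetscInitialize().

  PetscErrorCode ierr;
  PetscInt       nis,c;
  IS             *isa;

  ierr = ISColoringGetIS(coloring,&nis,&isa);CHKERRQ(ierr);         /* one IS per color */
  for (c=0; c<nis; c++) {
    ierr = ISView(isa[c],PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);  /* the indices carrying color c */
  }
  ierr = ISColoringRestoreIS(coloring,&isa);CHKERRQ(ierr);          /* the IS array stays owned by the coloring */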


249: /*@C
250:     ISColoringCreate - Generates an ISColoring context from lists (provided
251:     by each processor) of colors for each node.

253:     Collective on MPI_Comm

255:     Input Parameters:
256: +   comm - communicator for the processors creating the coloring
257: .   ncolors - max color value
258: .   n - number of nodes on this processor
259: -   colors - array containing the colors for this processor, color
260:              numbers begin at 0. In C/C++ this array must have been obtained with PetscMalloc()
261:              and should NOT be freed (ISColoringDestroy() will free it).

263:     Output Parameter:
264: .   iscoloring - the resulting coloring data structure

266:     Options Database Key:
267: .   -is_coloring_view - Activates ISColoringView()

269:    Level: advanced

271:     Notes: By default sets coloring type to  IS_COLORING_GLOBAL

273: .seealso: MatColoringCreate(), ISColoringView(), ISColoringDestroy(), ISColoringSetType()

275: @*/
276: PetscErrorCode  ISColoringCreate(MPI_Comm comm,PetscInt ncolors,PetscInt n,const ISColoringValue colors[],ISColoring *iscoloring)
277: {
279:   PetscMPIInt    size,rank,tag;
280:   PetscInt       base,top,i;
281:   PetscInt       nc,ncwork;
282:   MPI_Status     status;

285:   if (ncolors != PETSC_DECIDE && ncolors > IS_COLORING_MAX) {
286:     if (ncolors > 65535) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds 65535 limit. This number is unrealistic. Perhaps a bug in code?\nCurrent max: %d user requested: %d",IS_COLORING_MAX,ncolors);
287:     else                 SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Max color value exceeds limit. Perhaps reconfigure PETSc with --with-is-color-value-type=short?\n Current max: %d user requested: %d",IS_COLORING_MAX,ncolors);
288:   }
289:   PetscNew(iscoloring);
290:   PetscCommDuplicate(comm,&(*iscoloring)->comm,&tag);
291:   comm = (*iscoloring)->comm;

293:   /* compute the number of the first node on my processor */
294:   MPI_Comm_size(comm,&size);

296:   /* should use MPI_Scan() */
297:   MPI_Comm_rank(comm,&rank);
298:   if (!rank) {
299:     base = 0;
300:     top  = n;
301:   } else {
302:     MPI_Recv(&base,1,MPIU_INT,rank-1,tag,comm,&status);
303:     top  = base+n;
304:   }
305:   if (rank < size-1) {
306:     MPI_Send(&top,1,MPIU_INT,rank+1,tag,comm);
307:   }

309:   /* compute the total number of colors */
310:   ncwork = 0;
311:   for (i=0; i<n; i++) {
312:     if (ncwork < colors[i]) ncwork = colors[i];
313:   }
314:   ncwork++;
315:   MPI_Allreduce(&ncwork,&nc,1,MPIU_INT,MPI_MAX,comm);
316:   if (nc > ncolors) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of colors passed in %D is less than the actual number of colors in array %D",ncolors,nc);
317:   (*iscoloring)->n      = nc;
318:   (*iscoloring)->is     = 0;
319:   (*iscoloring)->colors = (ISColoringValue*)colors;
320:   (*iscoloring)->N      = n;
321:   (*iscoloring)->refct  = 1;
322:   (*iscoloring)->ctype  = IS_COLORING_GLOBAL;
323:   ISColoringViewFromOptions(*iscoloring,NULL,"-is_coloring_view");
324:   PetscInfo1(0,"Number of colors %D\n",nc);
325:   return(0);
326: }
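
   A small, self-contained usage sketch (illustrative, not part of iscoloring.c). The key point from the documentation above: the colors array is allocated with PetscMalloc1() and handed to the coloring, so the caller never frees it.

#include <petscis.h>
#include <petscviewer.h>

int main(int argc,char **argv)
{
  PetscErrorCode  ierr;
  PetscInt        i,n = 5;
  ISColoringValue *colors;
  ISColoring      coloring;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = PetscMalloc1(n,&colors);CHKERRQ(ierr);              /* ownership passes to the coloring */
  for (i=0; i<n; i++) colors[i] = (ISColoringValue)(i % 2);  /* alternate two colors: 0,1 */
  ierr = ISColoringCreate(PETSC_COMM_WORLD,2,n,colors,&coloring);CHKERRQ(ierr);
  ierr = ISColoringView(coloring,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = ISColoringDestroy(&coloring);CHKERRQ(ierr);         /* also frees the colors array */
  ierr = PetscFinalize();
  return ierr;
}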

330: /*@
331:     ISPartitioningToNumbering - Takes an ISPartitioning and on each processor
332:     generates an IS that contains a new global node number for each index based
333:     on the partitioning.

335:     Collective on IS

337:     Input Parameter:
338: .   partitioning - a partitioning as generated by MatPartitioningApply()

340:     Output Parameter:
341: .   is - on each processor the index set that defines the global numbers
342:          (in the new numbering) for all the nodes currently (before the partitioning)
343:          on that processor

345:    Level: advanced

347: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningCount()

349: @*/
350: PetscErrorCode  ISPartitioningToNumbering(IS part,IS *is)
351: {
352:   MPI_Comm       comm;
353:   PetscInt       i,np,npt,n,*starts = NULL,*sums = NULL,*lsizes = NULL,*newi = NULL;
354:   const PetscInt *indices = NULL;

358:   PetscObjectGetComm((PetscObject)part,&comm);

360:   /* count the number of partitions, i.e., virtual processors */
361:   ISGetLocalSize(part,&n);
362:   ISGetIndices(part,&indices);
363:   np   = 0;
364:   for (i=0; i<n; i++) np = PetscMax(np,indices[i]);
365:   MPI_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
366:   np   = npt+1; /* so that it looks like a MPI_Comm_size output */

368:   /*
369:         lsizes - number of elements of each partition on this particular processor
370:         sums - total number of "previous" nodes for any particular partition
371:         starts - global number of first element in each partition on this processor
372:   */
373:   PetscMalloc3(np,&lsizes,np,&starts,np,&sums);
374:   PetscMemzero(lsizes,np*sizeof(PetscInt));
375:   for (i=0; i<n; i++) lsizes[indices[i]]++;
376:   MPI_Allreduce(lsizes,sums,np,MPIU_INT,MPI_SUM,comm);
377:   MPI_Scan(lsizes,starts,np,MPIU_INT,MPI_SUM,comm);
378:   for (i=0; i<np; i++) starts[i] -= lsizes[i];
379:   for (i=1; i<np; i++) {
380:     sums[i]   += sums[i-1];
381:     starts[i] += sums[i-1];
382:   }

384:   /*
385:       For each local index give it the new global number
386:   */
387:   PetscMalloc1(n,&newi);
388:   for (i=0; i<n; i++) newi[i] = starts[indices[i]]++;
389:   PetscFree3(lsizes,starts,sums);

391:   ISRestoreIndices(part,&indices);
392:   ISCreateGeneral(comm,n,newi,PETSC_OWN_POINTER,is);
393:   ISSetPermutation(*is);
394:   return(0);
395: }
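
   A small usage sketch (illustrative, not part of iscoloring.c). Instead of a partitioning produced by MatPartitioningApply(), it hand-builds an IS in the same format (entry i holds the target process of local element i) and converts it to the new global numbering.

#include <petscis.h>
#include <petscviewer.h>

int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size;
  PetscInt       i,n = 4,*part;
  IS             partitioning,numbering;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);

  ierr = PetscMalloc1(n,&part);CHKERRQ(ierr);
  for (i=0; i<n; i++) part[i] = (rank + i) % size;            /* element i goes to process (rank+i)%size */
  ierr = ISCreateGeneral(PETSC_COMM_WORLD,n,part,PETSC_OWN_POINTER,&partitioning);CHKERRQ(ierr);

  ierr = ISPartitioningToNumbering(partitioning,&numbering);CHKERRQ(ierr);
  ierr = ISView(numbering,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = ISDestroy(&numbering);CHKERRQ(ierr);
  ierr = ISDestroy(&partitioning);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}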

399: /*@
400:     ISPartitioningCount - Takes an ISPartitioning and determines the number of
401:     resulting elements on each (partition) process

403:     Collective on IS

405:     Input Parameters:
406: +   partitioning - a partitioning as generated by MatPartitioningApply()
407: -   len - length of the array count; this is the total number of partitions

409:     Output Parameter:
410: .   count - array of length size, to contain the number of elements assigned
411:         to each partition, where size is the number of partitions generated
412:          (see notes below).

414:    Level: advanced

416:     Notes:
417:         By default the number of partitions generated (and thus the length
418:         of count) is the size of the communicator associated with IS,
419:         but it can be set with MatPartitioningSetNParts(). The resulting array
420:         of lengths can, for instance, serve as input to PCBJacobiSetTotalBlocks().


423: .seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningToNumbering(),
424:         MatPartitioningSetNParts(), MatPartitioningApply()

426: @*/
427: PetscErrorCode  ISPartitioningCount(IS part,PetscInt len,PetscInt count[])
428: {
429:   MPI_Comm       comm;
430:   PetscInt       i,n,*lsizes;
431:   const PetscInt *indices;
433:   PetscMPIInt    npp;

436:   PetscObjectGetComm((PetscObject)part,&comm);
437:   if (len == PETSC_DEFAULT) {
438:     PetscMPIInt size;
439:     MPI_Comm_size(comm,&size);
440:     len  = (PetscInt) size;
441:   }

443:   /* count the number of partitions */
444:   ISGetLocalSize(part,&n);
445:   ISGetIndices(part,&indices);
446: #if defined(PETSC_USE_DEBUG)
447:   {
448:     PetscInt np = 0,npt;
449:     for (i=0; i<n; i++) np = PetscMax(np,indices[i]);
450:     MPI_Allreduce(&np,&npt,1,MPIU_INT,MPI_MAX,comm);
451:     np   = npt+1; /* so that it looks like a MPI_Comm_size output */
452:     if (np > len) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Length of count array %D is less than number of partitions %D",len,np);
453:   }
454: #endif

456:   /*
457:         lsizes - number of elements of each partition on this particular processor
458:   */
461:   PetscCalloc1(len,&lsizes);
462:   for (i=0; i<n; i++) lsizes[indices[i]]++;
463:   ISRestoreIndices(part,&indices);
464:   PetscMPIIntCast(len,&npp);
465:   MPI_Allreduce(lsizes,count,npp,MPIU_INT,MPI_SUM,comm);
466:   PetscFree(lsizes);
467:   return(0);
468: }
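
   A short follow-on sketch (illustrative, not part of iscoloring.c), reusing the hand-built partitioning IS from the sketch after ISPartitioningToNumbering() above; count has one entry per target process.

  PetscErrorCode ierr;
  PetscMPIInt    size;
  PetscInt       *count;

  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = PetscMalloc1(size,&count);CHKERRQ(ierr);
  ierr = ISPartitioningCount(partitioning,(PetscInt)size,count);CHKERRQ(ierr); /* count[p] = elements ending up on process p */
  ierr = PetscFree(count);CHKERRQ(ierr);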

472: /*@
473:     ISAllGather - Given an index set (IS) on each processor, generates a large
474:     index set (same on each processor) by concatenating together each
475:     processor's index set.

477:     Collective on IS

479:     Input Parameter:
480: .   is - the distributed index set

482:     Output Parameter:
483: .   isout - the concatenated index set (same on all processors)

485:     Notes:
486:     ISAllGather() is clearly not scalable for large index sets.

488:     The IS created on each processor must be created with a common
489:     communicator (e.g., PETSC_COMM_WORLD). If the index sets were created
490:     with PETSC_COMM_SELF, this routine will not work as expected, since
491:     each process will generate its own new IS that consists only of
492:     itself.

494:     The communicator for this new IS is PETSC_COMM_SELF

496:     Level: intermediate

498:     Concepts: gather^index sets
499:     Concepts: index sets^gathering to all processors
500:     Concepts: IS^gathering to all processors

502: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock()
503: @*/
504: PetscErrorCode  ISAllGather(IS is,IS *isout)
505: {
507:   PetscInt       *indices,n,i,N,step,first;
508:   const PetscInt *lindices;
509:   MPI_Comm       comm;
510:   PetscMPIInt    size,*sizes = NULL,*offsets = NULL,nn;
511:   PetscBool      stride;


517:   PetscObjectGetComm((PetscObject)is,&comm);
518:   MPI_Comm_size(comm,&size);
519:   ISGetLocalSize(is,&n);
520:   PetscObjectTypeCompare((PetscObject)is,ISSTRIDE,&stride);
521:   if (size == 1 && stride) { /* should handle parallel ISStride also */
522:     ISStrideGetInfo(is,&first,&step);
523:     ISCreateStride(PETSC_COMM_SELF,n,first,step,isout);
524:   } else {
525:     PetscMalloc2(size,&sizes,size,&offsets);

527:     PetscMPIIntCast(n,&nn);
528:     MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
529:     offsets[0] = 0;
530:     for (i=1; i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
531:     N = offsets[size-1] + sizes[size-1];

533:     PetscMalloc1(N,&indices);
534:     ISGetIndices(is,&lindices);
535:     MPI_Allgatherv((void*)lindices,nn,MPIU_INT,indices,sizes,offsets,MPIU_INT,comm);
536:     ISRestoreIndices(is,&lindices);
537:     PetscFree2(sizes,offsets);

539:     ISCreateGeneral(PETSC_COMM_SELF,N,indices,PETSC_OWN_POINTER,isout);
540:   }
541:   return(0);
542: }
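
   A minimal sketch (illustrative, not part of iscoloring.c), assuming an existing IS named is that was created on a parallel communicator; the gathered copy lives on PETSC_COMM_SELF, as noted above.

  PetscErrorCode ierr;
  PetscInt       N;
  IS             allis;

  ierr = ISAllGather(is,&allis);CHKERRQ(ierr);    /* every process receives the full concatenated set */
  ierr = ISGetLocalSize(allis,&N);CHKERRQ(ierr);  /* N now equals the global length on each process */
  ierr = ISDestroy(&allis);CHKERRQ(ierr);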

546: /*@C
547:     ISAllGatherColors - Given a set of colors on each processor, generates a large
548:     set (same on each processor) by concatenating together each processor's colors

550:     Collective on MPI_Comm

552:     Input Parameter:
553: +   comm - communicator to share the indices
554: .   n - local size of set
555: -   lindices - local colors

557:     Output Parameter:
558: +   outN - total number of indices
559: -   outindices - all of the colors

561:     Notes:
562:     ISAllGatherColors() is clearly not scalable for large index sets.


565:     Level: intermediate

567:     Concepts: gather^index sets
568:     Concepts: index sets^gathering to all processors
569:     Concepts: IS^gathering to all processors

571: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
572: @*/
573: PetscErrorCode  ISAllGatherColors(MPI_Comm comm,PetscInt n,ISColoringValue *lindices,PetscInt *outN,ISColoringValue *outindices[])
574: {
575:   ISColoringValue *indices;
576:   PetscErrorCode  ierr;
577:   PetscInt        i,N;
578:   PetscMPIInt     size,*offsets = NULL,*sizes = NULL, nn = n;

581:   MPI_Comm_size(comm,&size);
582:   PetscMalloc2(size,&sizes,size,&offsets);

584:   MPI_Allgather(&nn,1,MPI_INT,sizes,1,MPI_INT,comm);
585:   offsets[0] = 0;
586:   for (i=1; i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
587:   N    = offsets[size-1] + sizes[size-1];
588:   PetscMalloc1((N+1),&indices);
589:   MPI_Allgatherv(lindices,(PetscMPIInt)n,MPIU_COLORING_VALUE,indices,sizes,offsets,MPIU_COLORING_VALUE,comm);
590:   PetscFree2(sizes,offsets);   /* sizes and offsets must remain valid until after MPI_Allgatherv() */

593:   *outindices = indices;
594:   if (outN) *outN = N;
595:   return(0);
596: }
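
   A minimal sketch (illustrative, not part of iscoloring.c), assuming n local colors stored in an ISColoringValue array lcolors on each process; the gathered array is allocated by the routine and freed by the caller.

  PetscErrorCode  ierr;
  PetscInt        N;
  ISColoringValue *gcolors;

  ierr = ISAllGatherColors(PETSC_COMM_WORLD,n,lcolors,&N,&gcolors);CHKERRQ(ierr);
  /* gcolors now holds all N colors on every process */
  ierr = PetscFree(gcolors);CHKERRQ(ierr);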

600: /*@
601:     ISComplement - Given an index set (IS), generates the complement index set, that is,
602:        all indices that are NOT in the given set.

604:     Collective on IS

606:     Input Parameter:
607: +   is - the index set
608: .   nmin - the first index desired in the local part of the complement
609: -   nmax - the largest index desired in the local part of the complement (note that all indices in is must be greater than or equal to nmin and less than nmax)

611:     Output Parameter:
612: .   isout - the complement

614:     Notes:  The communicator for this new IS is the same as for the input IS

616:       For a parallel IS, this will generate the local part of the complement on each process

618:       To generate the entire complement (on each process) of a parallel IS, first call ISAllGather() and then
619:     call this routine.

621:     Level: intermediate

623:     Concepts: gather^index sets
624:     Concepts: index sets^gathering to all processors
625:     Concepts: IS^gathering to all processors

627: .seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
628: @*/
629: PetscErrorCode  ISComplement(IS is,PetscInt nmin,PetscInt nmax,IS *isout)
630: {
632:   const PetscInt *indices;
633:   PetscInt       n,i,j,unique,cnt,*nindices;
634:   PetscBool      sorted;

639:   if (nmin < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be negative",nmin);
640:   if (nmin > nmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"nmin %D cannot be greater than nmax %D",nmin,nmax);
641:   ISSorted(is,&sorted);
642:   if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Index set must be sorted");

644:   ISGetLocalSize(is,&n);
645:   ISGetIndices(is,&indices);
646: #if defined(PETSC_USE_DEBUG)
647:   for (i=0; i<n; i++) {
648:     if (indices[i] <  nmin) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is smaller than minimum given %D",i,indices[i],nmin);
649:     if (indices[i] >= nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index %D's value %D is larger than maximum given %D",i,indices[i],nmax);
650:   }
651: #endif
652:   /* Count number of unique entries */
653:   unique = (n>0);
654:   for (i=0; i<n-1; i++) {
655:     if (indices[i+1] != indices[i]) unique++;
656:   }
657:   PetscMalloc1((nmax-nmin-unique),&nindices);
658:   cnt  = 0;
659:   for (i=nmin,j=0; i<nmax; i++) {
660:     if (j<n && i==indices[j]) do { j++; } while (j<n && i==indices[j]);
661:     else nindices[cnt++] = i;
662:   }
663:   if (cnt != nmax-nmin-unique) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of entries found in complement %D does not match expected %D",cnt,nmax-nmin-unique);
664:   ISCreateGeneral(PetscObjectComm((PetscObject)is),cnt,nindices,PETSC_OWN_POINTER,isout);
665:   ISRestoreIndices(is,&indices);
666:   return(0);
667: }
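
   A minimal sketch (illustrative, not part of iscoloring.c): the input IS must be sorted and fall inside [nmin,nmax), as checked above; here the complement of {1,3,4,7} in [0,10) is {0,2,5,6,8,9}.

  PetscErrorCode ierr;
  PetscInt       idx[] = {1,3,4,7};
  IS             is,isc;

  ierr = ISCreateGeneral(PETSC_COMM_SELF,4,idx,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  ierr = ISComplement(is,0,10,&isc);CHKERRQ(ierr);             /* isc = {0,2,5,6,8,9} */
  ierr = ISView(isc,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
  ierr = ISDestroy(&isc);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);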