Actual source code: isltog.c

petsc-3.4.5 2014-06-29
  2: #include <petsc-private/isimpl.h>    /*I "petscis.h"  I*/
  3: #include <petscsf.h>
  4: #include <petscviewer.h>

  6: PetscClassId IS_LTOGM_CLASSID;

 10: PetscErrorCode ISG2LMapApply(ISLocalToGlobalMapping mapping,PetscInt n,const PetscInt in[],PetscInt out[])
 11: {
 13:   PetscInt       i,*globals,start,end;

 16:   if (!mapping->globals) {  /* build the global-to-local table lazily before reading its cached fields */
 17:     ISGlobalToLocalMappingApply(mapping,IS_GTOLM_MASK,0,0,0,0);
 18:   }
       globals = mapping->globals; start = mapping->globalstart; end = mapping->globalend;
 19:   for (i=0; i<n; i++) {
 20:     if (in[i] < 0)          out[i] = in[i];
 21:     else if (in[i] < start) out[i] = -1;
 22:     else if (in[i] > end)   out[i] = -1;
 23:     else                    out[i] = globals[in[i] - start];
 24:   }
 25:   return(0);
 26: }


 31: /*@C
 32:     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

 34:     Not Collective

 36:     Input Parameter:
 37: .   ltog - local to global mapping

 39:     Output Parameter:
 40: .   n - the number of entries in the local mapping

 42:     Level: advanced

 44:     Concepts: mapping^local to global

 46: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 47: @*/
 48: PetscErrorCode  ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
 49: {
 53:   *n = mapping->n;
 54:   return(0);
 55: }

 59: /*@C
 60:     ISLocalToGlobalMappingView - View a local to global mapping

 62:     Not Collective

 64:     Input Parameters:
 65: +   ltog - local to global mapping
 66: -   viewer - viewer used to display the mapping; if NULL, the default ASCII viewer on stdout is used

 68:     Level: advanced

 70:     Concepts: mapping^local to global

 72: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 73: @*/
 74: PetscErrorCode  ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
 75: {
 76:   PetscInt       i;
 77:   PetscMPIInt    rank;
 78:   PetscBool      iascii;

 83:   if (!viewer) {
 84:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mapping),&viewer);
 85:   }

 88:   MPI_Comm_rank(PetscObjectComm((PetscObject)mapping),&rank);
 89:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
 90:   if (iascii) {
 91:     PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
 92:     for (i=0; i<mapping->n; i++) {
 93:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);
 94:     }
 95:     PetscViewerFlush(viewer);
 96:     PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
 97:   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
 98:   return(0);
 99: }
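
A minimal usage sketch (not part of isltog.c): the helper name, communicator, and index values are illustrative, and error checking is omitted to match the stripped listing above.

static PetscErrorCode ExampleViewMapping(void)
{
  ISLocalToGlobalMapping ltog;
  const PetscInt         globals[] = {3,7,1,9};        /* illustrative global index of each local point */

  ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,globals,PETSC_COPY_VALUES,&ltog);
  ISLocalToGlobalMappingView(ltog,PETSC_VIEWER_STDOUT_WORLD); /* one "[rank] local global" line per entry */
  ISLocalToGlobalMappingDestroy(&ltog);
  return(0);
}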

103: /*@
104:     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
105:     ordering and a global parallel ordering.

107:     Not collective

109:     Input Parameter:
110: .   is - index set containing the global numbers for each local number

112:     Output Parameter:
113: .   mapping - new mapping data structure

115:     Level: advanced

117:     Concepts: mapping^local to global

119: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
120: @*/
121: PetscErrorCode  ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
122: {
124:   PetscInt       n;
125:   const PetscInt *indices;
126:   MPI_Comm       comm;


132:   PetscObjectGetComm((PetscObject)is,&comm);
133:   ISGetLocalSize(is,&n);
134:   ISGetIndices(is,&indices);
135:   ISLocalToGlobalMappingCreate(comm,n,indices,PETSC_COPY_VALUES,mapping);
136:   ISRestoreIndices(is,&indices);
137:   return(0);
138: }
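
A sketch of the IS-based constructor (not from isltog.c); the index values are illustrative and error checking is omitted.

static PetscErrorCode ExampleCreateFromIS(void)
{
  IS                     is;
  ISLocalToGlobalMapping ltog;
  const PetscInt         globals[] = {0,2,4,6};   /* global number of each local point */

  ISCreateGeneral(PETSC_COMM_WORLD,4,globals,PETSC_COPY_VALUES,&is);
  ISLocalToGlobalMappingCreateIS(is,&ltog);
  ISDestroy(&is);                                 /* the mapping keeps its own copy of the indices */
  ISLocalToGlobalMappingDestroy(&ltog);
  return(0);
}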

142: /*@C
143:     ISLocalToGlobalMappingCreateSF - Creates a mapping between a local (0 to n)
144:     ordering and a global parallel ordering.

146:     Collective

148:     Input Parameters:
149: +   sf - star forest mapping contiguous local indices to (rank, offset)
150: -   start - first global index on this process

152:     Output Parameter:
153: .   mapping - new mapping data structure

155:     Level: advanced

157:     Concepts: mapping^local to global

159: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
160: @*/
161: PetscErrorCode ISLocalToGlobalMappingCreateSF(PetscSF sf,PetscInt start,ISLocalToGlobalMapping *mapping)
162: {
164:   PetscInt       i,maxlocal,nroots,nleaves,*globals,*ltog;
165:   const PetscInt *ilocal;
166:   MPI_Comm       comm;


172:   PetscObjectGetComm((PetscObject)sf,&comm);
173:   PetscSFGetGraph(sf,&nroots,&nleaves,&ilocal,NULL);
174:   if (ilocal) {
175:     for (i=0,maxlocal=0; i<nleaves; i++) maxlocal = PetscMax(maxlocal,ilocal[i]+1);
176:   } else maxlocal = nleaves;
178:   PetscMalloc(nroots*sizeof(PetscInt),&globals);
179:   PetscMalloc(maxlocal*sizeof(PetscInt),&ltog);
180:   for (i=0; i<nroots; i++) globals[i] = start + i;
181:   for (i=0; i<maxlocal; i++) ltog[i] = -1;
182:   PetscSFBcastBegin(sf,MPIU_INT,globals,ltog);
183:   PetscSFBcastEnd(sf,MPIU_INT,globals,ltog);
184:   ISLocalToGlobalMappingCreate(comm,maxlocal,ltog,PETSC_OWN_POINTER,mapping);
185:   PetscFree(globals);
186:   return(0);
187: }
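
A possible sketch of the star-forest constructor, assuming a ring of processes in which each process owns three contiguous global indices and ghosts the first index of its right neighbor; the helper name and sizes are illustrative, and error checking is omitted.

static PetscErrorCode ExampleCreateFromSF(void)
{
  PetscSF                sf;
  PetscSFNode            remote[4];
  PetscMPIInt            rank,size;
  PetscInt               i,nowned = 3;
  ISLocalToGlobalMapping ltog;

  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  for (i=0; i<nowned; i++) {remote[i].rank = rank; remote[i].index = i;}  /* local points 0..2 are owned roots */
  remote[nowned].rank  = (rank+1)%size;                                   /* local point 3 ghosts the neighbor's first point */
  remote[nowned].index = 0;
  PetscSFCreate(PETSC_COMM_WORLD,&sf);
  PetscSFSetGraph(sf,nowned,nowned+1,NULL,PETSC_COPY_VALUES,remote,PETSC_COPY_VALUES);
  ISLocalToGlobalMappingCreateSF(sf,nowned*rank,&ltog);                   /* first global index owned here is 3*rank */
  PetscSFDestroy(&sf);
  ISLocalToGlobalMappingDestroy(&ltog);
  return(0);
}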

191: /*@
192:     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
193:     ordering and a global parallel ordering.

195:     Not Collective, but communicator may have more than one process

197:     Input Parameters:
198: +   comm - MPI communicator
199: .   n - the number of local elements
200: .   indices - the global index for each local element; these do not need to be sorted in increasing order
201: -   mode - see PetscCopyMode

203:     Output Parameter:
204: .   mapping - new mapping data structure

206:     Level: advanced

208:     Concepts: mapping^local to global

210: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
211: @*/
212: PetscErrorCode  ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],PetscCopyMode mode,ISLocalToGlobalMapping *mapping)
213: {
215:   PetscInt       *in;


221:   *mapping = NULL;
222: #if !defined(PETSC_USE_DYNAMIC_LIBRARIES)
223:   ISInitializePackage();
224: #endif

226:   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_CLASSID,"ISLocalToGlobalMapping","Local to global mapping","IS",
227:                            cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
228:   (*mapping)->n = n;
229:   /*
230:     Do not create the global to local mapping. This is only created if
231:     ISGlobalToLocalMappingApply() is called
232:   */
233:   (*mapping)->globals = 0;
234:   if (mode == PETSC_COPY_VALUES) {
235:     PetscMalloc(n*sizeof(PetscInt),&in);
236:     PetscMemcpy(in,indices,n*sizeof(PetscInt));
237:     PetscLogObjectMemory(*mapping,n*sizeof(PetscInt));
238:     (*mapping)->indices = in;
239:   } else if (mode == PETSC_OWN_POINTER) (*mapping)->indices = (PetscInt*)indices;
240:   else SETERRQ(cm,PETSC_ERR_SUP,"Cannot currently use PETSC_USE_POINTER");
241:   return(0);
242: }
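
A minimal sketch of direct creation from an array, also showing ISLocalToGlobalMappingGetSize(); the helper name and values are illustrative and error checking is omitted.

static PetscErrorCode ExampleCreateMapping(void)
{
  ISLocalToGlobalMapping ltog;
  PetscInt               n;
  const PetscInt         globals[] = {5,0,3,8};   /* local 0..3 map to global 5,0,3,8 */

  ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,globals,PETSC_COPY_VALUES,&ltog);
  ISLocalToGlobalMappingGetSize(ltog,&n);          /* n is 4, the number of local entries */
  ISLocalToGlobalMappingDestroy(&ltog);
  return(0);
}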

246: /*@
247:     ISLocalToGlobalMappingBlock - Creates a blocked index version of an
248:        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
249:        and VecSetLocalToGlobalMappingBlock().

251:     Not Collective, but communicator may have more than one process

253:     Input Parameters:
254: +    inmap - original point-wise mapping
255: -    bs - block size

257:     Output Parameter:
258: .   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.

260:     Level: advanced

262:     Concepts: mapping^local to global

264: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
265: @*/
266: PetscErrorCode  ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
267: {
269:   PetscInt       *ii,i,n;

274:   if (bs > 1) {
275:     n = inmap->n/bs;
276:     if (n*bs != inmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
277:     PetscMalloc(n*sizeof(PetscInt),&ii);
278:     for (i=0; i<n; i++) ii[i] = inmap->indices[bs*i]/bs;
279:     ISLocalToGlobalMappingCreate(PetscObjectComm((PetscObject)inmap),n,ii,PETSC_OWN_POINTER,outmap);
280:   } else {
281:     PetscObjectReference((PetscObject)inmap);
282:     *outmap = inmap;
283:   }
284:   return(0);
285: }
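
A sketch of building a block mapping from a pointwise one with block size 2; the pointwise indices are illustrative and error checking is omitted.

static PetscErrorCode ExampleBlockMapping(void)
{
  ISLocalToGlobalMapping pointmap,blockmap;
  const PetscInt         globals[] = {10,11,4,5}; /* two size-2 blocks starting at global 10 and 4 */

  ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,globals,PETSC_COPY_VALUES,&pointmap);
  ISLocalToGlobalMappingBlock(pointmap,2,&blockmap); /* block indices become 10/2=5 and 4/2=2 */
  ISLocalToGlobalMappingDestroy(&blockmap);
  ISLocalToGlobalMappingDestroy(&pointmap);
  return(0);
}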

289: /*@
290:     ISLocalToGlobalMappingUnBlock - Creates a scalar index version of a blocked
291:        ISLocalToGlobalMapping

293:     Not Collective, but communicator may have more than one process

295:     Input Parameters:
296: + inmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.
297: - bs - block size

299:     Output Parameter:
300: .   outmap - pointwise mapping

302:     Level: advanced

304:     Concepts: mapping^local to global

306: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingBlock()
307: @*/
308: PetscErrorCode  ISLocalToGlobalMappingUnBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
309: {
311:   PetscInt       *ii,i,n;

316:   if (bs > 1) {
317:     n    = inmap->n*bs;
318:     PetscMalloc(n*sizeof(PetscInt),&ii);
319:     for (i=0; i<n; i++) ii[i] = inmap->indices[i/bs]*bs + (i%bs);
320:     ISLocalToGlobalMappingCreate(PetscObjectComm((PetscObject)inmap),n,ii,PETSC_OWN_POINTER,outmap);
321:   } else {
322:     PetscObjectReference((PetscObject)inmap);
323:     *outmap = inmap;
324:   }
325:   return(0);
326: }
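
The inverse direction, sketched with the same illustrative block indices; error checking is omitted.

static PetscErrorCode ExampleUnBlockMapping(void)
{
  ISLocalToGlobalMapping blockmap,pointmap;
  const PetscInt         blocks[] = {5,2};        /* block indices for block size 2 */

  ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,2,blocks,PETSC_COPY_VALUES,&blockmap);
  ISLocalToGlobalMappingUnBlock(blockmap,2,&pointmap); /* pointwise indices become 10,11,4,5 */
  ISLocalToGlobalMappingDestroy(&pointmap);
  ISLocalToGlobalMappingDestroy(&blockmap);
  return(0);
}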

330: /*@
331:    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
332:    ordering and a global parallel ordering.

334:    Not Collective

336:    Input Parameter:
337: .  mapping - mapping data structure

339:    Level: advanced

341: .seealso: ISLocalToGlobalMappingCreate()
342: @*/
343: PetscErrorCode  ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping *mapping)
344: {

348:   if (!*mapping) return(0);
350:   if (--((PetscObject)(*mapping))->refct > 0) {*mapping = 0;return(0);}
351:   PetscFree((*mapping)->indices);
352:   PetscFree((*mapping)->globals);
353:   PetscHeaderDestroy(mapping);
354:   *mapping = 0;
355:   return(0);
356: }

360: /*@
361:     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
362:     a new index set using the global numbering defined in an ISLocalToGlobalMapping
363:     context.

365:     Not collective

367:     Input Parameters:
368: +   mapping - mapping between local and global numbering
369: -   is - index set in local numbering

371:     Output Parameters:
372: .   newis - index set in global numbering

374:     Level: advanced

376:     Concepts: mapping^local to global

378: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
379:           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
380: @*/
381: PetscErrorCode  ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
382: {
384:   PetscInt       n,i,*idxmap,*idxout,Nmax = mapping->n;
385:   const PetscInt *idxin;


392:   ISGetLocalSize(is,&n);
393:   ISGetIndices(is,&idxin);
394:   idxmap = mapping->indices;

396:   PetscMalloc(n*sizeof(PetscInt),&idxout);
397:   for (i=0; i<n; i++) {
398:     if (idxin[i] >= Nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local index %D too large %D (max) at %D",idxin[i],Nmax-1,i);
399:     idxout[i] = idxmap[idxin[i]];
400:   }
401:   ISRestoreIndices(is,&idxin);
402:   ISCreateGeneral(PETSC_COMM_SELF,n,idxout,PETSC_OWN_POINTER,newis);
403:   return(0);
404: }
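
A sketch of translating a local-numbering IS into global numbering; the values are illustrative and error checking is omitted.

static PetscErrorCode ExampleApplyIS(void)
{
  ISLocalToGlobalMapping ltog;
  IS                     locis,globis;
  const PetscInt         globals[] = {5,0,3,8};
  const PetscInt         locidx[]  = {2,0};       /* local indices to translate */

  ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,globals,PETSC_COPY_VALUES,&ltog);
  ISCreateGeneral(PETSC_COMM_SELF,2,locidx,PETSC_COPY_VALUES,&locis);
  ISLocalToGlobalMappingApplyIS(ltog,locis,&globis); /* globis contains 3,5 */
  ISDestroy(&globis);
  ISDestroy(&locis);
  ISLocalToGlobalMappingDestroy(&ltog);
  return(0);
}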

408: /*@
409:    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
410:    and converts them to the global numbering.

412:    Not collective

414:    Input Parameters:
415: +  mapping - the local to global mapping context
416: .  N - number of integers
417: -  in - input indices in local numbering

419:    Output Parameter:
420: .  out - indices in global numbering

422:    Notes:
423:    The in and out array parameters may be identical.

425:    Level: advanced

427: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
428:           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
429:           AOPetscToApplication(), ISGlobalToLocalMappingApply()

431:     Concepts: mapping^local to global
432: @*/
433: PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,PetscInt N,const PetscInt in[],PetscInt out[])
434: {
435:   PetscInt       i,Nmax = mapping->n;
436:   const PetscInt *idx = mapping->indices;

439:   for (i=0; i<N; i++) {
440:     if (in[i] < 0) {
441:       out[i] = in[i];
442:       continue;
443:     }
444:     if (in[i] >= Nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local index %D too large %D (max) at %D",in[i],Nmax,i);
445:     out[i] = idx[in[i]];
446:   }
447:   return(0);
448: }
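
A sketch of the array form; the values are illustrative and error checking is omitted.

static PetscErrorCode ExampleApply(void)
{
  ISLocalToGlobalMapping ltog;
  const PetscInt         globals[] = {5,0,3,8};
  PetscInt               in[3] = {0,3,-1},out[3];

  ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,globals,PETSC_COPY_VALUES,&ltog);
  ISLocalToGlobalMappingApply(ltog,3,in,out);      /* out = {5,8,-1}; negative entries pass through */
  ISLocalToGlobalMappingDestroy(&ltog);
  return(0);
}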

450: /* -----------------------------------------------------------------------------------------*/

454: /*
455:     Creates the global fields in the ISLocalToGlobalMapping structure
456: */
457: static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
458: {
460:   PetscInt       i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

463:   end   = 0;
464:   start = PETSC_MAX_INT;

466:   for (i=0; i<n; i++) {
467:     if (idx[i] < 0) continue;
468:     if (idx[i] < start) start = idx[i];
469:     if (idx[i] > end)   end   = idx[i];
470:   }
471:   if (start > end) {start = 0; end = -1;}
472:   mapping->globalstart = start;
473:   mapping->globalend   = end;

475:   PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);
476:   mapping->globals = globals;
477:   for (i=0; i<end-start+1; i++) globals[i] = -1;
478:   for (i=0; i<n; i++) {
479:     if (idx[i] < 0) continue;
480:     globals[idx[i] - start] = i;
481:   }

483:   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));
484:   return(0);
485: }
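
An illustration of the table this routine builds, with hypothetical values (not taken from the source):

/*
   mapping->indices     = {9, 2, 4}                      (local -> global)
   mapping->globalstart = 2,  mapping->globalend = 9
   globals[0..7]        = {1, -1, 2, -1, -1, -1, -1, 0}  (global g -> local globals[g-start], -1 if absent)
*/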

489: /*@
490:     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
491:     specified with a global numbering.

493:     Not collective

495:     Input Parameters:
496: +   mapping - mapping between local and global numbering
497: .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
498:            IS_GTOLM_DROP - drops the indices with no local value from the output list
499: .   n - number of global indices to map
500: -   idx - global indices to map

502:     Output Parameters:
503: +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
504: -   idxout - local index of each global index, one must pass in an array long enough
505:              to hold all the indices. You can call ISGlobalToLocalMappingApply() with
506:              idxout == NULL to determine the required length (returned in nout)
507:              and then allocate the required space and call ISGlobalToLocalMappingApply()
508:              a second time to set the values.

510:     Notes:
511:     Either nout or idxout may be NULL. idx and idxout may be identical.

513:     This is not scalable in memory usage. Each processor requires an array of
514:     size O(Nglobal) to compute these.

516:     Level: advanced

518:     Developer Note: The manual page states that idx and idxout may be identical but the calling
519:        sequence declares idx as const so it cannot be the same as idxout.

521:     Concepts: mapping^global to local

523: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
524:           ISLocalToGlobalMappingDestroy()
525: @*/
526: PetscErrorCode  ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
527:                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
528: {
529:   PetscInt       i,*globals,nf = 0,tmp,start,end;

534:   if (!mapping->globals) {
535:     ISGlobalToLocalMappingSetUp_Private(mapping);
536:   }
537:   globals = mapping->globals;
538:   start   = mapping->globalstart;
539:   end     = mapping->globalend;

541:   if (type == IS_GTOLM_MASK) {
542:     if (idxout) {
543:       for (i=0; i<n; i++) {
544:         if (idx[i] < 0) idxout[i] = idx[i];
545:         else if (idx[i] < start) idxout[i] = -1;
546:         else if (idx[i] > end)   idxout[i] = -1;
547:         else                     idxout[i] = globals[idx[i] - start];
548:       }
549:     }
550:     if (nout) *nout = n;
551:   } else {
552:     if (idxout) {
553:       for (i=0; i<n; i++) {
554:         if (idx[i] < 0) continue;
555:         if (idx[i] < start) continue;
556:         if (idx[i] > end) continue;
557:         tmp = globals[idx[i] - start];
558:         if (tmp < 0) continue;
559:         idxout[nf++] = tmp;
560:       }
561:     } else {
562:       for (i=0; i<n; i++) {
563:         if (idx[i] < 0) continue;
564:         if (idx[i] < start) continue;
565:         if (idx[i] > end) continue;
566:         tmp = globals[idx[i] - start];
567:         if (tmp < 0) continue;
568:         nf++;
569:       }
570:     }
571:     if (nout) *nout = nf;
572:   }
573:   return(0);
574: }
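
A sketch of the two-pass IS_GTOLM_DROP idiom described in the notes above; the values are illustrative and error checking is omitted.

static PetscErrorCode ExampleGlobalToLocal(void)
{
  ISLocalToGlobalMapping ltog;
  const PetscInt         globals[] = {5,0,3,8};
  PetscInt               gidx[3] = {3,7,5},nout,*lidx;

  ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,globals,PETSC_COPY_VALUES,&ltog);
  ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,gidx,&nout,NULL);  /* first pass: count the mappable indices */
  PetscMalloc(nout*sizeof(PetscInt),&lidx);
  ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,gidx,&nout,lidx);  /* second pass: nout=2, lidx={2,0}; global 7 is dropped */
  PetscFree(lidx);
  ISLocalToGlobalMappingDestroy(&ltog);
  return(0);
}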

578: /*@C
579:     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
580:      each index shared by more than one processor

582:     Collective on ISLocalToGlobalMapping

584:     Input Parameter:
585: .   mapping - the mapping from local to global indexing

587:     Output Parameters:
588: +   nproc - number of processors that are connected to this one
589: .   proc - neighboring processors
590: .   numproc - number of indices for each subdomain (processor)
591: -   indices - indices of nodes (in local numbering) shared with neighbors (sorted by global numbering)

593:     Level: advanced

595:     Concepts: mapping^local to global

597:     Fortran Usage:
598: $        ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by
599: $        ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
600:           PetscInt indices[nproc][numprocmax],ierr)
601:         There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and
602:         indices[][] are large enough arrays, either by allocating them dynamically or defining static ones large enough.


605: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
606:           ISLocalToGlobalMappingRestoreInfo()
607: @*/
608: PetscErrorCode  ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
609: {
611:   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
612:   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
613:   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
614:   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
615:   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
616:   PetscInt       first_procs,first_numprocs,*first_indices;
617:   MPI_Request    *recv_waits,*send_waits;
618:   MPI_Status     recv_status,*send_status,*recv_statuses;
619:   MPI_Comm       comm;
620:   PetscBool      debug = PETSC_FALSE;

624:   PetscObjectGetComm((PetscObject)mapping,&comm);
625:   MPI_Comm_size(comm,&size);
626:   MPI_Comm_rank(comm,&rank);
627:   if (size == 1) {
628:     *nproc         = 0;
629:     *procs         = NULL;
630:     PetscMalloc(sizeof(PetscInt),numprocs);
631:     (*numprocs)[0] = 0;
632:     PetscMalloc(sizeof(PetscInt*),indices);
633:     (*indices)[0]  = NULL;
634:     return(0);
635:   }

637:   PetscOptionsGetBool(NULL,"-islocaltoglobalmappinggetinfo_debug",&debug,NULL);

639:   /*
640:     Notes on ISLocalToGlobalMappingGetInfo

642:     globally owned node - the nodes that have been assigned to this processor in global
643:            numbering, just for this routine.

645:     nontrivial globally owned node - node assigned to this processor that is on a subdomain
646:            boundary (i.e. it has more than one local owner)

648:     locally owned node - node that exists on this processor's subdomain

650:     nontrivial locally owned node - node that is not in the interior (i.e. belongs to more than
651:            one local subdomain)
652:   */
653:   PetscObjectGetNewTag((PetscObject)mapping,&tag1);
654:   PetscObjectGetNewTag((PetscObject)mapping,&tag2);
655:   PetscObjectGetNewTag((PetscObject)mapping,&tag3);

657:   for (i=0; i<n; i++) {
658:     if (lindices[i] > max) max = lindices[i];
659:   }
660:   MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);
661:   Ng++;
662:   MPI_Comm_size(comm,&size);
663:   MPI_Comm_rank(comm,&rank);
664:   scale  = Ng/size + 1;
665:   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
666:   rstart = scale*rank;

668:   /* determine ownership ranges of global indices */
669:   PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
670:   PetscMemzero(nprocs,2*size*sizeof(PetscInt));

672:   /* determine owners of each local node  */
673:   PetscMalloc(n*sizeof(PetscInt),&owner);
674:   for (i=0; i<n; i++) {
675:     proc             = lindices[i]/scale; /* processor that globally owns this index */
676:     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
677:     owner[i]         = proc;
678:     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
679:   }
680:   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
681:   PetscInfo1(mapping,"Number of global owners for my local data %d\n",nsends);

683:   /* inform other processors of number of messages and max length*/
684:   PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
685:   PetscInfo1(mapping,"Number of local owners for my global data %d\n",nrecvs);

687:   /* post receives for owned rows */
688:   PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);
689:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
690:   for (i=0; i<nrecvs; i++) {
691:     MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
692:   }

694:   /* pack messages containing lists of local nodes to owners */
695:   PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);
696:   PetscMalloc((size+1)*sizeof(PetscInt),&starts);
697:   starts[0] = 0;
698:   for (i=1; i<size; i++) starts[i] = starts[i-1] + 2*nprocs[2*i-2];
699:   for (i=0; i<n; i++) {
700:     sends[starts[owner[i]]++] = lindices[i];
701:     sends[starts[owner[i]]++] = i;
702:   }
703:   PetscFree(owner);
704:   starts[0] = 0;
705:   for (i=1; i<size; i++) starts[i] = starts[i-1] + 2*nprocs[2*i-2];

707:   /* send the messages */
708:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
709:   PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);
710:   cnt = 0;
711:   for (i=0; i<size; i++) {
712:     if (nprocs[2*i]) {
713:       MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);
714:       dest[cnt] = i;
715:       cnt++;
716:     }
717:   }
718:   PetscFree(starts);

720:   /* wait on receives */
721:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);
722:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);
723:   cnt  = nrecvs;
724:   PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);
725:   PetscMemzero(nownedsenders,ng*sizeof(PetscInt));
726:   while (cnt) {
727:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
728:     /* unpack receives into our local space */
729:     MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);
730:     source[imdex] = recv_status.MPI_SOURCE;
731:     len[imdex]    = len[imdex]/2;
732:     /* count how many local owners for each of my global owned indices */
733:     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
734:     cnt--;
735:   }
736:   PetscFree(recv_waits);

738:   /* count how many of my globally owned indices lie on a subdomain boundary (nowned) and the total number of local owners of those indices (nownedm) */
739:   nowned  = 0;
740:   nownedm = 0;
741:   for (i=0; i<ng; i++) {
742:     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
743:   }

745:   /* create single array to contain rank of all local owners of each globally owned index */
746:   PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);
747:   PetscMalloc((ng+1)*sizeof(PetscInt),&starts);
748:   starts[0] = 0;
749:   for (i=1; i<ng; i++) {
750:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
751:     else starts[i] = starts[i-1];
752:   }

754:   /* for each nontrivial globally owned node, list all processors that sent it */
755:   for (i=0; i<nrecvs; i++) {
756:     for (j=0; j<len[i]; j++) {
757:       node = recvs[2*i*nmax+2*j]-rstart;
758:       if (nownedsenders[node] > 1) ownedsenders[starts[node]++] = source[i];
759:     }
760:   }

762:   if (debug) { /* -----------------------------------  */
763:     starts[0] = 0;
764:     for (i=1; i<ng; i++) {
765:       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
766:       else starts[i] = starts[i-1];
767:     }
768:     for (i=0; i<ng; i++) {
769:       if (nownedsenders[i] > 1) {
770:         PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);
771:         for (j=0; j<nownedsenders[i]; j++) {
772:           PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);
773:         }
774:         PetscSynchronizedPrintf(comm,"\n");
775:       }
776:     }
777:     PetscSynchronizedFlush(comm);
778:   } /* -----------------------------------  */

780:   /* wait on original sends */
781:   if (nsends) {
782:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
783:     MPI_Waitall(nsends,send_waits,send_status);
784:     PetscFree(send_status);
785:   }
786:   PetscFree(send_waits);
787:   PetscFree(sends);
788:   PetscFree(nprocs);

790:   /* pack messages to send back to local owners */
791:   starts[0] = 0;
792:   for (i=1; i<ng; i++) {
793:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
794:     else starts[i] = starts[i-1];
795:   }
796:   nsends2 = nrecvs;
797:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs); /* length of each message */
798:   for (i=0; i<nrecvs; i++) {
799:     nprocs[i] = 1;
800:     for (j=0; j<len[i]; j++) {
801:       node = recvs[2*i*nmax+2*j]-rstart;
802:       if (nownedsenders[node] > 1) nprocs[i] += 2 + nownedsenders[node];
803:     }
804:   }
805:   nt = 0;
806:   for (i=0; i<nsends2; i++) nt += nprocs[i];

808:   PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);
809:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);

811:   starts2[0] = 0;
812:   for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
813:   /*
814:      Each message is nprocs[i] entries long and consists of
815:        (0) the number of nodes being sent back, followed, for each such node, by
816:        (1) the local node number,
817:        (2) the number of processors sharing it,
818:        (3) the ranks of the processors sharing it
819:   */
820:   for (i=0; i<nsends2; i++) {
821:     cnt = 1;
822:     sends2[starts2[i]] = 0;
823:     for (j=0; j<len[i]; j++) {
824:       node = recvs[2*i*nmax+2*j]-rstart;
825:       if (nownedsenders[node] > 1) {
826:         sends2[starts2[i]]++;
827:         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
828:         sends2[starts2[i]+cnt++] = nownedsenders[node];
829:         PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));
830:         cnt += nownedsenders[node];
831:       }
832:     }
833:   }

835:   /* receive the message lengths */
836:   nrecvs2 = nsends;
837:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);
838:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);
839:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
840:   for (i=0; i<nrecvs2; i++) {
841:     MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);
842:   }

844:   /* send the message lengths */
845:   for (i=0; i<nsends2; i++) {
846:     MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);
847:   }

849:   /* wait on receives of lens */
850:   if (nrecvs2) {
851:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
852:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
853:     PetscFree(recv_statuses);
854:   }
855:   PetscFree(recv_waits);

857:   starts3[0] = 0;
858:   nt         = 0;
859:   for (i=0; i<nrecvs2-1; i++) {
860:     starts3[i+1] = starts3[i] + lens2[i];
861:     nt          += lens2[i];
862:   }
863:   if (nrecvs2) nt += lens2[nrecvs2-1];

865:   PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);
866:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
867:   for (i=0; i<nrecvs2; i++) {
868:     MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);
869:   }

871:   /* send the messages */
872:   PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);
873:   for (i=0; i<nsends2; i++) {
874:     MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);
875:   }

877:   /* wait on receives */
878:   if (nrecvs2) {
879:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
880:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
881:     PetscFree(recv_statuses);
882:   }
883:   PetscFree(recv_waits);
884:   PetscFree(nprocs);

886:   if (debug) { /* -----------------------------------  */
887:     cnt = 0;
888:     for (i=0; i<nrecvs2; i++) {
889:       nt = recvs2[cnt++];
890:       for (j=0; j<nt; j++) {
891:         PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);
892:         for (k=0; k<recvs2[cnt+1]; k++) {
893:           PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);
894:         }
895:         cnt += 2 + recvs2[cnt+1];
896:         PetscSynchronizedPrintf(comm,"\n");
897:       }
898:     }
899:     PetscSynchronizedFlush(comm);
900:   } /* -----------------------------------  */

902:   /* count how many of my nontrivial local nodes each subdomain (processor) shares */
903:   PetscMalloc(size*sizeof(PetscInt),&nprocs);
904:   PetscMemzero(nprocs,size*sizeof(PetscInt));
905:   cnt  = 0;
906:   for (i=0; i<nrecvs2; i++) {
907:     nt = recvs2[cnt++];
908:     for (j=0; j<nt; j++) {
909:       for (k=0; k<recvs2[cnt+1]; k++) nprocs[recvs2[cnt+2+k]]++;
910:       cnt += 2 + recvs2[cnt+1];
911:     }
912:   }
913:   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
914:   *nproc    = nt;
915:   PetscMalloc((nt+1)*sizeof(PetscInt),procs);
916:   PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);
917:   PetscMalloc((nt+1)*sizeof(PetscInt*),indices);
918:   for (i=0;i<nt+1;i++) (*indices)[i]=NULL;
919:   PetscMalloc(size*sizeof(PetscInt),&bprocs);
920:   cnt       = 0;
921:   for (i=0; i<size; i++) {
922:     if (nprocs[i] > 0) {
923:       bprocs[i]        = cnt;
924:       (*procs)[cnt]    = i;
925:       (*numprocs)[cnt] = nprocs[i];
926:       PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);
927:       cnt++;
928:     }
929:   }

931:   /* for each neighboring subdomain, build the list of nontrivial local nodes shared with it */
932:   PetscMemzero(*numprocs,nt*sizeof(PetscInt));
933:   cnt  = 0;
934:   for (i=0; i<nrecvs2; i++) {
935:     nt = recvs2[cnt++];
936:     for (j=0; j<nt; j++) {
937:       for (k=0; k<recvs2[cnt+1]; k++) (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
938:       cnt += 2 + recvs2[cnt+1];
939:     }
940:   }
941:   PetscFree(bprocs);
942:   PetscFree(recvs2);

944:   /* sort each subdomain's node list by global number */
945:   nt = *nproc;
946:   for (i=0; i<nt; i++) {
947:     PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);
948:     for (j=0; j<(*numprocs)[i]; j++) tmp[j] = lindices[(*indices)[i][j]];
949:     PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
950:     PetscFree(tmp);
951:   }

953:   if (debug) { /* -----------------------------------  */
954:     nt = *nproc;
955:     for (i=0; i<nt; i++) {
956:       PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);
957:       for (j=0; j<(*numprocs)[i]; j++) {
958:         PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);
959:       }
960:       PetscSynchronizedPrintf(comm,"\n");
961:     }
962:     PetscSynchronizedFlush(comm);
963:   } /* -----------------------------------  */

965:   /* wait on sends */
966:   if (nsends2) {
967:     PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);
968:     MPI_Waitall(nsends2,send_waits,send_status);
969:     PetscFree(send_status);
970:   }

972:   PetscFree(starts3);
973:   PetscFree(dest);
974:   PetscFree(send_waits);

976:   PetscFree(nownedsenders);
977:   PetscFree(ownedsenders);
978:   PetscFree(starts);
979:   PetscFree(starts2);
980:   PetscFree(lens2);

982:   PetscFree(source);
983:   PetscFree(len);
984:   PetscFree(recvs);
985:   PetscFree(nprocs);
986:   PetscFree(sends2);

988:   /* put the information about myself as the first entry in the list */
989:   first_procs    = (*procs)[0];
990:   first_numprocs = (*numprocs)[0];
991:   first_indices  = (*indices)[0];
992:   for (i=0; i<*nproc; i++) {
993:     if ((*procs)[i] == rank) {
994:       (*procs)[0]    = (*procs)[i];
995:       (*numprocs)[0] = (*numprocs)[i];
996:       (*indices)[0]  = (*indices)[i];
997:       (*procs)[i]    = first_procs;
998:       (*numprocs)[i] = first_numprocs;
999:       (*indices)[i]  = first_indices;
1000:       break;
1001:     }
1002:   }
1003:   return(0);
1004: }
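
A sketch of consuming the neighbor information; the printing is illustrative and error checking is omitted. ISLocalToGlobalMappingRestoreInfo() (below) frees what ISLocalToGlobalMappingGetInfo() allocated.

static PetscErrorCode ExamplePrintNeighborInfo(ISLocalToGlobalMapping ltog)
{
  PetscInt    nproc,*procs,*numprocs,**indices,i,j;
  PetscMPIInt rank;

  MPI_Comm_rank(PetscObjectComm((PetscObject)ltog),&rank);
  ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&indices);
  for (i=0; i<nproc; i++) {
    PetscPrintf(PETSC_COMM_SELF,"[%d] shares %D local nodes with process %D\n",rank,numprocs[i],procs[i]);
    for (j=0; j<numprocs[i]; j++) {
      PetscPrintf(PETSC_COMM_SELF,"[%d]   local node %D\n",rank,indices[i][j]);
    }
  }
  ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&indices);
  return(0);
}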

1008: /*@C
1009:     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

1011:     Collective on ISLocalToGlobalMapping

1013:     Input Parameter:
1014: .   mapping - the mapping from local to global indexing

1016:     Output Parameters:
1017: +   nproc - number of processors that are connected to this one
1018: .   proc - neighboring processors
1019: .   numproc - number of indices for each processor
1020: -   indices - indices of local nodes shared with neighbor (sorted by global numbering)

1022:     Level: advanced

1024: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
1025:           ISLocalToGlobalMappingGetInfo()
1026: @*/
1027: PetscErrorCode  ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
1028: {
1030:   PetscInt       i;

1033:   PetscFree(*procs);
1034:   PetscFree(*numprocs);
1035:   if (*indices) {
1036:     PetscFree((*indices)[0]);
1037:     for (i=1; i<*nproc; i++) {
1038:       PetscFree((*indices)[i]);
1039:     }
1040:     PetscFree(*indices);
1041:   }
1042:   return(0);
1043: }

1047: /*@C
1048:    ISLocalToGlobalMappingGetIndices - Get global indices for every local point

1050:    Not Collective

1052:    Input Arguments:
1053: . ltog - local to global mapping

1055:    Output Arguments:
1056: . array - array of indices

1058:    Level: advanced

1060: .seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingRestoreIndices()
1061: @*/
1062: PetscErrorCode  ISLocalToGlobalMappingGetIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
1063: {
1067:   *array = ltog->indices;
1068:   return(0);
1069: }

1073: /*@C
1074:    ISLocalToGlobalMappingRestoreIndices - Restore indices obtained with ISLocalToGlobalMappingGetIndices()

1076:    Not Collective

1078:    Input Arguments:
1079: + ltog - local to global mapping
1080: - array - array of indices

1082:    Level: advanced

1084: .seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingGetIndices()
1085: @*/
1086: PetscErrorCode  ISLocalToGlobalMappingRestoreIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
1087: {
1091:   if (*array != ltog->indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Trying to return mismatched pointer");
1092:   *array = NULL;
1093:   return(0);
1094: }
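
A sketch of the Get/Restore pair; the printing is illustrative and error checking is omitted.

static PetscErrorCode ExamplePrintIndices(ISLocalToGlobalMapping ltog)
{
  PetscInt       i,n;
  const PetscInt *idx;

  ISLocalToGlobalMappingGetSize(ltog,&n);
  ISLocalToGlobalMappingGetIndices(ltog,&idx);
  for (i=0; i<n; i++) {
    PetscPrintf(PETSC_COMM_SELF,"local %D -> global %D\n",i,idx[i]);
  }
  ISLocalToGlobalMappingRestoreIndices(ltog,&idx);   /* must be restored with the same pointer */
  return(0);
}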

1098: /*@C
1099:    ISLocalToGlobalMappingConcatenate - Create a new mapping that concatenates a list of mappings

1101:    Not Collective

1103:    Input Arguments:
1104: + comm - communicator for the new mapping, must contain the communicator of every mapping to concatenate
1105: . n - number of mappings to concatenate
1106: - ltogs - local to global mappings

1108:    Output Arguments:
1109: . ltogcat - new mapping

1111:    Level: advanced

1113: .seealso: ISLocalToGlobalMappingCreate()
1114: @*/
1115: PetscErrorCode ISLocalToGlobalMappingConcatenate(MPI_Comm comm,PetscInt n,const ISLocalToGlobalMapping ltogs[],ISLocalToGlobalMapping *ltogcat)
1116: {
1117:   PetscInt       i,cnt,m,*idx;

1121:   if (n < 0) SETERRQ1(comm,PETSC_ERR_ARG_OUTOFRANGE,"Must have a non-negative number of mappings, given %D",n);
1125:   for (cnt=0,i=0; i<n; i++) {
1126:     ISLocalToGlobalMappingGetSize(ltogs[i],&m);
1127:     cnt += m;
1128:   }
1129:   PetscMalloc(cnt*sizeof(PetscInt),&idx);
1130:   for (cnt=0,i=0; i<n; i++) {
1131:     const PetscInt *subidx;
1132:     ISLocalToGlobalMappingGetSize(ltogs[i],&m);
1133:     ISLocalToGlobalMappingGetIndices(ltogs[i],&subidx);
1134:     PetscMemcpy(&idx[cnt],subidx,m*sizeof(PetscInt));
1135:     ISLocalToGlobalMappingRestoreIndices(ltogs[i],&subidx);
1136:     cnt += m;
1137:   }
1138:   ISLocalToGlobalMappingCreate(comm,cnt,idx,PETSC_OWN_POINTER,ltogcat);
1139:   return(0);
1140: }
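
A sketch of concatenation; the two input mappings and their values are illustrative and error checking is omitted.

static PetscErrorCode ExampleConcatenate(void)
{
  ISLocalToGlobalMapping parts[2],whole;
  const PetscInt         a[] = {0,1},b[] = {4,5,6};

  ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,2,a,PETSC_COPY_VALUES,&parts[0]);
  ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,3,b,PETSC_COPY_VALUES,&parts[1]);
  ISLocalToGlobalMappingConcatenate(PETSC_COMM_WORLD,2,parts,&whole); /* whole maps local 0..4 to 0,1,4,5,6 */
  ISLocalToGlobalMappingDestroy(&whole);
  ISLocalToGlobalMappingDestroy(&parts[1]);
  ISLocalToGlobalMappingDestroy(&parts[0]);
  return(0);
}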