Actual source code: plexdistribute.c

petsc-3.10.5 2019-03-28
  1:  #include <petsc/private/dmpleximpl.h>
  2:  #include <petsc/private/dmlabelimpl.h>

  4: /*@C
  5:   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback

  7:   Input Parameters:
  8: + dm      - The DM object
  9: . user    - The user callback, may be NULL (to clear the callback)
 10: - ctx     - context for callback evaluation, may be NULL

 12:   Level: advanced

 14:   Notes:
 15:      The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.

 17:      Any setting here overrides other configuration of DMPlex adjacency determination.

 19: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
 20: @*/
 21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
 22: {
 23:   DM_Plex *mesh = (DM_Plex *)dm->data;

 27:   mesh->useradjacency = user;
 28:   mesh->useradjacencyctx = ctx;
 29:   return(0);
 30: }
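/* Illustrative sketch, not part of plexdistribute.c: a hypothetical callback matching the
   signature expected by DMPlexSetAdjacencyUser(), declaring a point adjacent to itself and
   its cone, plus a helper that registers it. The names UserConeAdjacency and
   RegisterConeAdjacency are examples only. */
static PetscErrorCode UserConeAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
{
  const PetscInt *cone;
  PetscInt        coneSize, c;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr);
  ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr);
  /* On entry *adjSize is the capacity of adj[]; on exit it is the number of entries written */
  if (coneSize+1 > *adjSize) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Adjacency array too small");
  adj[0] = p;
  for (c = 0; c < coneSize; ++c) adj[c+1] = cone[c];
  *adjSize = coneSize + 1;
  PetscFunctionReturn(0);
}

static PetscErrorCode RegisterConeAdjacency(DM dm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexSetAdjacencyUser(dm, UserConeAdjacency, NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}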

 32: /*@C
 33:   DMPlexGetAdjacencyUser - get the user-defined adjacency callback

 35:   Input Parameter:
 36: . dm      - The DM object

 38:   Output Parameters:
 39: + user    - The user callback
 40: - ctx     - context for callback evaluation

 42:   Level: advanced

 44: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
 45: @*/
 46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
 47: {
 48:   DM_Plex *mesh = (DM_Plex *)dm->data;

 52:   if (user) *user = mesh->useradjacency;
 53:   if (ctx) *ctx = mesh->useradjacencyctx;
 54:   return(0);
 55: }

 57: /*@
 58:   DMPlexSetAdjacencyUseCone - Define adjacency in the mesh using either the cone or the support first

 60:   Input Parameters:
 61: + dm      - The DM object
 62: - useCone - Flag to use the cone first

 64:   Level: intermediate

 66:   Notes:
 67: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
 68: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
 69: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

 71: .seealso: DMPlexGetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
 72: @*/
 73: PetscErrorCode DMPlexSetAdjacencyUseCone(DM dm, PetscBool useCone)
 74: {
 75:   PetscDS        prob;
 76:   PetscBool      useClosure;
 77:   PetscInt       Nf;

 81:   DMGetDS(dm, &prob);
 82:   PetscDSGetNumFields(prob, &Nf);
 83:   if (!Nf) {
 84:     PetscDSGetAdjacency(prob, PETSC_DEFAULT, NULL, &useClosure);
 85:     PetscDSSetAdjacency(prob, PETSC_DEFAULT, useCone, useClosure);
 86:   } else {
 87:     PetscDSGetAdjacency(prob, 0, NULL, &useClosure);
 88:     PetscDSSetAdjacency(prob, 0, useCone, useClosure);
 89:   }
 90:   return(0);
 91: }

 93: /*@
 94:   DMPlexGetAdjacencyUseCone - Query whether adjacency in the mesh uses the cone or the support first

 96:   Input Parameter:
 97: . dm      - The DM object

 99:   Output Parameter:
100: . useCone - Flag to use the cone first

102:   Level: intermediate

104:   Notes:
105: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
106: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
107: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

109: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
110: @*/
111: PetscErrorCode DMPlexGetAdjacencyUseCone(DM dm, PetscBool *useCone)
112: {
113:   PetscDS        prob;
114:   PetscInt       Nf;

118:   DMGetDS(dm, &prob);
119:   PetscDSGetNumFields(prob, &Nf);
120:   if (!Nf) {
121:     PetscDSGetAdjacency(prob, PETSC_DEFAULT, useCone, NULL);
122:   } else {
123:     PetscDSGetAdjacency(prob, 0, useCone, NULL);
124:   }
125:   return(0);
126: }

128: /*@
129:   DMPlexSetAdjacencyUseClosure - Define adjacency in the mesh using the transitive closure

131:   Input Parameters:
132: + dm      - The DM object
133: - useClosure - Flag to use the closure

135:   Level: intermediate

137:   Notes:
138: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
139: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
140: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

142: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
143: @*/
144: PetscErrorCode DMPlexSetAdjacencyUseClosure(DM dm, PetscBool useClosure)
145: {
146:   PetscDS        prob;
147:   PetscBool      useCone;
148:   PetscInt       Nf;

152:   DMGetDS(dm, &prob);
153:   PetscDSGetNumFields(prob, &Nf);
154:   if (!Nf) {
155:     PetscDSGetAdjacency(prob, PETSC_DEFAULT, &useCone, NULL);
156:     PetscDSSetAdjacency(prob, PETSC_DEFAULT, useCone, useClosure);
157:   } else {
158:     PetscDSGetAdjacency(prob, 0, &useCone, NULL);
159:     PetscDSSetAdjacency(prob, 0, useCone, useClosure);
160:   }
161:   return(0);
162: }

164: /*@
165:   DMPlexGetAdjacencyUseClosure - Query whether adjacency in the mesh uses the transitive closure

167:   Input Parameter:
168: . dm      - The DM object

170:   Output Parameter:
171: . useClosure - Flag to use the closure

173:   Level: intermediate

175:   Notes:
176: $     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
177: $     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
178: $     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

180: .seealso: DMPlexSetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
181: @*/
182: PetscErrorCode DMPlexGetAdjacencyUseClosure(DM dm, PetscBool *useClosure)
183: {
184:   PetscDS        prob;
185:   PetscInt       Nf;

189:   DMGetDS(dm, &prob);
190:   PetscDSGetNumFields(prob, &Nf);
191:   if (!Nf) {
192:     PetscDSGetAdjacency(prob, PETSC_DEFAULT, NULL, useClosure);
193:   } else {
194:     PetscDSGetAdjacency(prob, 0, NULL, useClosure);
195:   }
196:   return(0);
197: }
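/* Illustrative usage sketch, not part of plexdistribute.c: selecting the adjacency patterns
   described in the Notes above on an existing DMPlex dm.

     FEM   (q \in closure(star(p))):
       ierr = DMPlexSetAdjacencyUseCone(dm, PETSC_FALSE);CHKERRQ(ierr);
       ierr = DMPlexSetAdjacencyUseClosure(dm, PETSC_TRUE);CHKERRQ(ierr);

     FVM   (q \in support(p+cone(p))):
       ierr = DMPlexSetAdjacencyUseCone(dm, PETSC_TRUE);CHKERRQ(ierr);
       ierr = DMPlexSetAdjacencyUseClosure(dm, PETSC_FALSE);CHKERRQ(ierr);
*/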

199: /*@
200:   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

202:   Input Parameters:
203: + dm      - The DM object
204: - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

206:   Level: intermediate

208: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
209: @*/
210: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
211: {
212:   DM_Plex *mesh = (DM_Plex *) dm->data;

216:   mesh->useAnchors = useAnchors;
217:   return(0);
218: }

220: /*@
221:   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

223:   Input Parameter:
224: . dm      - The DM object

226:   Output Parameter:
227: . useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

229:   Level: intermediate

231: .seealso: DMPlexSetAdjacencyUseAnchors(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
232: @*/
233: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
234: {
235:   DM_Plex *mesh = (DM_Plex *) dm->data;

240:   *useAnchors = mesh->useAnchors;
241:   return(0);
242: }

244: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
245: {
246:   const PetscInt *cone = NULL;
247:   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
248:   PetscErrorCode  ierr;

251:   DMPlexGetConeSize(dm, p, &coneSize);
252:   DMPlexGetCone(dm, p, &cone);
253:   for (c = 0; c <= coneSize; ++c) {
254:     const PetscInt  point   = !c ? p : cone[c-1];
255:     const PetscInt *support = NULL;
256:     PetscInt        supportSize, s, q;

258:     DMPlexGetSupportSize(dm, point, &supportSize);
259:     DMPlexGetSupport(dm, point, &support);
260:     for (s = 0; s < supportSize; ++s) {
261:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
262:         if (support[s] == adj[q]) break;
263:       }
264:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
265:     }
266:   }
267:   *adjSize = numAdj;
268:   return(0);
269: }

271: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
272: {
273:   const PetscInt *support = NULL;
274:   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;
275:   PetscErrorCode  ierr;

278:   DMPlexGetSupportSize(dm, p, &supportSize);
279:   DMPlexGetSupport(dm, p, &support);
280:   for (s = 0; s <= supportSize; ++s) {
281:     const PetscInt  point = !s ? p : support[s-1];
282:     const PetscInt *cone  = NULL;
283:     PetscInt        coneSize, c, q;

285:     DMPlexGetConeSize(dm, point, &coneSize);
286:     DMPlexGetCone(dm, point, &cone);
287:     for (c = 0; c < coneSize; ++c) {
288:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
289:         if (cone[c] == adj[q]) break;
290:       }
291:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
292:     }
293:   }
294:   *adjSize = numAdj;
295:   return(0);
296: }

298: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
299: {
300:   PetscInt      *star = NULL;
301:   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;

305:   DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
306:   for (s = 0; s < starSize*2; s += 2) {
307:     const PetscInt *closure = NULL;
308:     PetscInt        closureSize, c, q;

310:     DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
311:     for (c = 0; c < closureSize*2; c += 2) {
312:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
313:         if (closure[c] == adj[q]) break;
314:       }
315:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
316:     }
317:     DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
318:   }
319:   DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
320:   *adjSize = numAdj;
321:   return(0);
322: }

324: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
325: {
326:   static PetscInt asiz = 0;
327:   PetscInt maxAnchors = 1;
328:   PetscInt aStart = -1, aEnd = -1;
329:   PetscInt maxAdjSize;
330:   PetscSection aSec = NULL;
331:   IS aIS = NULL;
332:   const PetscInt *anchors;
333:   DM_Plex *mesh = (DM_Plex *)dm->data;
334:   PetscErrorCode  ierr;

337:   if (useAnchors) {
338:     DMPlexGetAnchors(dm,&aSec,&aIS);
339:     if (aSec) {
340:       PetscSectionGetMaxDof(aSec,&maxAnchors);
341:       maxAnchors = PetscMax(1,maxAnchors);
342:       PetscSectionGetChart(aSec,&aStart,&aEnd);
343:       ISGetIndices(aIS,&anchors);
344:     }
345:   }
346:   if (!*adj) {
347:     PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

349:     DMPlexGetChart(dm, &pStart,&pEnd);
350:     DMPlexGetDepth(dm, &depth);
351:     DMPlexGetMaxSizes(dm, &maxC, &maxS);
352:     coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
353:     supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
354:     asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
355:     asiz *= maxAnchors;
356:     asiz  = PetscMin(asiz,pEnd-pStart);
357:     PetscMalloc1(asiz,adj);
358:   }
359:   if (*adjSize < 0) *adjSize = asiz;
360:   maxAdjSize = *adjSize;
361:   if (mesh->useradjacency) {
362:     mesh->useradjacency(dm, p, adjSize, *adj, mesh->useradjacencyctx);
363:   } else if (useTransitiveClosure) {
364:     DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
365:   } else if (useCone) {
366:     DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
367:   } else {
368:     DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
369:   }
370:   if (useAnchors && aSec) {
371:     PetscInt origSize = *adjSize;
372:     PetscInt numAdj = origSize;
373:     PetscInt i = 0, j;
374:     PetscInt *orig = *adj;

376:     while (i < origSize) {
377:       PetscInt p = orig[i];
378:       PetscInt aDof = 0;

380:       if (p >= aStart && p < aEnd) {
381:         PetscSectionGetDof(aSec,p,&aDof);
382:       }
383:       if (aDof) {
384:         PetscInt aOff;
385:         PetscInt s, q;

387:         for (j = i + 1; j < numAdj; j++) {
388:           orig[j - 1] = orig[j];
389:         }
390:         origSize--;
391:         numAdj--;
392:         PetscSectionGetOffset(aSec,p,&aOff);
393:         for (s = 0; s < aDof; ++s) {
394:           for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
395:             if (anchors[aOff+s] == orig[q]) break;
396:           }
397:           if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
398:         }
399:       }
400:       else {
401:         i++;
402:       }
403:     }
404:     *adjSize = numAdj;
405:     ISRestoreIndices(aIS,&anchors);
406:   }
407:   return(0);
408: }

410: /*@
411:   DMPlexGetAdjacency - Return all points adjacent to the given point

413:   Input Parameters:
414: + dm - The DM object
415: . p  - The point
416: . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
417: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize

419:   Output Parameters:
420: + adjSize - The number of adjacent points
421: - adj - The adjacent points

423:   Level: advanced

425:   Notes:
426:     The user must PetscFree the adj array if it was not passed in.

428: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
429: @*/
430: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
431: {
432:   PetscBool      useCone, useClosure, useAnchors;

439:   DMPlexGetAdjacencyUseCone(dm, &useCone);
440:   DMPlexGetAdjacencyUseClosure(dm, &useClosure);
441:   DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
442:   DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);
443:   return(0);
444: }
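/* Illustrative sketch, not part of plexdistribute.c: querying adjacency with the
   allocate-on-demand convention documented above (pass adj == NULL and
   adjSize == PETSC_DETERMINE), then freeing the returned array. The function name
   PrintAdjacency is an example only. */
static PetscErrorCode PrintAdjacency(DM dm, PetscInt p)
{
  PetscInt      *adj     = NULL;              /* NULL requests allocation inside DMPlexGetAdjacency() */
  PetscInt       adjSize = PETSC_DETERMINE;   /* let the routine choose a large enough size */
  PetscInt       a;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
  for (a = 0; a < adjSize; ++a) {
    ierr = PetscPrintf(PETSC_COMM_SELF, "point %D is adjacent to %D\n", p, adj[a]);CHKERRQ(ierr);
  }
  ierr = PetscFree(adj);CHKERRQ(ierr);        /* the caller frees adj when it was not passed in */
  PetscFunctionReturn(0);
}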

446: /*@
447:   DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

449:   Collective on DM

451:   Input Parameters:
452: + dm              - The DM
453: . sfPoint         - The PetscSF which encodes point connectivity
     . rootRankSection - The number of leaves for a given root point
     . rootRanks       - The rank of each edge into the root point
     . leafRankSection - The number of processes sharing a given leaf point
     - leafRanks       - The rank of each process sharing a leaf point

455:   Output Parameters:
456: + processRanks - A list of process neighbors, or NULL
457: - sfProcess    - An SF encoding the two-sided process connectivity, or NULL

459:   Level: developer

461: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
462: @*/
463: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
464: {
465:   const PetscSFNode *remotePoints;
466:   PetscInt          *localPointsNew;
467:   PetscSFNode       *remotePointsNew;
468:   const PetscInt    *nranks;
469:   PetscInt          *ranksNew;
470:   PetscBT            neighbors;
471:   PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
472:   PetscMPIInt        size, proc, rank;
473:   PetscErrorCode     ierr;

480:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
481:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
482:   PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
483:   PetscBTCreate(size, &neighbors);
484:   PetscBTMemzero(size, neighbors);
485:   /* Compute root-to-leaf process connectivity */
486:   PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
487:   ISGetIndices(rootRanks, &nranks);
488:   for (p = pStart; p < pEnd; ++p) {
489:     PetscInt ndof, noff, n;

491:     PetscSectionGetDof(rootRankSection, p, &ndof);
492:     PetscSectionGetOffset(rootRankSection, p, &noff);
493:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
494:   }
495:   ISRestoreIndices(rootRanks, &nranks);
496:   /* Compute leaf-to-neighbor process connectivity */
497:   PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
498:   ISGetIndices(leafRanks, &nranks);
499:   for (p = pStart; p < pEnd; ++p) {
500:     PetscInt ndof, noff, n;

502:     PetscSectionGetDof(leafRankSection, p, &ndof);
503:     PetscSectionGetOffset(leafRankSection, p, &noff);
504:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
505:   }
506:   ISRestoreIndices(leafRanks, &nranks);
507:   /* Compute leaf-to-root process connectivity */
508:   for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
509:   /* Calculate edges */
510:   PetscBTClear(neighbors, rank);
511:   for(proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
512:   PetscMalloc1(numNeighbors, &ranksNew);
513:   PetscMalloc1(numNeighbors, &localPointsNew);
514:   PetscMalloc1(numNeighbors, &remotePointsNew);
515:   for(proc = 0, n = 0; proc < size; ++proc) {
516:     if (PetscBTLookup(neighbors, proc)) {
517:       ranksNew[n]              = proc;
518:       localPointsNew[n]        = proc;
519:       remotePointsNew[n].index = rank;
520:       remotePointsNew[n].rank  = proc;
521:       ++n;
522:     }
523:   }
524:   PetscBTDestroy(&neighbors);
525:   if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
526:   else              {PetscFree(ranksNew);}
527:   if (sfProcess) {
528:     PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
529:     PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
530:     PetscSFSetFromOptions(*sfProcess);
531:     PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
532:   }
533:   return(0);
534: }

536: /*@
537:   DMPlexDistributeOwnership - Compute owner information for shared points. This basically computes two-sided information for the point SF.

539:   Collective on DM

541:   Input Parameter:
542: . dm - The DM

544:   Output Parameters:
545: + rootSection - The number of leaves for a given root point
546: . rootrank    - The rank of each edge into the root point
547: . leafSection - The number of processes sharing a given leaf point
548: - leafrank    - The rank of each process sharing a leaf point

550:   Level: developer

552: .seealso: DMPlexCreateOverlap()
553: @*/
554: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
555: {
556:   MPI_Comm        comm;
557:   PetscSF         sfPoint;
558:   const PetscInt *rootdegree;
559:   PetscInt       *myrank, *remoterank;
560:   PetscInt        pStart, pEnd, p, nedges;
561:   PetscMPIInt     rank;
562:   PetscErrorCode  ierr;

565:   PetscObjectGetComm((PetscObject) dm, &comm);
566:   MPI_Comm_rank(comm, &rank);
567:   DMPlexGetChart(dm, &pStart, &pEnd);
568:   DMGetPointSF(dm, &sfPoint);
569:   /* Compute number of leaves for each root */
570:   PetscObjectSetName((PetscObject) rootSection, "Root Section");
571:   PetscSectionSetChart(rootSection, pStart, pEnd);
572:   PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
573:   PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
574:   for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
575:   PetscSectionSetUp(rootSection);
576:   /* Gather rank of each leaf to root */
577:   PetscSectionGetStorageSize(rootSection, &nedges);
578:   PetscMalloc1(pEnd-pStart, &myrank);
579:   PetscMalloc1(nedges,  &remoterank);
580:   for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
581:   PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
582:   PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
583:   PetscFree(myrank);
584:   ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
585:   /* Distribute remote ranks to leaves */
586:   PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
587:   DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
588:   return(0);
589: }
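/* Illustrative sketch, not part of plexdistribute.c: how DMPlexDistributeOwnership() feeds
   DMPlexCreateOverlap() (defined below) to label one level of overlap points. The function
   name BuildOverlapLabel is an example only. */
static PetscErrorCode BuildOverlapLabel(DM dm, DMLabel *ovLabel)
{
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  MPI_Comm       comm;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  ierr = DMPlexCreateOverlap(dm, 1, rootSection, rootrank, leafSection, leafrank, ovLabel);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}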

591: /*@C
592:   DMPlexCreateOverlap - Compute the remote overlap contributions, as point/rank pairings, needed to build the requested number of overlap levels.

594:   Collective on DM

596:   Input Parameters:
597: + dm          - The DM
598: . levels      - Number of overlap levels
599: . rootSection - The number of leaves for a given root point
600: . rootrank    - The rank of each edge into the root point
601: . leafSection - The number of processes sharing a given leaf point
602: - leafrank    - The rank of each process sharing a leaf point

604:   Output Parameter:
605: . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings

607:   Level: developer

609: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
610: @*/
611: PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
612: {
613:   MPI_Comm           comm;
614:   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
615:   PetscSF            sfPoint, sfProc;
616:   const PetscSFNode *remote;
617:   const PetscInt    *local;
618:   const PetscInt    *nrank, *rrank;
619:   PetscInt          *adj = NULL;
620:   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
621:   PetscMPIInt        rank, size;
622:   PetscBool          useCone, useClosure, flg;
623:   PetscErrorCode     ierr;

626:   PetscObjectGetComm((PetscObject) dm, &comm);
627:   MPI_Comm_size(comm, &size);
628:   MPI_Comm_rank(comm, &rank);
629:   DMGetPointSF(dm, &sfPoint);
630:   DMPlexGetChart(dm, &pStart, &pEnd);
631:   PetscSectionGetChart(leafSection, &sStart, &sEnd);
632:   PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
633:   DMLabelCreate("Overlap adjacency", &ovAdjByRank);
634:   /* Handle leaves: shared with the root point */
635:   for (l = 0; l < nleaves; ++l) {
636:     PetscInt adjSize = PETSC_DETERMINE, a;

638:     DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);
639:     for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
640:   }
641:   ISGetIndices(rootrank, &rrank);
642:   ISGetIndices(leafrank, &nrank);
643:   /* Handle roots */
644:   for (p = pStart; p < pEnd; ++p) {
645:     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

647:     if ((p >= sStart) && (p < sEnd)) {
648:       /* Some leaves share a root with other leaves on different processes */
649:       PetscSectionGetDof(leafSection, p, &neighbors);
650:       if (neighbors) {
651:         PetscSectionGetOffset(leafSection, p, &noff);
652:         DMPlexGetAdjacency(dm, p, &adjSize, &adj);
653:         for (n = 0; n < neighbors; ++n) {
654:           const PetscInt remoteRank = nrank[noff+n];

656:           if (remoteRank == rank) continue;
657:           for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
658:         }
659:       }
660:     }
661:     /* Roots are shared with leaves */
662:     PetscSectionGetDof(rootSection, p, &neighbors);
663:     if (!neighbors) continue;
664:     PetscSectionGetOffset(rootSection, p, &noff);
665:     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
666:     for (n = 0; n < neighbors; ++n) {
667:       const PetscInt remoteRank = rrank[noff+n];

669:       if (remoteRank == rank) continue;
670:       for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
671:     }
672:   }
673:   PetscFree(adj);
674:   ISRestoreIndices(rootrank, &rrank);
675:   ISRestoreIndices(leafrank, &nrank);
676:   /* Add additional overlap levels */
677:   for (l = 1; l < levels; l++) {
678:     /* Propagate point donations over SF to capture remote connections */
679:     DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
680:     /* Add next level of point donations to the label */
681:     DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
682:   }
683:   /* We require the closure in the overlap */
684:   DMPlexGetAdjacencyUseCone(dm, &useCone);
685:   DMPlexGetAdjacencyUseClosure(dm, &useClosure);
686:   if (useCone || !useClosure) {
687:     DMPlexPartitionLabelClosure(dm, ovAdjByRank);
688:   }
689:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
690:   if (flg) {
691:     DMLabelView(ovAdjByRank, PETSC_VIEWER_STDOUT_WORLD);
692:   }
693:   /* Make global process SF and invert sender to receiver label */
694:   {
695:     /* Build a global process SF */
696:     PetscSFNode *remoteProc;
697:     PetscMalloc1(size, &remoteProc);
698:     for (p = 0; p < size; ++p) {
699:       remoteProc[p].rank  = p;
700:       remoteProc[p].index = rank;
701:     }
702:     PetscSFCreate(comm, &sfProc);
703:     PetscObjectSetName((PetscObject) sfProc, "Process SF");
704:     PetscSFSetGraph(sfProc, size, size, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
705:   }
706:   DMLabelCreate("Overlap label", ovLabel);
707:   DMPlexPartitionLabelInvert(dm, ovAdjByRank, sfProc, *ovLabel);
708:   /* Add owned points, except for shared local points */
709:   for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
710:   for (l = 0; l < nleaves; ++l) {
711:     DMLabelClearValue(*ovLabel, local[l], rank);
712:     DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
713:   }
714:   /* Clean up */
715:   DMLabelDestroy(&ovAdjByRank);
716:   PetscSFDestroy(&sfProc);
717:   return(0);
718: }

720: /*@C
721:   DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution that realizes the overlap described by the input SF

723:   Collective on DM

725:   Input Parameters:
726: + dm          - The DM
727: - overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

729:   Output Parameter:
730: . migrationSF - An SF that maps original points in old locations to points in new locations

732:   Level: developer

734: .seealso: DMPlexCreateOverlap(), DMPlexDistribute()
735: @*/
736: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
737: {
738:   MPI_Comm           comm;
739:   PetscMPIInt        rank, size;
740:   PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
741:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
742:   PetscInt          *depthRecv, *depthShift, *depthIdx;
743:   PetscSFNode       *iremote;
744:   PetscSF            pointSF;
745:   const PetscInt    *sharedLocal;
746:   const PetscSFNode *overlapRemote, *sharedRemote;
747:   PetscErrorCode     ierr;

751:   PetscObjectGetComm((PetscObject)dm, &comm);
752:   MPI_Comm_rank(comm, &rank);
753:   MPI_Comm_size(comm, &size);
754:   DMGetDimension(dm, &dim);

756:   /* Before building the migration SF we need to know the new stratum offsets */
757:   PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
758:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
759:   for (d=0; d<dim+1; d++) {
760:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
761:     for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
762:   }
763:   for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
764:   PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
765:   PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);

767:   /* Count received points in each stratum and compute the internal strata shift */
768:   PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
769:   for (d=0; d<dim+1; d++) depthRecv[d]=0;
770:   for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
771:   depthShift[dim] = 0;
772:   for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
773:   for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
774:   for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
775:   for (d=0; d<dim+1; d++) {
776:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
777:     depthIdx[d] = pStart + depthShift[d];
778:   }

780:   /* From the overlap SF, build an SF that describes the full overlap migration */
781:   DMPlexGetChart(dm, &pStart, &pEnd);
782:   newLeaves = pEnd - pStart + nleaves;
783:   PetscMalloc1(newLeaves, &ilocal);
784:   PetscMalloc1(newLeaves, &iremote);
785:   /* First map local points to themselves */
786:   for (d=0; d<dim+1; d++) {
787:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
788:     for (p=pStart; p<pEnd; p++) {
789:       point = p + depthShift[d];
790:       ilocal[point] = point;
791:       iremote[point].index = p;
792:       iremote[point].rank = rank;
793:       depthIdx[d]++;
794:     }
795:   }

797:   /* Add in the remote roots for currently shared points */
798:   DMGetPointSF(dm, &pointSF);
799:   PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
800:   for (d=0; d<dim+1; d++) {
801:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
802:     for (p=0; p<numSharedPoints; p++) {
803:       if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
804:         point = sharedLocal[p] + depthShift[d];
805:         iremote[point].index = sharedRemote[p].index;
806:         iremote[point].rank = sharedRemote[p].rank;
807:       }
808:     }
809:   }

811:   /* Now add the incoming overlap points */
812:   for (p=0; p<nleaves; p++) {
813:     point = depthIdx[remoteDepths[p]];
814:     ilocal[point] = point;
815:     iremote[point].index = overlapRemote[p].index;
816:     iremote[point].rank = overlapRemote[p].rank;
817:     depthIdx[remoteDepths[p]]++;
818:   }
819:   PetscFree2(pointDepths,remoteDepths);

821:   PetscSFCreate(comm, migrationSF);
822:   PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
823:   PetscSFSetFromOptions(*migrationSF);
824:   DMPlexGetChart(dm, &pStart, &pEnd);
825:   PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);

827:   PetscFree3(depthRecv, depthShift, depthIdx);
828:   return(0);
829: }

831: /*@
832:   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.

834:   Input Parameters:
835: + dm          - The DM
836: - sf          - A star forest with non-ordered leaves, usually defining a DM point migration

838:   Output Parameter:
839: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

841:   Level: developer

843: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
844: @*/
845: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
846: {
847:   MPI_Comm           comm;
848:   PetscMPIInt        rank, size;
849:   PetscInt           d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
850:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
851:   PetscInt          *depthRecv, *depthShift, *depthIdx;
852:   PetscInt           hybEnd[4];
853:   const PetscSFNode *iremote;
854:   PetscErrorCode     ierr;

858:   PetscObjectGetComm((PetscObject) dm, &comm);
859:   MPI_Comm_rank(comm, &rank);
860:   MPI_Comm_size(comm, &size);
861:   DMPlexGetDepth(dm, &ldepth);
862:   MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
863:   if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);

865:   /* Before building the migration SF we need to know the new stratum offsets */
866:   PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
867:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
868:   DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[depth-1],&hybEnd[1],&hybEnd[0]);
869:   for (d = 0; d < depth+1; ++d) {
870:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
871:     for (p = pStart; p < pEnd; ++p) {
872:       if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */
873:         pointDepths[p] = 2 * d;
874:       } else {
875:         pointDepths[p] = 2 * d + 1;
876:       }
877:     }
878:   }
879:   for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
880:   PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);
881:   PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);
882:   /* Count received points in each stratum and compute the internal strata shift */
883:   PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);
884:   for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0;
885:   for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
886:   depthShift[2*depth+1] = 0;
887:   for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1];
888:   for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth];
889:   depthShift[0] += depthRecv[1];
890:   for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1];
891:   for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0];
892:   for (d = 2 * depth-1; d > 2; --d) {
893:     PetscInt e;

895:     for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d];
896:   }
897:   for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;}
898:   /* Derive a new local permutation based on stratified indices */
899:   PetscMalloc1(nleaves, &ilocal);
900:   for (p = 0; p < nleaves; ++p) {
901:     const PetscInt dep = remoteDepths[p];

903:     ilocal[p] = depthShift[dep] + depthIdx[dep];
904:     depthIdx[dep]++;
905:   }
906:   PetscSFCreate(comm, migrationSF);
907:   PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
908:   PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
909:   PetscFree2(pointDepths,remoteDepths);
910:   PetscFree3(depthRecv, depthShift, depthIdx);
911:   return(0);
912: }

914: /*@
915:   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

917:   Collective on DM

919:   Input Parameters:
920: + dm - The DMPlex object
921: . pointSF - The PetscSF describing the communication pattern
922: . originalSection - The PetscSection for existing data layout
923: - originalVec - The existing data

925:   Output Parameters:
926: + newSection - The PetscSection describing the new data layout
927: - newVec - The new data

929:   Level: developer

931: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
932: @*/
933: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
934: {
935:   PetscSF        fieldSF;
936:   PetscInt      *remoteOffsets, fieldSize;
937:   PetscScalar   *originalValues, *newValues;

941:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
942:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

944:   PetscSectionGetStorageSize(newSection, &fieldSize);
945:   VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
946:   VecSetType(newVec,dm->vectype);

948:   VecGetArray(originalVec, &originalValues);
949:   VecGetArray(newVec, &newValues);
950:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
951:   PetscFree(remoteOffsets);
952:   PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
953:   PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
954:   PetscSFDestroy(&fieldSF);
955:   VecRestoreArray(newVec, &newValues);
956:   VecRestoreArray(originalVec, &originalValues);
957:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
958:   return(0);
959: }
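/* Illustrative sketch, not part of plexdistribute.c: migrating a field Vec with
   DMPlexDistributeField(), following the pattern used for coordinates later in this file
   (the target section and Vec are created empty; the routine sets their layout and type).
   The function name MigrateField and the choice of PETSC_COMM_SELF for the new Vec are
   assumptions of this sketch. */
static PetscErrorCode MigrateField(DM dm, PetscSF migrationSF, PetscSection origSection, Vec origVec, PetscSection *newSection, Vec *newVec)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), newSection);CHKERRQ(ierr);
  ierr = VecCreate(PETSC_COMM_SELF, newVec);CHKERRQ(ierr);
  ierr = DMPlexDistributeField(dm, migrationSF, origSection, origVec, *newSection, *newVec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}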

961: /*@
962:   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

964:   Collective on DM

966:   Input Parameters:
967: + dm - The DMPlex object
968: . pointSF - The PetscSF describing the communication pattern
969: . originalSection - The PetscSection for existing data layout
970: - originalIS - The existing data

972:   Output Parameters:
973: + newSection - The PetscSection describing the new data layout
974: - newIS - The new data

976:   Level: developer

978: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
979: @*/
980: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
981: {
982:   PetscSF         fieldSF;
983:   PetscInt       *newValues, *remoteOffsets, fieldSize;
984:   const PetscInt *originalValues;
985:   PetscErrorCode  ierr;

988:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
989:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

991:   PetscSectionGetStorageSize(newSection, &fieldSize);
992:   PetscMalloc1(fieldSize, &newValues);

994:   ISGetIndices(originalIS, &originalValues);
995:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
996:   PetscFree(remoteOffsets);
997:   PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
998:   PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
999:   PetscSFDestroy(&fieldSF);
1000:   ISRestoreIndices(originalIS, &originalValues);
1001:   ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
1002:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
1003:   return(0);
1004: }

1006: /*@
1007:   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

1009:   Collective on DM

1011:   Input Parameters:
1012: + dm - The DMPlex object
1013: . pointSF - The PetscSF describing the communication pattern
1014: . originalSection - The PetscSection for existing data layout
1015: . datatype - The type of data
1016: - originalData - The existing data

1018:   Output Parameters:
1019: + newSection - The PetscSection describing the new data layout
1020: - newData - The new data

1022:   Level: developer

1024: .seealso: DMPlexDistribute(), DMPlexDistributeField()
1025: @*/
1026: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
1027: {
1028:   PetscSF        fieldSF;
1029:   PetscInt      *remoteOffsets, fieldSize;
1030:   PetscMPIInt    dataSize;

1034:   PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
1035:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

1037:   PetscSectionGetStorageSize(newSection, &fieldSize);
1038:   MPI_Type_size(datatype, &dataSize);
1039:   PetscMalloc(fieldSize * dataSize, newData);

1041:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
1042:   PetscFree(remoteOffsets);
1043:   PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
1044:   PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
1045:   PetscSFDestroy(&fieldSF);
1046:   PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
1047:   return(0);
1048: }
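/* Illustrative usage sketch, not part of plexdistribute.c: distributing an array of PetscInt
   markers laid out by origSection (newSection must already be created, e.g. with
   PetscSectionCreate()); the variable names are examples only,

     PetscInt *newMarkers;
     ierr = DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origMarkers, newSection, (void **) &newMarkers);CHKERRQ(ierr);

   The routine allocates newMarkers with PetscMalloc(), so the caller frees it with PetscFree(). */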

1050: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1051: {
1052:   DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
1053:   MPI_Comm               comm;
1054:   PetscSF                coneSF;
1055:   PetscSection           originalConeSection, newConeSection;
1056:   PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
1057:   PetscBool              flg;
1058:   PetscErrorCode         ierr;


1064:   PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
1065:   /* Distribute cone section */
1066:   PetscObjectGetComm((PetscObject)dm, &comm);
1067:   DMPlexGetConeSection(dm, &originalConeSection);
1068:   DMPlexGetConeSection(dmParallel, &newConeSection);
1069:   PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
1070:   DMSetUp(dmParallel);
1071:   {
1072:     PetscInt pStart, pEnd, p;

1074:     PetscSectionGetChart(newConeSection, &pStart, &pEnd);
1075:     for (p = pStart; p < pEnd; ++p) {
1076:       PetscInt coneSize;
1077:       PetscSectionGetDof(newConeSection, p, &coneSize);
1078:       pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
1079:     }
1080:   }
1081:   /* Communicate and renumber cones */
1082:   PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
1083:   PetscFree(remoteOffsets);
1084:   DMPlexGetCones(dm, &cones);
1085:   if (original) {
1086:     PetscInt numCones;

1088:     PetscSectionGetStorageSize(originalConeSection,&numCones); PetscMalloc1(numCones,&globCones);
1089:     ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
1090:   }
1091:   else {
1092:     globCones = cones;
1093:   }
1094:   DMPlexGetCones(dmParallel, &newCones);
1095:   PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);
1096:   PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);
1097:   if (original) {
1098:     PetscFree(globCones);
1099:   }
1100:   PetscSectionGetStorageSize(newConeSection, &newConesSize);
1101:   ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
1102: #if defined(PETSC_USE_DEBUG)
1103:   {
1104:     PetscInt  p;
1105:     PetscBool valid = PETSC_TRUE;
1106:     for (p = 0; p < newConesSize; ++p) {
1107:       if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1108:     }
1109:     if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1110:   }
1111: #endif
1112:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
1113:   if (flg) {
1114:     PetscPrintf(comm, "Serial Cone Section:\n");
1115:     PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);
1116:     PetscPrintf(comm, "Parallel Cone Section:\n");
1117:     PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);
1118:     PetscSFView(coneSF, NULL);
1119:   }
1120:   DMPlexGetConeOrientations(dm, &cones);
1121:   DMPlexGetConeOrientations(dmParallel, &newCones);
1122:   PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
1123:   PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
1124:   PetscSFDestroy(&coneSF);
1125:   PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
1126:   /* Create supports and stratify DMPlex */
1127:   {
1128:     PetscInt pStart, pEnd;

1130:     PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
1131:     PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
1132:   }
1133:   DMPlexSymmetrize(dmParallel);
1134:   DMPlexStratify(dmParallel);
1135:   {
1136:     PetscBool useCone, useClosure, useAnchors;

1138:     DMPlexGetAdjacencyUseCone(dm, &useCone);
1139:     DMPlexGetAdjacencyUseClosure(dm, &useClosure);
1140:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1141:     DMPlexSetAdjacencyUseCone(dmParallel, useCone);
1142:     DMPlexSetAdjacencyUseClosure(dmParallel, useClosure);
1143:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1144:   }
1145:   return(0);
1146: }

1148: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1149: {
1150:   MPI_Comm         comm;
1151:   PetscSection     originalCoordSection, newCoordSection;
1152:   Vec              originalCoordinates, newCoordinates;
1153:   PetscInt         bs;
1154:   PetscBool        isper;
1155:   const char      *name;
1156:   const PetscReal *maxCell, *L;
1157:   const DMBoundaryType *bd;
1158:   PetscErrorCode   ierr;


1164:   PetscObjectGetComm((PetscObject)dm, &comm);
1165:   DMGetCoordinateSection(dm, &originalCoordSection);
1166:   DMGetCoordinateSection(dmParallel, &newCoordSection);
1167:   DMGetCoordinatesLocal(dm, &originalCoordinates);
1168:   if (originalCoordinates) {
1169:     VecCreate(PETSC_COMM_SELF, &newCoordinates);
1170:     PetscObjectGetName((PetscObject) originalCoordinates, &name);
1171:     PetscObjectSetName((PetscObject) newCoordinates, name);

1173:     DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1174:     DMSetCoordinatesLocal(dmParallel, newCoordinates);
1175:     VecGetBlockSize(originalCoordinates, &bs);
1176:     VecSetBlockSize(newCoordinates, bs);
1177:     VecDestroy(&newCoordinates);
1178:   }
1179:   DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);
1180:   DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);
1181:   return(0);
1182: }

1184: /* Here we are assuming that process 0 always has everything */
1185: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1186: {
1187:   DM_Plex         *mesh = (DM_Plex*) dm->data;
1188:   MPI_Comm         comm;
1189:   DMLabel          depthLabel;
1190:   PetscMPIInt      rank;
1191:   PetscInt         depth, d, numLabels, numLocalLabels, l;
1192:   PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1193:   PetscObjectState depthState = -1;
1194:   PetscErrorCode   ierr;


1200:   PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1201:   PetscObjectGetComm((PetscObject)dm, &comm);
1202:   MPI_Comm_rank(comm, &rank);

1204:   /* If the user has changed the depth label, communicate it instead */
1205:   DMPlexGetDepth(dm, &depth);
1206:   DMPlexGetDepthLabel(dm, &depthLabel);
1207:   if (depthLabel) {DMLabelGetState(depthLabel, &depthState);}
1208:   lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1209:   MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1210:   if (sendDepth) {
1211:     DMRemoveLabel(dmParallel, "depth", &depthLabel);
1212:     DMLabelDestroy(&depthLabel);
1213:   }
1214:   /* Everyone must have either the same number of labels, or none */
1215:   DMGetNumLabels(dm, &numLocalLabels);
1216:   numLabels = numLocalLabels;
1217:   MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1218:   if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1219:   for (l = numLabels-1; l >= 0; --l) {
1220:     DMLabel     label = NULL, labelNew = NULL;
1221:     PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;

1223:     if (hasLabels) {DMGetLabelByNum(dm, l, &label);}
1224:     /* Skip "depth" because it is recreated */
1225:     if (hasLabels) {PetscStrcmp(label->name, "depth", &isDepth);}
1226:     MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);
1227:     if (isDepth && !sendDepth) continue;
1228:     DMLabelDistribute(label, migrationSF, &labelNew);
1229:     if (isDepth) {
1230:       /* Put in any missing strata which can occur if users are managing the depth label themselves */
1231:       PetscInt gdepth;

1233:       MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1234:       if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1235:       for (d = 0; d <= gdepth; ++d) {
1236:         PetscBool has;

1238:         DMLabelHasStratum(labelNew, d, &has);
1239:         if (!has) {DMLabelAddStratum(labelNew, d);}
1240:       }
1241:     }
1242:     DMAddLabel(dmParallel, labelNew);
1243:     /* Put the output flag in the new label */
1244:     if (hasLabels) {DMGetLabelOutput(dm, label->name, &lisOutput);}
1245:     MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);
1246:     DMSetLabelOutput(dmParallel, labelNew->name, isOutput);
1247:   }
1248:   PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1249:   return(0);
1250: }

1252: static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1253: {
1254:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1255:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1256:   PetscBool      *isHybrid, *isHybridParallel;
1257:   PetscInt        dim, depth, d;
1258:   PetscInt        pStart, pEnd, pStartP, pEndP;
1259:   PetscErrorCode  ierr;


1265:   DMGetDimension(dm, &dim);
1266:   DMPlexGetDepth(dm, &depth);
1267:   DMPlexGetChart(dm,&pStart,&pEnd);
1268:   DMPlexGetChart(dmParallel,&pStartP,&pEndP);
1269:   PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);
1270:   for (d = 0; d <= depth; d++) {
1271:     PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d];

1273:     if (hybridMax >= 0) {
1274:       PetscInt sStart, sEnd, p;

1276:       DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);
1277:       for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE;
1278:     }
1279:   }
1280:   PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1281:   PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1282:   for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
1283:   for (d = 0; d <= depth; d++) {
1284:     PetscInt sStart, sEnd, p, dd;

1286:     DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);
1287:     dd = (depth == 1 && d == 1) ? dim : d;
1288:     for (p = sStart; p < sEnd; p++) {
1289:       if (isHybridParallel[p-pStartP]) {
1290:         pmesh->hybridPointMax[dd] = p;
1291:         break;
1292:       }
1293:     }
1294:   }
1295:   PetscFree2(isHybrid,isHybridParallel);
1296:   return(0);
1297: }

1299: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1300: {
1301:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1302:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1303:   MPI_Comm        comm;
1304:   DM              refTree;
1305:   PetscSection    origParentSection, newParentSection;
1306:   PetscInt        *origParents, *origChildIDs;
1307:   PetscBool       flg;
1308:   PetscErrorCode  ierr;

1313:   PetscObjectGetComm((PetscObject)dm, &comm);

1315:   /* Set up tree */
1316:   DMPlexGetReferenceTree(dm,&refTree);
1317:   DMPlexSetReferenceTree(dmParallel,refTree);
1318:   DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1319:   if (origParentSection) {
1320:     PetscInt        pStart, pEnd;
1321:     PetscInt        *newParents, *newChildIDs, *globParents;
1322:     PetscInt        *remoteOffsetsParents, newParentSize;
1323:     PetscSF         parentSF;

1325:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1326:     PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1327:     PetscSectionSetChart(newParentSection,pStart,pEnd);
1328:     PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1329:     PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1330:     PetscFree(remoteOffsetsParents);
1331:     PetscSectionGetStorageSize(newParentSection,&newParentSize);
1332:     PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1333:     if (original) {
1334:       PetscInt numParents;

1336:       PetscSectionGetStorageSize(origParentSection,&numParents);
1337:       PetscMalloc1(numParents,&globParents);
1338:       ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1339:     }
1340:     else {
1341:       globParents = origParents;
1342:     }
1343:     PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);
1344:     PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);
1345:     if (original) {
1346:       PetscFree(globParents);
1347:     }
1348:     PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1349:     PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1350:     ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1351: #if defined(PETSC_USE_DEBUG)
1352:     {
1353:       PetscInt  p;
1354:       PetscBool valid = PETSC_TRUE;
1355:       for (p = 0; p < newParentSize; ++p) {
1356:         if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1357:       }
1358:       if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1359:     }
1360: #endif
1361:     PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1362:     if (flg) {
1363:       PetscPrintf(comm, "Serial Parent Section: \n");
1364:       PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);
1365:       PetscPrintf(comm, "Parallel Parent Section: \n");
1366:       PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);
1367:       PetscSFView(parentSF, NULL);
1368:     }
1369:     DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1370:     PetscSectionDestroy(&newParentSection);
1371:     PetscFree2(newParents,newChildIDs);
1372:     PetscSFDestroy(&parentSF);
1373:   }
1374:   pmesh->useAnchors = mesh->useAnchors;
1375:   return(0);
1376: }

1378: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1379: {
1380:   PetscMPIInt            rank, size;
1381:   MPI_Comm               comm;
1382:   PetscErrorCode         ierr;


1388:   /* Create point SF for parallel mesh */
1389:   PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1390:   PetscObjectGetComm((PetscObject)dm, &comm);
1391:   MPI_Comm_rank(comm, &rank);
1392:   MPI_Comm_size(comm, &size);
1393:   {
1394:     const PetscInt *leaves;
1395:     PetscSFNode    *remotePoints, *rowners, *lowners;
1396:     PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1397:     PetscInt        pStart, pEnd;

1399:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1400:     PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1401:     PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1402:     for (p=0; p<numRoots; p++) {
1403:       rowners[p].rank  = -1;
1404:       rowners[p].index = -1;
1405:     }
1406:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1407:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1408:     for (p = 0; p < numLeaves; ++p) {
1409:       if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1410:         lowners[p].rank  = rank;
1411:         lowners[p].index = leaves ? leaves[p] : p;
1412:       } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
1413:         lowners[p].rank  = -2;
1414:         lowners[p].index = -2;
1415:       }
1416:     }
1417:     for (p=0; p<numRoots; p++) { /* Roots must not participate in the reduction, flag so that MAXLOC does not use them */
1418:       rowners[p].rank  = -3;
1419:       rowners[p].index = -3;
1420:     }
1421:     PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1422:     PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1423:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1424:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1425:     for (p = 0; p < numLeaves; ++p) {
1426:       if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1427:       if (lowners[p].rank != rank) ++numGhostPoints;
1428:     }
1429:     PetscMalloc1(numGhostPoints, &ghostPoints);
1430:     PetscMalloc1(numGhostPoints, &remotePoints);
1431:     for (p = 0, gp = 0; p < numLeaves; ++p) {
1432:       if (lowners[p].rank != rank) {
1433:         ghostPoints[gp]        = leaves ? leaves[p] : p;
1434:         remotePoints[gp].rank  = lowners[p].rank;
1435:         remotePoints[gp].index = lowners[p].index;
1436:         ++gp;
1437:       }
1438:     }
1439:     PetscFree2(rowners,lowners);
1440:     PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1441:     PetscSFSetFromOptions((dmParallel)->sf);
1442:   }
1443:   {
1444:     PetscBool useCone, useClosure, useAnchors;

1446:     DMPlexGetAdjacencyUseCone(dm, &useCone);
1447:     DMPlexGetAdjacencyUseClosure(dm, &useClosure);
1448:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1449:     DMPlexSetAdjacencyUseCone(dmParallel, useCone);
1450:     DMPlexSetAdjacencyUseClosure(dmParallel, useClosure);
1451:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1452:   }
1453:   PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1454:   return(0);
1455: }

1457: /*@
1458:   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?

1460:   Input Parameters:
1461: + dm - The DMPlex object
1462: - flg - Balance the partition?

1464:   Level: intermediate
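
  Example:
  A minimal sketch (error checking omitted; the input DM dm and the output DM dmDist are assumed to be
  declared by the caller) that requests a balanced shared-point partition before distributing:
$    DMPlexSetPartitionBalance(dm, PETSC_TRUE);
$    DMPlexDistribute(dm, 0, NULL, &dmDist);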

1466: .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1467: @*/
1468: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1469: {
1470:   DM_Plex *mesh = (DM_Plex *)dm->data;

1473:   mesh->partitionBalance = flg;
1474:   return(0);
1475: }

1477: /*@
1478:   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?

1480:   Input Parameter:
1481: . dm - The DMPlex object

1483:   Output Parameter:
1484: . flg - Balance the partition?

1486:   Level: intermediate

1488: .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1489: @*/
1490: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1491: {
1492:   DM_Plex *mesh = (DM_Plex *)dm->data;

1497:   *flg = mesh->partitionBalance;
1498:   return(0);
1499: }

1501: /*@C
1502:   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

1504:   Input Parameters:
1505: + dm          - The source DMPlex object
1506: . migrationSF - The star forest that describes the parallel point remapping
1507: - ownership   - Flag causing a vote to determine point ownership

1509:   Output Parameter:
1510: . pointSF     - The star forest describing the point overlap in the remapped DM

1512:   Level: developer
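
  Example:
  A minimal sketch (error checking omitted; assumes a migration star forest sfMigration, e.g. from
  DMPlexPartitionLabelCreateSF(), and the migrated DM dmParallel) mirroring how DMPlexDistribute() uses this routine:
$    PetscSF pointSF;
$    DMPlexCreatePointSF(dmParallel, sfMigration, PETSC_TRUE, &pointSF);
$    DMSetPointSF(dmParallel, pointSF);
$    PetscSFDestroy(&pointSF);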

1514: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1515: @*/
1516: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1517: {
1518:   PetscMPIInt        rank, size;
1519:   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1520:   PetscInt          *pointLocal;
1521:   const PetscInt    *leaves;
1522:   const PetscSFNode *roots;
1523:   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1524:   Vec                shifts;
1525:   const PetscInt     numShifts = 13759;
1526:   const PetscScalar *shift = NULL;
1527:   const PetscBool    shiftDebug = PETSC_FALSE;
1528:   PetscBool          balance;
1529:   PetscErrorCode     ierr;

1533:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1534:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);

1536:   DMPlexGetPartitionBalance(dm, &balance);
1537:   PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1538:   PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1539:   if (ownership) {
1540:     /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1541:     if (balance) {
1542:       PetscRandom r;

1544:       PetscRandomCreate(PETSC_COMM_SELF, &r);
1545:       PetscRandomSetInterval(r, 0, 2467*size);
1546:       VecCreate(PETSC_COMM_SELF, &shifts);
1547:       VecSetSizes(shifts, numShifts, numShifts);
1548:       VecSetType(shifts, VECSTANDARD);
1549:       VecSetRandom(shifts, r);
1550:       PetscRandomDestroy(&r);
1551:       VecGetArrayRead(shifts, &shift);
1552:     }

1554:     /* Point ownership vote: Process with highest rank owns shared points */
1555:     for (p = 0; p < nleaves; ++p) {
1556:       if (shiftDebug) {
1557:         PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);
1558:       }
1559:       /* Either put in a bid or we know we own it */
1560:       leafNodes[p].rank  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
1561:       leafNodes[p].index = p;
1562:     }
1563:     for (p = 0; p < nroots; p++) {
1564:       /* Roots must not participate in the reduction; flag them so that MAXLOC ignores them */
1565:       rootNodes[p].rank  = -3;
1566:       rootNodes[p].index = -3;
1567:     }
1568:     PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1569:     PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1570:     if (balance) {
1571:       /* We've voted, now we need to get the rank.  When we're balancing the partition, the "rank" in rootNodes is not
1572:        * the rank but rather (rank + random)%size.  So we do another reduction, voting the same way, but sending the
1573:        * rank instead of the index. */
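      /* For example, with size = 4 a leaf on true rank 2 that draws shift 3 bids with (2 + 3) % 4 = 1 above;
       * this second reduction votes with the same shifted key but carries the true rank in the index field,
       * so copying rootRanks[p].index into rootNodes[p].rank below recovers the actual owning rank. */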
1574:       PetscSFNode *rootRanks = NULL;
1575:       PetscMalloc1(nroots, &rootRanks);
1576:       for (p = 0; p < nroots; p++) {
1577:         rootRanks[p].rank = -3;
1578:         rootRanks[p].index = -3;
1579:       }
1580:       for (p = 0; p < nleaves; p++) leafNodes[p].index = rank;
1581:       PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);
1582:       PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);
1583:       for (p = 0; p < nroots; p++) rootNodes[p].rank = rootRanks[p].index;
1584:       PetscFree(rootRanks);
1585:     }
1586:   } else {
1587:     for (p = 0; p < nroots; p++) {
1588:       rootNodes[p].index = -1;
1589:       rootNodes[p].rank = rank;
1590:     }
1591:     for (p = 0; p < nleaves; p++) {
1592:       /* Write new local id into old location */
1593:       if (roots[p].rank == rank) {
1594:         rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1595:       }
1596:     }
1597:   }
1598:   PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1599:   PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);

1601:   for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1602:     if (leafNodes[p].rank != rank) npointLeaves++;
1603:   }
1604:   PetscMalloc1(npointLeaves, &pointLocal);
1605:   PetscMalloc1(npointLeaves, &pointRemote);
1606:   for (idx = 0, p = 0; p < nleaves; p++) {
1607:     if (leafNodes[p].rank != rank) {
1608:       pointLocal[idx] = p;
1609:       pointRemote[idx] = leafNodes[p];
1610:       idx++;
1611:     }
1612:   }
1613:   if (shift) {
1614:     VecRestoreArrayRead(shifts, &shift);
1615:     VecDestroy(&shifts);
1616:   }
1617:   if (shiftDebug) {PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);}
1618:   PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1619:   PetscSFSetFromOptions(*pointSF);
1620:   PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1621:   PetscFree2(rootNodes, leafNodes);
1622:   return(0);
1623: }

1625: /*@C
1626:   DMPlexMigrate  - Migrates internal DM data over the supplied star forest
1627:   
1628:   Collective on DM and PetscSF

1630:   Input Parameters:
1631: + dm       - The source DMPlex object
1632: - sf       - The star forest communication context describing the migration pattern

1634:   Output Parameter:
1635: . targetDM - The target DMPlex object

1637:   Level: intermediate
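
  Example:
  A minimal sketch (error checking omitted; assumes a communicator comm, a source DM dm, and a migration
  star forest sfMigration, e.g. from DMPlexPartitionLabelCreateSF()):
$    DM dmTarget;
$    DMPlexCreate(comm, &dmTarget);
$    DMPlexMigrate(dm, sfMigration, dmTarget);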

1639: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1640: @*/
1641: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1642: {
1643:   MPI_Comm               comm;
1644:   PetscInt               dim, cdim, nroots;
1645:   PetscSF                sfPoint;
1646:   ISLocalToGlobalMapping ltogMigration;
1647:   ISLocalToGlobalMapping ltogOriginal = NULL;
1648:   PetscBool              flg;
1649:   PetscErrorCode         ierr;

1653:   PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1654:   PetscObjectGetComm((PetscObject) dm, &comm);
1655:   DMGetDimension(dm, &dim);
1656:   DMSetDimension(targetDM, dim);
1657:   DMGetCoordinateDim(dm, &cdim);
1658:   DMSetCoordinateDim(targetDM, cdim);

1660:   /* Check for a one-to-all distribution pattern */
1661:   DMGetPointSF(dm, &sfPoint);
1662:   PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1663:   if (nroots >= 0) {
1664:     IS                     isOriginal;
1665:     PetscInt               n, size, nleaves;
1666:     PetscInt              *numbering_orig, *numbering_new;
1667:     /* Get the original point numbering */
1668:     DMPlexCreatePointNumbering(dm, &isOriginal);
1669:     ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);
1670:     ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1671:     ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1672:     /* Convert to positive global numbers */
1673:     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1674:     /* Derive the new local-to-global mapping from the old one */
1675:     PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1676:     PetscMalloc1(nleaves, &numbering_new);
1677:     PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1678:     PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1679:     ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1680:     ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1681:     ISDestroy(&isOriginal);
1682:   } else {
1683:     /* One-to-all distribution pattern: We can derive LToG from SF */
1684:     ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1685:   }
1686:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1687:   if (flg) {
1688:     PetscPrintf(comm, "Point renumbering for DM migration:\n");
1689:     ISLocalToGlobalMappingView(ltogMigration, NULL);
1690:   }
1691:   /* Migrate DM data to target DM */
1692:   DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1693:   DMPlexDistributeLabels(dm, sf, targetDM);
1694:   DMPlexDistributeCoordinates(dm, sf, targetDM);
1695:   DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);
1696:   DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1697:   ISLocalToGlobalMappingDestroy(&ltogOriginal);
1698:   ISLocalToGlobalMappingDestroy(&ltogMigration);
1699:   PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1700:   return(0);
1701: }

1703: /*@C
1704:   DMPlexDistribute - Distributes the mesh and any associated sections.

1706:   Collective on DM

1708:   Input Parameters:
1709: + dm  - The original DMPlex object
1710: - overlap - The overlap of partitions, 0 is the default

1712:   Output Parameters:
1713: + sf - The PetscSF used for point distribution, or NULL if not needed
1714: - dmParallel - The distributed DMPlex object

1716:   Note: If the mesh was not distributed, the output dmParallel will be NULL.

1718:   The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1719:   DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1720:   representation on the mesh.

1722:   Level: intermediate
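
  Example:
  A minimal sketch (error checking omitted) that distributes a DMPlex dm with one level of cell overlap,
  following the common replace-the-original-mesh idiom:
$    DM      dmDist = NULL;
$    PetscSF sfDist = NULL;
$    DMPlexDistribute(dm, 1, &sfDist, &dmDist);
$    if (dmDist) {
$      DMDestroy(&dm);
$      dm = dmDist;
$    }
$    PetscSFDestroy(&sfDist);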

1724: .keywords: mesh, elements
1725: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1726: @*/
1727: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1728: {
1729:   MPI_Comm               comm;
1730:   PetscPartitioner       partitioner;
1731:   IS                     cellPart;
1732:   PetscSection           cellPartSection;
1733:   DM                     dmCoord;
1734:   DMLabel                lblPartition, lblMigration;
1735:   PetscSF                sfProcess, sfMigration, sfStratified, sfPoint;
1736:   PetscBool              flg, balance;
1737:   PetscMPIInt            rank, size, p;
1738:   PetscErrorCode         ierr;


1746:   if (sf) *sf = NULL;
1747:   *dmParallel = NULL;
1748:   PetscObjectGetComm((PetscObject)dm,&comm);
1749:   MPI_Comm_rank(comm, &rank);
1750:   MPI_Comm_size(comm, &size);
1751:   if (size == 1) return(0);

1753:   PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1754:   /* Create cell partition */
1755:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1756:   PetscSectionCreate(comm, &cellPartSection);
1757:   DMPlexGetPartitioner(dm, &partitioner);
1758:   PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);
1759:   {
1760:     /* Convert partition to DMLabel */
1761:     PetscInt proc, pStart, pEnd, npoints, poffset;
1762:     const PetscInt *points;
1763:     DMLabelCreate("Point Partition", &lblPartition);
1764:     ISGetIndices(cellPart, &points);
1765:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1766:     for (proc = pStart; proc < pEnd; proc++) {
1767:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1768:       PetscSectionGetOffset(cellPartSection, proc, &poffset);
1769:       for (p = poffset; p < poffset+npoints; p++) {
1770:         DMLabelSetValue(lblPartition, points[p], proc);
1771:       }
1772:     }
1773:     ISRestoreIndices(cellPart, &points);
1774:   }
1775:   DMPlexPartitionLabelClosure(dm, lblPartition);
1776:   {
1777:     /* Build a global process SF */
1778:     PetscSFNode *remoteProc;
1779:     PetscMalloc1(size, &remoteProc);
1780:     for (p = 0; p < size; ++p) {
1781:       remoteProc[p].rank  = p;
1782:       remoteProc[p].index = rank;
1783:     }
1784:     PetscSFCreate(comm, &sfProcess);
1785:     PetscObjectSetName((PetscObject) sfProcess, "Process SF");
1786:     PetscSFSetGraph(sfProcess, size, size, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
1787:   }
1788:   DMLabelCreate("Point migration", &lblMigration);
1789:   DMPlexPartitionLabelInvert(dm, lblPartition, sfProcess, lblMigration);
1790:   DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1791:   /* Stratify the SF in case we are migrating an already parallel plex */
1792:   DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1793:   PetscSFDestroy(&sfMigration);
1794:   sfMigration = sfStratified;
1795:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1796:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1797:   if (flg) {
1798:     DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);
1799:     PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);
1800:   }

1802:   /* Create non-overlapping parallel DM and migrate internal data */
1803:   DMPlexCreate(comm, dmParallel);
1804:   PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1805:   DMPlexMigrate(dm, sfMigration, *dmParallel);

1807:   /* Build the point SF without overlap */
1808:   DMPlexGetPartitionBalance(dm, &balance);
1809:   DMPlexSetPartitionBalance(*dmParallel, balance);
1810:   DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1811:   DMSetPointSF(*dmParallel, sfPoint);
1812:   DMGetCoordinateDM(*dmParallel, &dmCoord);
1813:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1814:   if (flg) {PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);}

1816:   if (overlap > 0) {
1817:     DM                 dmOverlap;
1818:     PetscInt           nroots, nleaves;
1819:     PetscSFNode       *newRemote;
1820:     const PetscSFNode *oldRemote;
1821:     PetscSF            sfOverlap, sfOverlapPoint;
1822:     /* Add the partition overlap to the distributed DM */
1823:     DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1824:     DMDestroy(dmParallel);
1825:     *dmParallel = dmOverlap;
1826:     if (flg) {
1827:       PetscPrintf(comm, "Overlap Migration SF:\n");
1828:       PetscSFView(sfOverlap, NULL);
1829:     }

1831:     /* Re-map the migration SF to establish the full migration pattern */
1832:     PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);
1833:     PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1834:     PetscMalloc1(nleaves, &newRemote);
1835:     PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1836:     PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1837:     PetscSFCreate(comm, &sfOverlapPoint);
1838:     PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1839:     PetscSFDestroy(&sfOverlap);
1840:     PetscSFDestroy(&sfMigration);
1841:     sfMigration = sfOverlapPoint;
1842:   }
1843:   /* Cleanup Partition */
1844:   PetscSFDestroy(&sfProcess);
1845:   DMLabelDestroy(&lblPartition);
1846:   DMLabelDestroy(&lblMigration);
1847:   PetscSectionDestroy(&cellPartSection);
1848:   ISDestroy(&cellPart);
1849:   /* Copy BC */
1850:   DMCopyBoundary(dm, *dmParallel);
1851:   /* Create sfNatural */
1852:   if (dm->useNatural) {
1853:     PetscSection section;

1855:     DMGetSection(dm, &section);
1856:     DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1857:     DMSetUseNatural(*dmParallel, PETSC_TRUE);
1858:   }
1859:   /* Cleanup */
1860:   if (sf) {*sf = sfMigration;}
1861:   else    {PetscSFDestroy(&sfMigration);}
1862:   PetscSFDestroy(&sfPoint);
1863:   PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1864:   return(0);
1865: }

1867: /*@C
1868:   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

1870:   Collective on DM

1872:   Input Parameters:
1873: + dm  - The non-overlapping distributed DMPlex object
1874: - overlap - The overlap of partitions

1876:   Output Parameters:
1877: + sf - The PetscSF used for point distribution
1878: - dmOverlap - The overlapping distributed DMPlex object, or NULL

1880:   Note: If the mesh was not distributed, the output dmOverlap will be NULL.

1882:   The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1883:   DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1884:   representation on the mesh.

1886:   Level: intermediate
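
  Example:
  A minimal sketch (error checking omitted; assumes dm is already a distributed, non-overlapping DMPlex)
  that adds one level of overlap and swaps it in:
$    DM dmOverlap = NULL;
$    DMPlexDistributeOverlap(dm, 1, NULL, &dmOverlap);
$    if (dmOverlap) {
$      DMDestroy(&dm);
$      dm = dmOverlap;
$    }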

1888: .keywords: mesh, elements
1889: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1890: @*/
1891: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1892: {
1893:   MPI_Comm               comm;
1894:   PetscMPIInt            size, rank;
1895:   PetscSection           rootSection, leafSection;
1896:   IS                     rootrank, leafrank;
1897:   DM                     dmCoord;
1898:   DMLabel                lblOverlap;
1899:   PetscSF                sfOverlap, sfStratified, sfPoint;
1900:   PetscErrorCode         ierr;


1907:   if (sf) *sf = NULL;
1908:   *dmOverlap  = NULL;
1909:   PetscObjectGetComm((PetscObject)dm,&comm);
1910:   MPI_Comm_size(comm, &size);
1911:   MPI_Comm_rank(comm, &rank);
1912:   if (size == 1) return(0);

1914:   PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1915:   /* Compute point overlap with neighbouring processes on the distributed DM */
1916:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1917:   PetscSectionCreate(comm, &rootSection);
1918:   PetscSectionCreate(comm, &leafSection);
1919:   DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1920:   DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1921:   /* Convert overlap label to stratified migration SF */
1922:   DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1923:   DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1924:   PetscSFDestroy(&sfOverlap);
1925:   sfOverlap = sfStratified;
1926:   PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1927:   PetscSFSetFromOptions(sfOverlap);

1929:   PetscSectionDestroy(&rootSection);
1930:   PetscSectionDestroy(&leafSection);
1931:   ISDestroy(&rootrank);
1932:   ISDestroy(&leafrank);
1933:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);

1935:   /* Build the overlapping DM */
1936:   DMPlexCreate(comm, dmOverlap);
1937:   PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1938:   DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1939:   /* Build the new point SF */
1940:   DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1941:   DMSetPointSF(*dmOverlap, sfPoint);
1942:   DMGetCoordinateDM(*dmOverlap, &dmCoord);
1943:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1944:   PetscSFDestroy(&sfPoint);
1945:   /* Cleanup overlap partition */
1946:   DMLabelDestroy(&lblOverlap);
1947:   if (sf) *sf = sfOverlap;
1948:   else    {PetscSFDestroy(&sfOverlap);}
1949:   PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1950:   return(0);
1951: }

1953: /*@C
1954:   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1955:   root process of the original's communicator.
1956:   
1957:   Collective on DM

1959:   Input Parameter:
1960: . dm - the original DMPlex object

1962:   Output Parameter:
1963: . gatherMesh - the gathered DM object, or NULL

1965:   Level: intermediate
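
  Example:
  A minimal sketch (error checking omitted; assumes dm lives on PETSC_COMM_WORLD) that gathers the mesh
  onto the root process and views the result:
$    DM dmGather = NULL;
$    DMPlexGetGatherDM(dm, &dmGather);
$    if (dmGather) {DMView(dmGather, PETSC_VIEWER_STDOUT_WORLD);}
$    DMDestroy(&dmGather);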

1967: .keywords: mesh
1968: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1969: @*/
1970: PetscErrorCode DMPlexGetGatherDM(DM dm, DM *gatherMesh)
1971: {
1972:   MPI_Comm       comm;
1973:   PetscMPIInt    size;
1974:   PetscPartitioner oldPart, gatherPart;

1980:   *gatherMesh = NULL;
1981:   comm = PetscObjectComm((PetscObject)dm);
1982:   MPI_Comm_size(comm,&size);
1983:   if (size == 1) return(0);
1984:   DMPlexGetPartitioner(dm,&oldPart);
1985:   PetscObjectReference((PetscObject)oldPart);
1986:   PetscPartitionerCreate(comm,&gatherPart);
1987:   PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1988:   DMPlexSetPartitioner(dm,gatherPart);
1989:   DMPlexDistribute(dm,0,NULL,gatherMesh);
1990:   DMPlexSetPartitioner(dm,oldPart);
1991:   PetscPartitionerDestroy(&gatherPart);
1992:   PetscPartitionerDestroy(&oldPart);
1993:   return(0);
1994: }

1996: /*@C
1997:   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.
1998:   
1999:   Collective on DM

2001:   Input Parameter:
2002: . dm - the original DMPlex object

2004:   Output Parameter:
2005: . redundantMesh - the redundant DM object, or NULL

2007:   Level: intermediate
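
  Example:
  A minimal sketch (error checking omitted) that gives every process its own full copy of the mesh:
$    DM dmRedundant = NULL;
$    DMPlexGetRedundantDM(dm, &dmRedundant);
$    if (dmRedundant) {
$      /* every rank now holds the complete mesh */
$      DMDestroy(&dmRedundant);
$    }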

2009: .keywords: mesh
2010: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
2011: @*/
2012: PetscErrorCode DMPlexGetRedundantDM(DM dm, DM *redundantMesh)
2013: {
2014:   MPI_Comm       comm;
2015:   PetscMPIInt    size, rank;
2016:   PetscInt       pStart, pEnd, p;
2017:   PetscInt       numPoints = -1;
2018:   PetscSF        migrationSF, sfPoint;
2019:   DM             gatherDM, dmCoord;
2020:   PetscSFNode    *points;

2026:   *redundantMesh = NULL;
2027:   comm = PetscObjectComm((PetscObject)dm);
2028:   MPI_Comm_size(comm,&size);
2029:   if (size == 1) {
2030:     PetscObjectReference((PetscObject) dm);
2031:     *redundantMesh = dm;
2032:     return(0);
2033:   }
2034:   DMPlexGetGatherDM(dm,&gatherDM);
2035:   if (!gatherDM) return(0);
2036:   MPI_Comm_rank(comm,&rank);
2037:   DMPlexGetChart(gatherDM,&pStart,&pEnd);
2038:   numPoints = pEnd - pStart;
2039:   MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
2040:   PetscMalloc1(numPoints,&points);
2041:   PetscSFCreate(comm,&migrationSF);
2042:   for (p = 0; p < numPoints; p++) {
2043:     points[p].index = p;
2044:     points[p].rank  = 0;
2045:   }
2046:   PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
2047:   DMPlexCreate(comm, redundantMesh);
2048:   PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
2049:   DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
2050:   DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
2051:   DMSetPointSF(*redundantMesh, sfPoint);
2052:   DMGetCoordinateDM(*redundantMesh, &dmCoord);
2053:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
2054:   PetscSFDestroy(&sfPoint);
2055:   PetscSFDestroy(&migrationSF);
2056:   DMDestroy(&gatherDM);
2057:   return(0);
2058: }