Actual source code: plexdistribute.c

  1: #include <petsc/private/dmpleximpl.h>
  2: #include <petsc/private/dmlabelimpl.h>

  4: /*@C
  5:   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback

  7:   Input Parameters:
  8: + dm      - The DM object
  9: . user    - The user callback, may be NULL (to clear the callback)
 10: - ctx     - context for callback evaluation, may be NULL

 12:   Level: advanced

 14:   Notes:
 15:      The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.

 17:      Any setting here overrides other configuration of DMPlex adjacency determination.

 19: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
 20: @*/
 21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
 22: {
 23:   DM_Plex *mesh = (DM_Plex *)dm->data;

 27:   mesh->useradjacency = user;
 28:   mesh->useradjacencyctx = ctx;
 29:   return(0);
 30: }
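
/*
  Usage sketch (illustrative, not part of the compiled source; MyAdjacency and its body are
  hypothetical, and error checking is omitted as elsewhere in this listing):

    static PetscErrorCode MyAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
    {
      // Report only the point itself; adj[] is already large enough (see the Notes above)
      adj[0]   = p;
      *adjSize = 1;
      return(0);
    }

    DMPlexSetAdjacencyUser(dm, MyAdjacency, NULL);   // install the callback
    DMPlexSetAdjacencyUser(dm, NULL, NULL);          // clear it again to restore the default rules
*/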

 32: /*@C
 33:   DMPlexGetAdjacencyUser - Get the user-defined adjacency callback

 35:   Input Parameter:
 36: . dm      - The DM object

 38:   Output Parameters:
 39: + user    - The user callback
 40: - ctx     - context for callback evaluation

 42:   Level: advanced

 44: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
 45: @*/
 46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
 47: {
 48:   DM_Plex *mesh = (DM_Plex *)dm->data;

 52:   if (user) *user = mesh->useradjacency;
 53:   if (ctx) *ctx = mesh->useradjacencyctx;
 54:   return(0);
 55: }

 57: /*@
 58:   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

 60:   Input Parameters:
 61: + dm      - The DM object
 62: - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 64:   Level: intermediate

 66: .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
 67: @*/
 68: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
 69: {
 70:   DM_Plex *mesh = (DM_Plex *) dm->data;

 74:   mesh->useAnchors = useAnchors;
 75:   return(0);
 76: }
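
/*
  Usage sketch (illustrative): once anchors have been set with DMPlexSetAnchors(), this flag
  makes DMPlexGetAdjacency() report anchor points in place of constrained points.

    DMPlexSetAdjacencyUseAnchors(dm, PETSC_TRUE);
    // subsequent DMPlexGetAdjacency() and DMPlexPreallocateOperator() calls now substitute anchors
*/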

 78: /*@
 79:   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

 81:   Input Parameter:
 82: . dm      - The DM object

 84:   Output Parameter:
 85: . useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 87:   Level: intermediate

 89: .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
 90: @*/
 91: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
 92: {
 93:   DM_Plex *mesh = (DM_Plex *) dm->data;

 98:   *useAnchors = mesh->useAnchors;
 99:   return(0);
100: }

102: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103: {
104:   const PetscInt *cone = NULL;
105:   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
106:   PetscErrorCode  ierr;

109:   DMPlexGetConeSize(dm, p, &coneSize);
110:   DMPlexGetCone(dm, p, &cone);
111:   for (c = 0; c <= coneSize; ++c) {
112:     const PetscInt  point   = !c ? p : cone[c-1];
113:     const PetscInt *support = NULL;
114:     PetscInt        supportSize, s, q;

116:     DMPlexGetSupportSize(dm, point, &supportSize);
117:     DMPlexGetSupport(dm, point, &support);
118:     for (s = 0; s < supportSize; ++s) {
119:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
120:         if (support[s] == adj[q]) break;
121:       }
122:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
123:     }
124:   }
125:   *adjSize = numAdj;
126:   return(0);
127: }

129: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
130: {
131:   const PetscInt *support = NULL;
132:   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;
133:   PetscErrorCode  ierr;

136:   DMPlexGetSupportSize(dm, p, &supportSize);
137:   DMPlexGetSupport(dm, p, &support);
138:   for (s = 0; s <= supportSize; ++s) {
139:     const PetscInt  point = !s ? p : support[s-1];
140:     const PetscInt *cone  = NULL;
141:     PetscInt        coneSize, c, q;

143:     DMPlexGetConeSize(dm, point, &coneSize);
144:     DMPlexGetCone(dm, point, &cone);
145:     for (c = 0; c < coneSize; ++c) {
146:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
147:         if (cone[c] == adj[q]) break;
148:       }
149:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
150:     }
151:   }
152:   *adjSize = numAdj;
153:   return(0);
154: }

156: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
157: {
158:   PetscInt      *star = NULL;
159:   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;

163:   DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
164:   for (s = 0; s < starSize*2; s += 2) {
165:     const PetscInt *closure = NULL;
166:     PetscInt        closureSize, c, q;

168:     DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
169:     for (c = 0; c < closureSize*2; c += 2) {
170:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
171:         if (closure[c] == adj[q]) break;
172:       }
173:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
174:     }
175:     DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
176:   }
177:   DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
178:   *adjSize = numAdj;
179:   return(0);
180: }

182: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
183: {
184:   static PetscInt asiz = 0;
185:   PetscInt maxAnchors = 1;
186:   PetscInt aStart = -1, aEnd = -1;
187:   PetscInt maxAdjSize;
188:   PetscSection aSec = NULL;
189:   IS aIS = NULL;
190:   const PetscInt *anchors;
191:   DM_Plex *mesh = (DM_Plex *)dm->data;
192:   PetscErrorCode  ierr;

195:   if (useAnchors) {
196:     DMPlexGetAnchors(dm,&aSec,&aIS);
197:     if (aSec) {
198:       PetscSectionGetMaxDof(aSec,&maxAnchors);
199:       maxAnchors = PetscMax(1,maxAnchors);
200:       PetscSectionGetChart(aSec,&aStart,&aEnd);
201:       ISGetIndices(aIS,&anchors);
202:     }
203:   }
204:   if (!*adj) {
205:     PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

207:     DMPlexGetChart(dm, &pStart,&pEnd);
208:     DMPlexGetDepth(dm, &depth);
209:     depth = PetscMax(depth, -depth);
210:     DMPlexGetMaxSizes(dm, &maxC, &maxS);
211:     coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
212:     supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
213:     asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
214:     asiz *= maxAnchors;
215:     asiz  = PetscMin(asiz,pEnd-pStart);
216:     PetscMalloc1(asiz,adj);
217:   }
218:   if (*adjSize < 0) *adjSize = asiz;
219:   maxAdjSize = *adjSize;
220:   if (mesh->useradjacency) {
221:     (*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx);
222:   } else if (useTransitiveClosure) {
223:     DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
224:   } else if (useCone) {
225:     DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
226:   } else {
227:     DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
228:   }
229:   if (useAnchors && aSec) {
230:     PetscInt origSize = *adjSize;
231:     PetscInt numAdj = origSize;
232:     PetscInt i = 0, j;
233:     PetscInt *orig = *adj;

235:     while (i < origSize) {
236:       PetscInt p = orig[i];
237:       PetscInt aDof = 0;

239:       if (p >= aStart && p < aEnd) {
240:         PetscSectionGetDof(aSec,p,&aDof);
241:       }
242:       if (aDof) {
243:         PetscInt aOff;
244:         PetscInt s, q;

246:         for (j = i + 1; j < numAdj; j++) {
247:           orig[j - 1] = orig[j];
248:         }
249:         origSize--;
250:         numAdj--;
251:         PetscSectionGetOffset(aSec,p,&aOff);
252:         for (s = 0; s < aDof; ++s) {
253:           for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
254:             if (anchors[aOff+s] == orig[q]) break;
255:           }
256:           if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
257:         }
258:       }
259:       else {
260:         i++;
261:       }
262:     }
263:     *adjSize = numAdj;
264:     ISRestoreIndices(aIS,&anchors);
265:   }
266:   return(0);
267: }

269: /*@
270:   DMPlexGetAdjacency - Return all points adjacent to the given point

272:   Input Parameters:
273: + dm - The DM object
274: . p  - The point
275: . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
276: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize

278:   Output Parameters:
279: + adjSize - The number of adjacent points
280: - adj - The adjacent points

282:   Level: advanced

284:   Notes:
285:     The user must PetscFree the adj array if it was not passed in.

287: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
288: @*/
289: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
290: {
291:   PetscBool      useCone, useClosure, useAnchors;

298:   DMGetBasicAdjacency(dm, &useCone, &useClosure);
299:   DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
300:   DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);
301:   return(0);
302: }
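
/*
  Usage sketch (illustrative; dm is assumed to be a set-up DMPlex, error checking omitted):
  let DMPlexGetAdjacency() allocate the work array on the first call, reuse it, and free it once.

    PetscInt  adjSize = PETSC_DETERMINE;
    PetscInt *adj     = NULL;
    PetscInt  p, a, pStart, pEnd;

    DMPlexGetChart(dm, &pStart, &pEnd);
    for (p = pStart; p < pEnd; ++p) {
      adjSize = PETSC_DETERMINE;                  // reset to full capacity for each point
      DMPlexGetAdjacency(dm, p, &adjSize, &adj);  // adj is allocated on the first call only
      for (a = 0; a < adjSize; ++a) {
        // use adj[a]
      }
    }
    PetscFree(adj);                               // the caller frees an array it did not pass in
*/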

304: /*@
305:   DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

307:   Collective on dm

309:   Input Parameters:
310: + dm              - The DM
311: . sfPoint         - The PetscSF which encodes point connectivity
     . rootRankSection - The number of leaves for a given root point
     . rootRanks       - The rank of each edge into the root point
     . leafRankSection - The number of processes sharing a given leaf point
     - leafRanks       - The rank of each process sharing a leaf point

313:   Output Parameters:
314: + processRanks - A list of process neighbors, or NULL
315: - sfProcess    - An SF encoding the two-sided process connectivity, or NULL

317:   Level: developer

319: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
320: @*/
321: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
322: {
323:   const PetscSFNode *remotePoints;
324:   PetscInt          *localPointsNew;
325:   PetscSFNode       *remotePointsNew;
326:   const PetscInt    *nranks;
327:   PetscInt          *ranksNew;
328:   PetscBT            neighbors;
329:   PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
330:   PetscMPIInt        size, proc, rank;
331:   PetscErrorCode     ierr;

338:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
339:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
340:   PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
341:   PetscBTCreate(size, &neighbors);
342:   PetscBTMemzero(size, neighbors);
343:   /* Compute root-to-leaf process connectivity */
344:   PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
345:   ISGetIndices(rootRanks, &nranks);
346:   for (p = pStart; p < pEnd; ++p) {
347:     PetscInt ndof, noff, n;

349:     PetscSectionGetDof(rootRankSection, p, &ndof);
350:     PetscSectionGetOffset(rootRankSection, p, &noff);
351:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
352:   }
353:   ISRestoreIndices(rootRanks, &nranks);
354:   /* Compute leaf-to-neighbor process connectivity */
355:   PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
356:   ISGetIndices(leafRanks, &nranks);
357:   for (p = pStart; p < pEnd; ++p) {
358:     PetscInt ndof, noff, n;

360:     PetscSectionGetDof(leafRankSection, p, &ndof);
361:     PetscSectionGetOffset(leafRankSection, p, &noff);
362:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
363:   }
364:   ISRestoreIndices(leafRanks, &nranks);
365:   /* Compute leaf-to-root process connectivity */
366:   for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
367:   /* Calculate edges */
368:   PetscBTClear(neighbors, rank);
369:   for (proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
370:   PetscMalloc1(numNeighbors, &ranksNew);
371:   PetscMalloc1(numNeighbors, &localPointsNew);
372:   PetscMalloc1(numNeighbors, &remotePointsNew);
373:   for (proc = 0, n = 0; proc < size; ++proc) {
374:     if (PetscBTLookup(neighbors, proc)) {
375:       ranksNew[n]              = proc;
376:       localPointsNew[n]        = proc;
377:       remotePointsNew[n].index = rank;
378:       remotePointsNew[n].rank  = proc;
379:       ++n;
380:     }
381:   }
382:   PetscBTDestroy(&neighbors);
383:   if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
384:   else              {PetscFree(ranksNew);}
385:   if (sfProcess) {
386:     PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
387:     PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
388:     PetscSFSetFromOptions(*sfProcess);
389:     PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
390:   }
391:   return(0);
392: }
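
/*
  Usage sketch (illustrative; the rank sections and index sets are assumed to come from
  DMPlexDistributeOwnership(), error checking omitted):

    PetscSF sfPoint, sfProcess;
    IS      processRanks;

    DMGetPointSF(dm, &sfPoint);
    DMPlexCreateTwoSidedProcessSF(dm, sfPoint, rootSection, rootrank, leafSection, leafrank,
                                  &processRanks, &sfProcess);
    ISDestroy(&processRanks);
    PetscSFDestroy(&sfProcess);
*/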

394: /*@
395:   DMPlexDistributeOwnership - Compute owner information for shared points. This effectively makes the one-sided point SF information two-sided, recording for each root the ranks of its leaves and for each leaf the ranks sharing it.

397:   Collective on dm

399:   Input Parameter:
400: . dm - The DM

402:   Output Parameters:
403: + rootSection - The number of leaves for a given root point
404: . rootrank    - The rank of each edge into the root point
405: . leafSection - The number of processes sharing a given leaf point
406: - leafrank    - The rank of each process sharing a leaf point

408:   Level: developer

410: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistribute(), DMPlexDistributeOverlap()
411: @*/
412: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
413: {
414:   MPI_Comm        comm;
415:   PetscSF         sfPoint;
416:   const PetscInt *rootdegree;
417:   PetscInt       *myrank, *remoterank;
418:   PetscInt        pStart, pEnd, p, nedges;
419:   PetscMPIInt     rank;
420:   PetscErrorCode  ierr;

423:   PetscObjectGetComm((PetscObject) dm, &comm);
424:   MPI_Comm_rank(comm, &rank);
425:   DMPlexGetChart(dm, &pStart, &pEnd);
426:   DMGetPointSF(dm, &sfPoint);
427:   /* Compute number of leaves for each root */
428:   PetscObjectSetName((PetscObject) rootSection, "Root Section");
429:   PetscSectionSetChart(rootSection, pStart, pEnd);
430:   PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
431:   PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
432:   for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
433:   PetscSectionSetUp(rootSection);
434:   /* Gather rank of each leaf to root */
435:   PetscSectionGetStorageSize(rootSection, &nedges);
436:   PetscMalloc1(pEnd-pStart, &myrank);
437:   PetscMalloc1(nedges,  &remoterank);
438:   for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
439:   PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
440:   PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
441:   PetscFree(myrank);
442:   ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
443:   /* Distribute remote ranks to leaves */
444:   PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
445:   DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
446:   return(0);
447: }
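
/*
  Usage sketch (illustrative; error checking omitted): create the two sections, compute the
  ownership information, and hand it to DMPlexCreateOverlapLabel() or DMPlexCreateTwoSidedProcessSF().

    PetscSection rootSection, leafSection;
    IS           rootrank, leafrank;

    PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);
    PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);
    DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
    // ... use the ownership information ...
    ISDestroy(&rootrank);
    ISDestroy(&leafrank);
    PetscSectionDestroy(&rootSection);
    PetscSectionDestroy(&leafSection);
*/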

449: /*@C
450:   DMPlexCreateOverlapLabel - Compute a DMLabel marking the points that must be sent to other processes to construct the requested number of overlap levels.

452:   Collective on dm

454:   Input Parameters:
455: + dm          - The DM
456: . levels      - Number of overlap levels
457: . rootSection - The number of leaves for a given root point
458: . rootrank    - The rank of each edge into the root point
459: . leafSection - The number of processes sharing a given leaf point
460: - leafrank    - The rank of each process sharing a leaf point

462:   Output Parameter:
463: . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings

465:   Level: developer

467: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
468: @*/
469: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
470: {
471:   MPI_Comm           comm;
472:   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
473:   PetscSF            sfPoint;
474:   const PetscSFNode *remote;
475:   const PetscInt    *local;
476:   const PetscInt    *nrank, *rrank;
477:   PetscInt          *adj = NULL;
478:   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
479:   PetscMPIInt        rank, size;
480:   PetscBool          flg;
481:   PetscErrorCode     ierr;

484:   *ovLabel = NULL;
485:   PetscObjectGetComm((PetscObject) dm, &comm);
486:   MPI_Comm_size(comm, &size);
487:   MPI_Comm_rank(comm, &rank);
488:   if (size == 1) return(0);
489:   DMGetPointSF(dm, &sfPoint);
490:   DMPlexGetChart(dm, &pStart, &pEnd);
491:   if (!levels) {
492:     /* Add owned points */
493:     DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
494:     for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
495:     return(0);
496:   }
497:   PetscSectionGetChart(leafSection, &sStart, &sEnd);
498:   PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
499:   DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);
500:   /* Handle leaves: shared with the root point */
501:   for (l = 0; l < nleaves; ++l) {
502:     PetscInt adjSize = PETSC_DETERMINE, a;

504:     DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);
505:     for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
506:   }
507:   ISGetIndices(rootrank, &rrank);
508:   ISGetIndices(leafrank, &nrank);
509:   /* Handle roots */
510:   for (p = pStart; p < pEnd; ++p) {
511:     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

513:     if ((p >= sStart) && (p < sEnd)) {
514:       /* Some leaves share a root with other leaves on different processes */
515:       PetscSectionGetDof(leafSection, p, &neighbors);
516:       if (neighbors) {
517:         PetscSectionGetOffset(leafSection, p, &noff);
518:         DMPlexGetAdjacency(dm, p, &adjSize, &adj);
519:         for (n = 0; n < neighbors; ++n) {
520:           const PetscInt remoteRank = nrank[noff+n];

522:           if (remoteRank == rank) continue;
523:           for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
524:         }
525:       }
526:     }
527:     /* Roots are shared with leaves */
528:     PetscSectionGetDof(rootSection, p, &neighbors);
529:     if (!neighbors) continue;
530:     PetscSectionGetOffset(rootSection, p, &noff);
531:     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
532:     for (n = 0; n < neighbors; ++n) {
533:       const PetscInt remoteRank = rrank[noff+n];

535:       if (remoteRank == rank) continue;
536:       for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
537:     }
538:   }
539:   PetscFree(adj);
540:   ISRestoreIndices(rootrank, &rrank);
541:   ISRestoreIndices(leafrank, &nrank);
542:   /* Add additional overlap levels */
543:   for (l = 1; l < levels; l++) {
544:     /* Propagate point donations over SF to capture remote connections */
545:     DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
546:     /* Add next level of point donations to the label */
547:     DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
548:   }
549:   /* We require the closure in the overlap */
550:   DMPlexPartitionLabelClosure(dm, ovAdjByRank);
551:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
552:   if (flg) {
553:     PetscViewer viewer;
554:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);
555:     DMLabelView(ovAdjByRank, viewer);
556:   }
557:   /* Invert sender to receiver label */
558:   DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
559:   DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);
560:   /* Add owned points, except for shared local points */
561:   for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
562:   for (l = 0; l < nleaves; ++l) {
563:     DMLabelClearValue(*ovLabel, local[l], rank);
564:     DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
565:   }
566:   /* Clean up */
567:   DMLabelDestroy(&ovAdjByRank);
568:   return(0);
569: }
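
/*
  Usage sketch (illustrative): the flow used by DMPlexDistributeOverlap(), which turns the
  overlap label into a migration SF and migrates the mesh. rootSection, rootrank, leafSection
  and leafrank are assumed to exist; error checking and cleanup are omitted, and signatures
  follow this PETSc version.

    DMLabel ovLabel;
    PetscSF sfMigration, sfStratified;
    DM      dmOverlap;

    DMPlexCreateOverlapLabel(dm, 1, rootSection, rootrank, leafSection, leafrank, &ovLabel);
    DMPlexPartitionLabelCreateSF(dm, ovLabel, &sfMigration);
    DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
    DMCreate(PetscObjectComm((PetscObject) dm), &dmOverlap);
    DMSetType(dmOverlap, DMPLEX);
    DMPlexMigrate(dm, sfStratified, dmOverlap);
*/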

571: /*@C
572:   DMPlexCreateOverlapMigrationSF - Create an SF describing the new point distribution that realizes the overlap described by the input SF

574:   Collective on dm

576:   Input Parameters:
577: + dm          - The DM
578: - overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

580:   Output Parameter:
581: . migrationSF - An SF that maps original points in old locations to points in new locations

583:   Level: developer

585: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistributeOverlap(), DMPlexDistribute()
586: @*/
587: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
588: {
589:   MPI_Comm           comm;
590:   PetscMPIInt        rank, size;
591:   PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
592:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
593:   PetscInt          *depthRecv, *depthShift, *depthIdx;
594:   PetscSFNode       *iremote;
595:   PetscSF            pointSF;
596:   const PetscInt    *sharedLocal;
597:   const PetscSFNode *overlapRemote, *sharedRemote;
598:   PetscErrorCode     ierr;

602:   PetscObjectGetComm((PetscObject)dm, &comm);
603:   MPI_Comm_rank(comm, &rank);
604:   MPI_Comm_size(comm, &size);
605:   DMGetDimension(dm, &dim);

607:   /* Before building the migration SF we need to know the new stratum offsets */
608:   PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
609:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
610:   for (d=0; d<dim+1; d++) {
611:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
612:     for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
613:   }
614:   for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
615:   PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE);
616:   PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE);

618:   /* Count received points in each stratum and compute the internal strata shift */
619:   PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
620:   for (d=0; d<dim+1; d++) depthRecv[d]=0;
621:   for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
622:   depthShift[dim] = 0;
623:   for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
624:   for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
625:   for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
626:   for (d=0; d<dim+1; d++) {
627:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
628:     depthIdx[d] = pStart + depthShift[d];
629:   }

631:   /* Build an SF that describes the full overlap migration */
632:   DMPlexGetChart(dm, &pStart, &pEnd);
633:   newLeaves = pEnd - pStart + nleaves;
634:   PetscMalloc1(newLeaves, &ilocal);
635:   PetscMalloc1(newLeaves, &iremote);
636:   /* First map local points to themselves */
637:   for (d=0; d<dim+1; d++) {
638:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
639:     for (p=pStart; p<pEnd; p++) {
640:       point = p + depthShift[d];
641:       ilocal[point] = point;
642:       iremote[point].index = p;
643:       iremote[point].rank = rank;
644:       depthIdx[d]++;
645:     }
646:   }

648:   /* Add in the remote roots for currently shared points */
649:   DMGetPointSF(dm, &pointSF);
650:   PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
651:   for (d=0; d<dim+1; d++) {
652:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
653:     for (p=0; p<numSharedPoints; p++) {
654:       if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
655:         point = sharedLocal[p] + depthShift[d];
656:         iremote[point].index = sharedRemote[p].index;
657:         iremote[point].rank = sharedRemote[p].rank;
658:       }
659:     }
660:   }

662:   /* Now add the incoming overlap points */
663:   for (p=0; p<nleaves; p++) {
664:     point = depthIdx[remoteDepths[p]];
665:     ilocal[point] = point;
666:     iremote[point].index = overlapRemote[p].index;
667:     iremote[point].rank = overlapRemote[p].rank;
668:     depthIdx[remoteDepths[p]]++;
669:   }
670:   PetscFree2(pointDepths,remoteDepths);

672:   PetscSFCreate(comm, migrationSF);
673:   PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
674:   PetscSFSetFromOptions(*migrationSF);
675:   DMPlexGetChart(dm, &pStart, &pEnd);
676:   PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);

678:   PetscFree3(depthRecv, depthShift, depthIdx);
679:   return(0);
680: }

682: /*@
683:   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF for stratification.

685:   Input Parameters:
686: + dm          - The DM
687: - sf          - A star forest with non-ordered leaves, usually defining a DM point migration

689:   Output Parameter:
690: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

692:   Note:
693:   This lexicographically sorts by (depth, cellType)

695:   Level: developer

697: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
698: @*/
699: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
700: {
701:   MPI_Comm           comm;
702:   PetscMPIInt        rank, size;
703:   PetscInt           d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
704:   PetscSFNode       *pointDepths, *remoteDepths;
705:   PetscInt          *ilocal;
706:   PetscInt          *depthRecv, *depthShift, *depthIdx;
707:   PetscInt          *ctRecv,    *ctShift,    *ctIdx;
708:   const PetscSFNode *iremote;
709:   PetscErrorCode     ierr;

713:   PetscObjectGetComm((PetscObject) dm, &comm);
714:   MPI_Comm_rank(comm, &rank);
715:   MPI_Comm_size(comm, &size);
716:   DMPlexGetDepth(dm, &ldepth);
717:   DMGetDimension(dm, &dim);
718:   MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
719:   if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);
720:   PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0);

722:   /* Before building the migration SF we need to know the new stratum offsets */
723:   PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
724:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
725:   for (d = 0; d < depth+1; ++d) {
726:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
727:     for (p = pStart; p < pEnd; ++p) {
728:       DMPolytopeType ct;

730:       DMPlexGetCellType(dm, p, &ct);
731:       pointDepths[p].index = d;
732:       pointDepths[p].rank  = ct;
733:     }
734:   }
735:   for (p = 0; p < nleaves; ++p) {remoteDepths[p].index = -1; remoteDepths[p].rank = -1;}
736:   PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE);
737:   PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE);
738:   /* Count received points in each stratum and compute the internal strata shift */
739:   PetscCalloc6(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx);
740:   for (p = 0; p < nleaves; ++p) {
741:     if (remoteDepths[p].rank < 0) {
742:       ++depthRecv[remoteDepths[p].index];
743:     } else {
744:       ++ctRecv[remoteDepths[p].rank];
745:     }
746:   }
747:   {
748:     PetscInt depths[4], dims[4], shift = 0, i, c;

750:     /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
751:          Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
752:      */
753:     depths[0] = depth; depths[1] = 0; depths[2] = depth-1; depths[3] = 1;
754:     dims[0]   = dim;   dims[1]   = 0; dims[2]   = dim-1;   dims[3]   = 1;
755:     for (i = 0; i <= depth; ++i) {
756:       const PetscInt dep = depths[i];
757:       const PetscInt dim = dims[i];

759:       for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
760:         if (DMPolytopeTypeGetDim((DMPolytopeType) c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
761:         ctShift[c] = shift;
762:         shift     += ctRecv[c];
763:       }
764:       depthShift[dep] = shift;
765:       shift          += depthRecv[dep];
766:     }
767:     for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
768:       const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType) c);

770:       if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
771:         ctShift[c] = shift;
772:         shift     += ctRecv[c];
773:       }
774:     }
775:   }
776:   /* Derive a new local permutation based on stratified indices */
777:   PetscMalloc1(nleaves, &ilocal);
778:   for (p = 0; p < nleaves; ++p) {
779:     const PetscInt       dep = remoteDepths[p].index;
780:     const DMPolytopeType ct  = (DMPolytopeType) remoteDepths[p].rank;

782:     if ((PetscInt) ct < 0) {
783:       ilocal[p] = depthShift[dep] + depthIdx[dep];
784:       ++depthIdx[dep];
785:     } else {
786:       ilocal[p] = ctShift[ct] + ctIdx[ct];
787:       ++ctIdx[ct];
788:     }
789:   }
790:   PetscSFCreate(comm, migrationSF);
791:   PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
792:   PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
793:   PetscFree2(pointDepths,remoteDepths);
794:   PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx);
795:   PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0);
796:   return(0);
797: }

799: /*@
800:   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

802:   Collective on dm

804:   Input Parameters:
805: + dm - The DMPlex object
806: . pointSF - The PetscSF describing the communication pattern
807: . originalSection - The PetscSection for existing data layout
808: - originalVec - The existing data in a local vector

810:   Output Parameters:
811: + newSection - The PetscSection describing the new data layout
812: - newVec - The new data in a local vector

814:   Level: developer

816: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
817: @*/
818: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
819: {
820:   PetscSF        fieldSF;
821:   PetscInt      *remoteOffsets, fieldSize;
822:   PetscScalar   *originalValues, *newValues;

826:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
827:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

829:   PetscSectionGetStorageSize(newSection, &fieldSize);
830:   VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
831:   VecSetType(newVec,dm->vectype);

833:   VecGetArray(originalVec, &originalValues);
834:   VecGetArray(newVec, &newValues);
835:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
836:   PetscFree(remoteOffsets);
837:   PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE);
838:   PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE);
839:   PetscSFDestroy(&fieldSF);
840:   VecRestoreArray(newVec, &newValues);
841:   VecRestoreArray(originalVec, &originalValues);
842:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
843:   return(0);
844: }
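
/*
  Usage sketch (illustrative), mirroring DMPlexDistributeCoordinates() later in this file;
  dm, dmParallel, migrationSF, originalSection and originalVec are assumed to exist, and
  error checking is omitted.

    PetscSection newSection;
    Vec          newVec;

    DMGetCoordinateSection(dmParallel, &newSection);
    VecCreate(PETSC_COMM_SELF, &newVec);
    DMPlexDistributeField(dm, migrationSF, originalSection, originalVec, newSection, newVec);
    DMSetCoordinatesLocal(dmParallel, newVec);
    VecDestroy(&newVec);
*/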

846: /*@
847:   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

849:   Collective on dm

851:   Input Parameters:
852: + dm - The DMPlex object
853: . pointSF - The PetscSF describing the communication pattern
854: . originalSection - The PetscSection for existing data layout
855: - originalIS - The existing data

857:   Output Parameters:
858: + newSection - The PetscSection describing the new data layout
859: - newIS - The new data

861:   Level: developer

863: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
864: @*/
865: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
866: {
867:   PetscSF         fieldSF;
868:   PetscInt       *newValues, *remoteOffsets, fieldSize;
869:   const PetscInt *originalValues;
870:   PetscErrorCode  ierr;

873:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
874:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

876:   PetscSectionGetStorageSize(newSection, &fieldSize);
877:   PetscMalloc1(fieldSize, &newValues);

879:   ISGetIndices(originalIS, &originalValues);
880:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
881:   PetscFree(remoteOffsets);
882:   PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE);
883:   PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE);
884:   PetscSFDestroy(&fieldSF);
885:   ISRestoreIndices(originalIS, &originalValues);
886:   ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
887:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
888:   return(0);
889: }

891: /*@
892:   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

894:   Collective on dm

896:   Input Parameters:
897: + dm - The DMPlex object
898: . pointSF - The PetscSF describing the communication pattern
899: . originalSection - The PetscSection for existing data layout
900: . datatype - The type of data
901: - originalData - The existing data

903:   Output Parameters:
904: + newSection - The PetscSection describing the new data layout
905: - newData - The new data

907:   Level: developer

909: .seealso: DMPlexDistribute(), DMPlexDistributeField()
910: @*/
911: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
912: {
913:   PetscSF        fieldSF;
914:   PetscInt      *remoteOffsets, fieldSize;
915:   PetscMPIInt    dataSize;

919:   PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
920:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

922:   PetscSectionGetStorageSize(newSection, &fieldSize);
923:   MPI_Type_size(datatype, &dataSize);
924:   PetscMalloc(fieldSize * dataSize, newData);

926:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
927:   PetscFree(remoteOffsets);
928:   PetscSFBcastBegin(fieldSF, datatype, originalData, *newData,MPI_REPLACE);
929:   PetscSFBcastEnd(fieldSF, datatype, originalData, *newData,MPI_REPLACE);
930:   PetscSFDestroy(&fieldSF);
931:   PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
932:   return(0);
933: }
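
/*
  Usage sketch (illustrative; pointSF, originalSection and originalData are assumed to exist,
  error checking omitted): distribute an array of PetscInt values laid out by originalSection.

    PetscSection newSection;
    PetscInt    *newData = NULL;

    PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);
    DMPlexDistributeData(dm, pointSF, originalSection, MPIU_INT, originalData, newSection, (void **) &newData);
    // ... use newData, indexed through newSection ...
    PetscSectionDestroy(&newSection);
    PetscFree(newData);
*/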

935: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
936: {
937:   DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
938:   MPI_Comm               comm;
939:   PetscSF                coneSF;
940:   PetscSection           originalConeSection, newConeSection;
941:   PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
942:   PetscBool              flg;
943:   PetscErrorCode         ierr;

948:   PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
949:   /* Distribute cone section */
950:   PetscObjectGetComm((PetscObject)dm, &comm);
951:   DMPlexGetConeSection(dm, &originalConeSection);
952:   DMPlexGetConeSection(dmParallel, &newConeSection);
953:   PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
954:   DMSetUp(dmParallel);
955:   {
956:     PetscInt pStart, pEnd, p;

958:     PetscSectionGetChart(newConeSection, &pStart, &pEnd);
959:     for (p = pStart; p < pEnd; ++p) {
960:       PetscInt coneSize;
961:       PetscSectionGetDof(newConeSection, p, &coneSize);
962:       pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
963:     }
964:   }
965:   /* Communicate and renumber cones */
966:   PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
967:   PetscFree(remoteOffsets);
968:   DMPlexGetCones(dm, &cones);
969:   if (original) {
970:     PetscInt numCones;

972:     PetscSectionGetStorageSize(originalConeSection,&numCones);
973:     PetscMalloc1(numCones,&globCones);
974:     ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
975:   } else {
976:     globCones = cones;
977:   }
978:   DMPlexGetCones(dmParallel, &newCones);
979:   PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE);
980:   PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE);
981:   if (original) {
982:     PetscFree(globCones);
983:   }
984:   PetscSectionGetStorageSize(newConeSection, &newConesSize);
985:   ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
986:   if (PetscDefined(USE_DEBUG)) {
987:     PetscInt  p;
988:     PetscBool valid = PETSC_TRUE;
989:     for (p = 0; p < newConesSize; ++p) {
990:       if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);}
991:     }
992:     if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
993:   }
994:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
995:   if (flg) {
996:     PetscPrintf(comm, "Serial Cone Section:\n");
997:     PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm));
998:     PetscPrintf(comm, "Parallel Cone Section:\n");
999:     PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm));
1000:     PetscSFView(coneSF, NULL);
1001:   }
1002:   DMPlexGetConeOrientations(dm, &cones);
1003:   DMPlexGetConeOrientations(dmParallel, &newCones);
1004:   PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE);
1005:   PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE);
1006:   PetscSFDestroy(&coneSF);
1007:   PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
1008:   /* Create supports and stratify DMPlex */
1009:   {
1010:     PetscInt pStart, pEnd;

1012:     PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
1013:     PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
1014:   }
1015:   DMPlexSymmetrize(dmParallel);
1016:   DMPlexStratify(dmParallel);
1017:   {
1018:     PetscBool useCone, useClosure, useAnchors;

1020:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
1021:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1022:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1023:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1024:   }
1025:   return(0);
1026: }

1028: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1029: {
1030:   MPI_Comm         comm;
1031:   PetscSection     originalCoordSection, newCoordSection;
1032:   Vec              originalCoordinates, newCoordinates;
1033:   PetscInt         bs;
1034:   PetscBool        isper;
1035:   const char      *name;
1036:   const PetscReal *maxCell, *L;
1037:   const DMBoundaryType *bd;
1038:   PetscErrorCode   ierr;


1044:   PetscObjectGetComm((PetscObject)dm, &comm);
1045:   DMGetCoordinateSection(dm, &originalCoordSection);
1046:   DMGetCoordinateSection(dmParallel, &newCoordSection);
1047:   DMGetCoordinatesLocal(dm, &originalCoordinates);
1048:   if (originalCoordinates) {
1049:     VecCreate(PETSC_COMM_SELF, &newCoordinates);
1050:     PetscObjectGetName((PetscObject) originalCoordinates, &name);
1051:     PetscObjectSetName((PetscObject) newCoordinates, name);

1053:     DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1054:     DMSetCoordinatesLocal(dmParallel, newCoordinates);
1055:     VecGetBlockSize(originalCoordinates, &bs);
1056:     VecSetBlockSize(newCoordinates, bs);
1057:     VecDestroy(&newCoordinates);
1058:   }
1059:   DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);
1060:   DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);
1061:   return(0);
1062: }

1064: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1065: {
1066:   DM_Plex         *mesh = (DM_Plex*) dm->data;
1067:   MPI_Comm         comm;
1068:   DMLabel          depthLabel;
1069:   PetscMPIInt      rank;
1070:   PetscInt         depth, d, numLabels, numLocalLabels, l;
1071:   PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1072:   PetscObjectState depthState = -1;
1073:   PetscErrorCode   ierr;


1079:   PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1080:   PetscObjectGetComm((PetscObject)dm, &comm);
1081:   MPI_Comm_rank(comm, &rank);

1083:   /* If the user has changed the depth label, communicate it instead */
1084:   DMPlexGetDepth(dm, &depth);
1085:   DMPlexGetDepthLabel(dm, &depthLabel);
1086:   if (depthLabel) {PetscObjectStateGet((PetscObject) depthLabel, &depthState);}
1087:   lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1088:   MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1089:   if (sendDepth) {
1090:     DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel);
1091:     DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE);
1092:   }
1093:   /* Everyone must have either the same number of labels, or none */
1094:   DMGetNumLabels(dm, &numLocalLabels);
1095:   numLabels = numLocalLabels;
1096:   MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1097:   if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1098:   for (l = 0; l < numLabels; ++l) {
1099:     DMLabel     label = NULL, labelNew = NULL;
1100:     PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
1101:     const char *name = NULL;

1103:     if (hasLabels) {
1104:       DMGetLabelByNum(dm, l, &label);
1105:       /* Skip "depth" because it is recreated */
1106:       PetscObjectGetName((PetscObject) label, &name);
1107:       PetscStrcmp(name, "depth", &isDepth);
1108:     }
1109:     MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);
1110:     if (isDepth && !sendDepth) continue;
1111:     DMLabelDistribute(label, migrationSF, &labelNew);
1112:     if (isDepth) {
1113:       /* Put in any missing strata which can occur if users are managing the depth label themselves */
1114:       PetscInt gdepth;

1116:       MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1117:       if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1118:       for (d = 0; d <= gdepth; ++d) {
1119:         PetscBool has;

1121:         DMLabelHasStratum(labelNew, d, &has);
1122:         if (!has) {DMLabelAddStratum(labelNew, d);}
1123:       }
1124:     }
1125:     DMAddLabel(dmParallel, labelNew);
1126:     /* Put the output flag in the new label */
1127:     if (hasLabels) {DMGetLabelOutput(dm, name, &lisOutput);}
1128:     MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);
1129:     PetscObjectGetName((PetscObject) labelNew, &name);
1130:     DMSetLabelOutput(dmParallel, name, isOutput);
1131:     DMLabelDestroy(&labelNew);
1132:   }
1133:   PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1134:   return(0);
1135: }

1137: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1138: {
1139:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1140:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1141:   MPI_Comm        comm;
1142:   DM              refTree;
1143:   PetscSection    origParentSection, newParentSection;
1144:   PetscInt        *origParents, *origChildIDs;
1145:   PetscBool       flg;
1146:   PetscErrorCode  ierr;

1151:   PetscObjectGetComm((PetscObject)dm, &comm);

1153:   /* Set up tree */
1154:   DMPlexGetReferenceTree(dm,&refTree);
1155:   DMPlexSetReferenceTree(dmParallel,refTree);
1156:   DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1157:   if (origParentSection) {
1158:     PetscInt        pStart, pEnd;
1159:     PetscInt        *newParents, *newChildIDs, *globParents;
1160:     PetscInt        *remoteOffsetsParents, newParentSize;
1161:     PetscSF         parentSF;

1163:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1164:     PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1165:     PetscSectionSetChart(newParentSection,pStart,pEnd);
1166:     PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1167:     PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1168:     PetscFree(remoteOffsetsParents);
1169:     PetscSectionGetStorageSize(newParentSection,&newParentSize);
1170:     PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1171:     if (original) {
1172:       PetscInt numParents;

1174:       PetscSectionGetStorageSize(origParentSection,&numParents);
1175:       PetscMalloc1(numParents,&globParents);
1176:       ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1177:     }
1178:     else {
1179:       globParents = origParents;
1180:     }
1181:     PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE);
1182:     PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE);
1183:     if (original) {
1184:       PetscFree(globParents);
1185:     }
1186:     PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE);
1187:     PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE);
1188:     ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1189:     if (PetscDefined(USE_DEBUG)) {
1190:       PetscInt  p;
1191:       PetscBool valid = PETSC_TRUE;
1192:       for (p = 0; p < newParentSize; ++p) {
1193:         if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1194:       }
1195:       if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1196:     }
1197:     PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1198:     if (flg) {
1199:       PetscPrintf(comm, "Serial Parent Section: \n");
1200:       PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm));
1201:       PetscPrintf(comm, "Parallel Parent Section: \n");
1202:       PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm));
1203:       PetscSFView(parentSF, NULL);
1204:     }
1205:     DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1206:     PetscSectionDestroy(&newParentSection);
1207:     PetscFree2(newParents,newChildIDs);
1208:     PetscSFDestroy(&parentSF);
1209:   }
1210:   pmesh->useAnchors = mesh->useAnchors;
1211:   return(0);
1212: }

1214: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1215: {
1216:   PetscMPIInt            rank, size;
1217:   MPI_Comm               comm;
1218:   PetscErrorCode         ierr;


1224:   /* Create point SF for parallel mesh */
1225:   PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1226:   PetscObjectGetComm((PetscObject)dm, &comm);
1227:   MPI_Comm_rank(comm, &rank);
1228:   MPI_Comm_size(comm, &size);
1229:   {
1230:     const PetscInt *leaves;
1231:     PetscSFNode    *remotePoints, *rowners, *lowners;
1232:     PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1233:     PetscInt        pStart, pEnd;

1235:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1236:     PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1237:     PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1238:     for (p=0; p<numRoots; p++) {
1239:       rowners[p].rank  = -1;
1240:       rowners[p].index = -1;
1241:     }
1242:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1243:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1244:     for (p = 0; p < numLeaves; ++p) {
1245:       if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1246:         lowners[p].rank  = rank;
1247:         lowners[p].index = leaves ? leaves[p] : p;
1248:       } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
1249:         lowners[p].rank  = -2;
1250:         lowners[p].index = -2;
1251:       }
1252:     }
1253:     for (p=0; p<numRoots; p++) { /* Roots must not participate in the reduction; flag them so that MAXLOC does not use them */
1254:       rowners[p].rank  = -3;
1255:       rowners[p].index = -3;
1256:     }
1257:     PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1258:     PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1259:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1260:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1261:     for (p = 0; p < numLeaves; ++p) {
1262:       if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1263:       if (lowners[p].rank != rank) ++numGhostPoints;
1264:     }
1265:     PetscMalloc1(numGhostPoints, &ghostPoints);
1266:     PetscMalloc1(numGhostPoints, &remotePoints);
1267:     for (p = 0, gp = 0; p < numLeaves; ++p) {
1268:       if (lowners[p].rank != rank) {
1269:         ghostPoints[gp]        = leaves ? leaves[p] : p;
1270:         remotePoints[gp].rank  = lowners[p].rank;
1271:         remotePoints[gp].index = lowners[p].index;
1272:         ++gp;
1273:       }
1274:     }
1275:     PetscFree2(rowners,lowners);
1276:     PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1277:     PetscSFSetFromOptions((dmParallel)->sf);
1278:   }
1279:   {
1280:     PetscBool useCone, useClosure, useAnchors;

1282:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
1283:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1284:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1285:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1286:   }
1287:   PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1288:   return(0);
1289: }

1291: /*@
1292:   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?

1294:   Input Parameters:
1295: + dm - The DMPlex object
1296: - flg - Balance the partition?

1298:   Level: intermediate

1300: .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1301: @*/
1302: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1303: {
1304:   DM_Plex *mesh = (DM_Plex *)dm->data;

1307:   mesh->partitionBalance = flg;
1308:   return(0);
1309: }
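
/*
  Usage sketch (illustrative; error checking omitted): request shared-point balancing before
  distributing the mesh.

    DM dmParallel = NULL;

    DMPlexSetPartitionBalance(dm, PETSC_TRUE);
    DMPlexDistribute(dm, 0, NULL, &dmParallel);
*/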

1311: /*@
1312:   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?

1314:   Input Parameter:
1315: . dm - The DMPlex object

1317:   Output Parameter:
1318: . flg - Balance the partition?

1320:   Level: intermediate

1322: .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1323: @*/
1324: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1325: {
1326:   DM_Plex *mesh = (DM_Plex *)dm->data;

1331:   *flg = mesh->partitionBalance;
1332:   return(0);
1333: }

1335: typedef struct {
1336:   PetscInt vote, rank, index;
1337: } Petsc3Int;

1339: /* MaxLoc, but carry a third piece of information around */
1340: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1341: {
1342:   Petsc3Int *a = (Petsc3Int *)inout_;
1343:   Petsc3Int *b = (Petsc3Int *)in_;
1344:   PetscInt i, len = *len_;
1345:   for (i = 0; i < len; i++) {
1346:     if (a[i].vote < b[i].vote) {
1347:       a[i].vote = b[i].vote;
1348:       a[i].rank = b[i].rank;
1349:       a[i].index = b[i].index;
1350:     } else if (a[i].vote <= b[i].vote) {
1351:       if (a[i].rank >= b[i].rank) {
1352:         a[i].rank = b[i].rank;
1353:         a[i].index = b[i].index;
1354:       }
1355:     }
1356:   }
1357: }

1359: /*@C
1360:   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

1362:   Input Parameters:
1363: + dm          - The source DMPlex object
1364: . migrationSF - The star forest that describes the parallel point remapping
1365: - ownership   - Flag causing a vote to determine point ownership

1367:   Output Parameter:
1368: . pointSF     - The star forest describing the point overlap in the remapped DM

1370:   Notes:
1371:   Output pointSF is guaranteed to have the array of local indices (leaves) sorted.

1373:   Level: developer

1375: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1376: @*/
1377: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1378: {
1379:   PetscMPIInt        rank, size;
1380:   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1381:   PetscInt          *pointLocal;
1382:   const PetscInt    *leaves;
1383:   const PetscSFNode *roots;
1384:   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1385:   Vec                shifts;
1386:   const PetscInt     numShifts = 13759;
1387:   const PetscScalar *shift = NULL;
1388:   const PetscBool    shiftDebug = PETSC_FALSE;
1389:   PetscBool          balance;
1390:   PetscErrorCode     ierr;

1394:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1395:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
1396:   PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0);

1398:   DMPlexGetPartitionBalance(dm, &balance);
1399:   PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1400:   PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1401:   if (ownership) {
1402:     MPI_Op       op;
1403:     MPI_Datatype datatype;
1404:     Petsc3Int   *rootVote = NULL, *leafVote = NULL;
1405:     /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1406:     if (balance) {
1407:       PetscRandom r;

1409:       PetscRandomCreate(PETSC_COMM_SELF, &r);
1410:       PetscRandomSetInterval(r, 0, 2467*size);
1411:       VecCreate(PETSC_COMM_SELF, &shifts);
1412:       VecSetSizes(shifts, numShifts, numShifts);
1413:       VecSetType(shifts, VECSTANDARD);
1414:       VecSetRandom(shifts, r);
1415:       PetscRandomDestroy(&r);
1416:       VecGetArrayRead(shifts, &shift);
1417:     }

1419:     PetscMalloc1(nroots, &rootVote);
1420:     PetscMalloc1(nleaves, &leafVote);
1421:     /* Point ownership vote: Process with highest rank owns shared points */
1422:     for (p = 0; p < nleaves; ++p) {
1423:       if (shiftDebug) {
1424:         PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);
1425:       }
1426:       /* Either put in a bid or we know we own it */
1427:       leafVote[p].vote  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
1428:       leafVote[p].rank = rank;
1429:       leafVote[p].index = p;
1430:     }
1431:     for (p = 0; p < nroots; p++) {
1432:       /* Roots must not participate in the reduction; flag them so that the MAXLOC reduction ignores them */
1433:       rootVote[p].vote  = -3;
1434:       rootVote[p].rank  = -3;
1435:       rootVote[p].index = -3;
1436:     }
1437:     MPI_Type_contiguous(3, MPIU_INT, &datatype);
1438:     MPI_Type_commit(&datatype);
1439:     MPI_Op_create(&MaxLocCarry, 1, &op);
1440:     PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op);
1441:     PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op);
1442:     MPI_Op_free(&op);
1443:     MPI_Type_free(&datatype);
1444:     for (p = 0; p < nroots; p++) {
1445:       rootNodes[p].rank = rootVote[p].rank;
1446:       rootNodes[p].index = rootVote[p].index;
1447:     }
1448:     PetscFree(leafVote);
1449:     PetscFree(rootVote);
1450:   } else {
1451:     for (p = 0; p < nroots; p++) {
1452:       rootNodes[p].index = -1;
1453:       rootNodes[p].rank = rank;
1454:     }
1455:     for (p = 0; p < nleaves; p++) {
1456:       /* Write new local id into old location */
1457:       if (roots[p].rank == rank) {
1458:         rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1459:       }
1460:     }
1461:   }
1462:   PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE);
1463:   PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE);

1465:   for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1466:     if (leafNodes[p].rank != rank) npointLeaves++;
1467:   }
1468:   PetscMalloc1(npointLeaves, &pointLocal);
1469:   PetscMalloc1(npointLeaves, &pointRemote);
1470:   for (idx = 0, p = 0; p < nleaves; p++) {
1471:     if (leafNodes[p].rank != rank) {
1472:       /* Note that pointLocal is automatically sorted as it is a sublist of 0, ..., nleaves-1 */
1473:       pointLocal[idx] = p;
1474:       pointRemote[idx] = leafNodes[p];
1475:       idx++;
1476:     }
1477:   }
1478:   if (shift) {
1479:     VecRestoreArrayRead(shifts, &shift);
1480:     VecDestroy(&shifts);
1481:   }
1482:   if (shiftDebug) {PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);}
1483:   PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1484:   PetscSFSetFromOptions(*pointSF);
1485:   PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1486:   PetscFree2(rootNodes, leafNodes);
1487:   PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0);
1488:   return(0);
1489: }
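
/*
  A typical call pattern (this is how DMPlexDistribute() below uses the routine; illustrative
  only): after migrating the mesh over a migration SF, build the point SF with an ownership vote
  and install it on the new DM and its coordinate DM.

    PetscSF sfPoint;
    DM      dmCoord;

    DMPlexCreatePointSF(dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
    DMSetPointSF(dmParallel, sfPoint);
    DMGetCoordinateDM(dmParallel, &dmCoord);
    if (dmCoord) DMSetPointSF(dmCoord, sfPoint);
    PetscSFDestroy(&sfPoint);
*/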

1491: /*@C
1492:   DMPlexMigrate - Migrates internal DM data over the supplied star forest

1494:   Collective on dm

1496:   Input Parameters:
1497: + dm       - The source DMPlex object
1498: - sf       - The star forest communication context describing the migration pattern

1500:   Output Parameter:
1501: . targetDM - The target DMPlex object

1503:   Level: intermediate

1505: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1506: @*/
1507: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1508: {
1509:   MPI_Comm               comm;
1510:   PetscInt               dim, cdim, nroots;
1511:   PetscSF                sfPoint;
1512:   ISLocalToGlobalMapping ltogMigration;
1513:   ISLocalToGlobalMapping ltogOriginal = NULL;
1514:   PetscBool              flg;
1515:   PetscErrorCode         ierr;

1519:   PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1520:   PetscObjectGetComm((PetscObject) dm, &comm);
1521:   DMGetDimension(dm, &dim);
1522:   DMSetDimension(targetDM, dim);
1523:   DMGetCoordinateDim(dm, &cdim);
1524:   DMSetCoordinateDim(targetDM, cdim);

1526:   /* Check for a one-to-all distribution pattern */
1527:   DMGetPointSF(dm, &sfPoint);
1528:   PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1529:   if (nroots >= 0) {
1530:     IS        isOriginal;
1531:     PetscInt  n, size, nleaves;
1532:     PetscInt  *numbering_orig, *numbering_new;

1534:     /* Get the original point numbering */
1535:     DMPlexCreatePointNumbering(dm, &isOriginal);
1536:     ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);
1537:     ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1538:     ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1539:     /* Convert to positive global numbers */
1540:     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1541:     /* Derive the new local-to-global mapping from the old one */
1542:     PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1543:     PetscMalloc1(nleaves, &numbering_new);
1544:     PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE);
1545:     PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE);
1546:     ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1547:     ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1548:     ISDestroy(&isOriginal);
1549:   } else {
1550:     /* One-to-all distribution pattern: We can derive LToG from SF */
1551:     ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1552:   }
1553:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1554:   if (flg) {
1555:     PetscPrintf(comm, "Point renumbering for DM migration:\n");
1556:     ISLocalToGlobalMappingView(ltogMigration, NULL);
1557:   }
1558:   /* Migrate DM data to target DM */
1559:   DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1560:   DMPlexDistributeLabels(dm, sf, targetDM);
1561:   DMPlexDistributeCoordinates(dm, sf, targetDM);
1562:   DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1563:   ISLocalToGlobalMappingDestroy(&ltogOriginal);
1564:   ISLocalToGlobalMappingDestroy(&ltogMigration);
1565:   PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1566:   return(0);
1567: }
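
/*
  A minimal usage sketch (following the pattern used by DMPlexDistribute() and
  DMPlexGetRedundantDM() below; illustrative only): create an empty target DM on the same
  communicator, migrate the mesh data over a migration SF, then build and install a point SF.

    DM      targetDM;
    PetscSF sfPoint;

    DMPlexCreate(comm, &targetDM);
    DMPlexMigrate(dm, migrationSF, targetDM);
    DMPlexCreatePointSF(targetDM, migrationSF, PETSC_FALSE, &sfPoint);
    DMSetPointSF(targetDM, sfPoint);
    PetscSFDestroy(&sfPoint);
*/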

1569: /*@C
1570:   DMPlexDistribute - Distributes the mesh and any associated sections.

1572:   Collective on dm

1574:   Input Parameters:
1575: + dm  - The original DMPlex object
1576: - overlap - The overlap of partitions; 0 gives a non-overlapping distribution

1578:   Output Parameters:
1579: + sf - The PetscSF used for point distribution, or NULL if not needed
1580: - dmParallel - The distributed DMPlex object

1582:   Note: If the mesh was not distributed, the output dmParallel will be NULL.

1584:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1585:   representation on the mesh.

1587:   Level: intermediate

1589: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexGetOverlap()
1590: @*/
1591: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1592: {
1593:   MPI_Comm               comm;
1594:   PetscPartitioner       partitioner;
1595:   IS                     cellPart;
1596:   PetscSection           cellPartSection;
1597:   DM                     dmCoord;
1598:   DMLabel                lblPartition, lblMigration;
1599:   PetscSF                sfMigration, sfStratified, sfPoint;
1600:   PetscBool              flg, balance;
1601:   PetscMPIInt            rank, size;
1602:   PetscErrorCode         ierr;


1610:   if (sf) *sf = NULL;
1611:   *dmParallel = NULL;
1612:   PetscObjectGetComm((PetscObject)dm,&comm);
1613:   MPI_Comm_rank(comm, &rank);
1614:   MPI_Comm_size(comm, &size);
1615:   if (size == 1) return(0);

1617:   PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1618:   /* Create cell partition */
1619:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1620:   PetscSectionCreate(comm, &cellPartSection);
1621:   DMPlexGetPartitioner(dm, &partitioner);
1622:   PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart);
1623:   PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0);
1624:   {
1625:     /* Convert partition to DMLabel */
1626:     IS             is;
1627:     PetscHSetI     ht;
1628:     const PetscInt *points;
1629:     PetscInt       *iranks;
1630:     PetscInt       pStart, pEnd, proc, npoints, poff = 0, nranks;

1632:     DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);
1633:     /* Preallocate strata */
1634:     PetscHSetICreate(&ht);
1635:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1636:     for (proc = pStart; proc < pEnd; proc++) {
1637:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1638:       if (npoints) {PetscHSetIAdd(ht, proc);}
1639:     }
1640:     PetscHSetIGetSize(ht, &nranks);
1641:     PetscMalloc1(nranks, &iranks);
1642:     PetscHSetIGetElems(ht, &poff, iranks);
1643:     PetscHSetIDestroy(&ht);
1644:     DMLabelAddStrata(lblPartition, nranks, iranks);
1645:     PetscFree(iranks);
1646:     /* Inline DMPlexPartitionLabelClosure() */
1647:     ISGetIndices(cellPart, &points);
1648:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1649:     for (proc = pStart; proc < pEnd; proc++) {
1650:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1651:       if (!npoints) continue;
1652:       PetscSectionGetOffset(cellPartSection, proc, &poff);
1653:       DMPlexClosurePoints_Private(dm, npoints, points+poff, &is);
1654:       DMLabelSetStratumIS(lblPartition, proc, is);
1655:       ISDestroy(&is);
1656:     }
1657:     ISRestoreIndices(cellPart, &points);
1658:   }
1659:   PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0);

1661:   DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);
1662:   DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);
1663:   DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1664:   DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1665:   PetscSFDestroy(&sfMigration);
1666:   sfMigration = sfStratified;
1667:   PetscSFSetUp(sfMigration);
1668:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1669:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1670:   if (flg) {
1671:     DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm));
1672:     PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm));
1673:   }

1675:   /* Create non-overlapping parallel DM and migrate internal data */
1676:   DMPlexCreate(comm, dmParallel);
1677:   PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1678:   DMPlexMigrate(dm, sfMigration, *dmParallel);

1680:   /* Build the point SF without overlap */
1681:   DMPlexGetPartitionBalance(dm, &balance);
1682:   DMPlexSetPartitionBalance(*dmParallel, balance);
1683:   DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1684:   DMSetPointSF(*dmParallel, sfPoint);
1685:   DMGetCoordinateDM(*dmParallel, &dmCoord);
1686:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1687:   if (flg) {PetscSFView(sfPoint, NULL);}

1689:   if (overlap > 0) {
1690:     DM                 dmOverlap;
1691:     PetscInt           nroots, nleaves, noldleaves, l;
1692:     const PetscInt    *oldLeaves;
1693:     PetscSFNode       *newRemote, *permRemote;
1694:     const PetscSFNode *oldRemote;
1695:     PetscSF            sfOverlap, sfOverlapPoint;

1697:     /* Add the partition overlap to the distributed DM */
1698:     DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1699:     DMDestroy(dmParallel);
1700:     *dmParallel = dmOverlap;
1701:     if (flg) {
1702:       PetscPrintf(comm, "Overlap Migration SF:\n");
1703:       PetscSFView(sfOverlap, NULL);
1704:     }

1706:     /* Re-map the migration SF to establish the full migration pattern */
1707:     PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote);
1708:     PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1709:     PetscMalloc1(nleaves, &newRemote);
1710:     /* oldRemote: original root point mapping to original leaf point
1711:        newRemote: original leaf point mapping to overlapped leaf point */
1712:     if (oldLeaves) {
1713:       /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1714:       PetscMalloc1(noldleaves, &permRemote);
1715:       for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1716:       oldRemote = permRemote;
1717:     }
1718:     PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE);
1719:     PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE);
1720:     if (oldLeaves) {PetscFree(oldRemote);}
1721:     PetscSFCreate(comm, &sfOverlapPoint);
1722:     PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1723:     PetscSFDestroy(&sfOverlap);
1724:     PetscSFDestroy(&sfMigration);
1725:     sfMigration = sfOverlapPoint;
1726:   }
1727:   /* Cleanup Partition */
1728:   DMLabelDestroy(&lblPartition);
1729:   DMLabelDestroy(&lblMigration);
1730:   PetscSectionDestroy(&cellPartSection);
1731:   ISDestroy(&cellPart);
1732:   /* Copy BC */
1733:   DMCopyBoundary(dm, *dmParallel);
1734:   /* Create sfNatural */
1735:   if (dm->useNatural) {
1736:     PetscSection section;

1738:     DMGetLocalSection(dm, &section);
1739:     DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1740:     DMSetUseNatural(*dmParallel, PETSC_TRUE);
1741:   }
1742:   /* Cleanup */
1743:   if (sf) {*sf = sfMigration;}
1744:   else    {PetscSFDestroy(&sfMigration);}
1745:   PetscSFDestroy(&sfPoint);
1746:   PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1747:   return(0);
1748: }
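
/*
  The standard usage idiom (illustrative sketch): distribute the mesh with no overlap and, if a
  parallel mesh was produced, replace the original DM with it.

    DM      dmDist = NULL;
    PetscSF sf     = NULL;

    DMPlexDistribute(dm, 0, &sf, &dmDist);
    if (dmDist) {
      DMDestroy(&dm);
      dm = dmDist;
    }
    PetscSFDestroy(&sf);
*/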

1750: /*@C
1751:   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

1753:   Collective on dm

1755:   Input Parameters:
1756: + dm  - The non-overlapping distributed DMPlex object
1757: - overlap - The overlap of partitions (the same on all ranks)

1759:   Output Parameters:
1760: + sf - The PetscSF used for point distribution
1761: - dmOverlap - The overlapping distributed DMPlex object, or NULL

1763:   Notes:
1764:   If the mesh was not distributed, the output dmOverlap will be NULL.

1766:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1767:   representation on the mesh.

1769:   Level: advanced

1771: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexDistribute(), DMPlexCreateOverlapLabel(), DMPlexGetOverlap()
1772: @*/
1773: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1774: {
1775:   MPI_Comm               comm;
1776:   PetscMPIInt            size, rank;
1777:   PetscSection           rootSection, leafSection;
1778:   IS                     rootrank, leafrank;
1779:   DM                     dmCoord;
1780:   DMLabel                lblOverlap;
1781:   PetscSF                sfOverlap, sfStratified, sfPoint;
1782:   PetscErrorCode         ierr;


1790:   if (sf) *sf = NULL;
1791:   *dmOverlap  = NULL;
1792:   PetscObjectGetComm((PetscObject)dm,&comm);
1793:   MPI_Comm_size(comm, &size);
1794:   MPI_Comm_rank(comm, &rank);
1795:   if (size == 1) return(0);

1797:   PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1798:   /* Compute point overlap with neighbouring processes on the distributed DM */
1799:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1800:   PetscSectionCreate(comm, &rootSection);
1801:   PetscSectionCreate(comm, &leafSection);
1802:   DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1803:   DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1804:   /* Convert overlap label to stratified migration SF */
1805:   DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1806:   DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1807:   PetscSFDestroy(&sfOverlap);
1808:   sfOverlap = sfStratified;
1809:   PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1810:   PetscSFSetFromOptions(sfOverlap);

1812:   PetscSectionDestroy(&rootSection);
1813:   PetscSectionDestroy(&leafSection);
1814:   ISDestroy(&rootrank);
1815:   ISDestroy(&leafrank);
1816:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);

1818:   /* Build the overlapping DM */
1819:   DMPlexCreate(comm, dmOverlap);
1820:   PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1821:   DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1822:   /* Store the overlap in the new DM */
1823:   ((DM_Plex*)(*dmOverlap)->data)->overlap = overlap + ((DM_Plex*)dm->data)->overlap;
1824:   /* Build the new point SF */
1825:   DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1826:   DMSetPointSF(*dmOverlap, sfPoint);
1827:   DMGetCoordinateDM(*dmOverlap, &dmCoord);
1828:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1829:   PetscSFDestroy(&sfPoint);
1830:   /* Cleanup overlap partition */
1831:   DMLabelDestroy(&lblOverlap);
1832:   if (sf) *sf = sfOverlap;
1833:   else    {PetscSFDestroy(&sfOverlap);}
1834:   PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1835:   return(0);
1836: }
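
/*
  A usage sketch (illustrative only): add one level of overlap to an already distributed,
  non-overlapping mesh and replace the original DM, analogous to the way DMPlexDistribute()
  above calls this routine.

    DM      dmOverlap = NULL;
    PetscSF sfOverlap = NULL;

    DMPlexDistributeOverlap(dm, 1, &sfOverlap, &dmOverlap);
    if (dmOverlap) {
      DMDestroy(&dm);
      dm = dmOverlap;
    }
    PetscSFDestroy(&sfOverlap);
*/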

1838: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
1839: {
1840:   DM_Plex        *mesh  = (DM_Plex*) dm->data;

1843:   *overlap = mesh->overlap;
1844:   return(0);
1845: }

1847: /*@
1848:   DMPlexGetOverlap - Get the DMPlex partition overlap.

1850:   Not collective

1852:   Input Parameter:
1853: . dm - The DM

1855:   Output Parameter:
1856: . overlap - The overlap of this DM, i.e. the number of overlap levels that have been added to it

1858:   Level: intermediate

1860: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap(), DMPlexCreateOverlapLabel()
1861: @*/
1862: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
1863: {
1864:   PetscErrorCode     ierr;

1868:   PetscUseMethod(dm,"DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap));
1869:   return(0);
1870: }
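
/*
  A small usage sketch (illustrative only): query the accumulated overlap and add a layer only
  if none is present yet.

    PetscInt overlap;

    DMPlexGetOverlap(dm, &overlap);
    if (!overlap) {
      DM dmOverlap = NULL;

      DMPlexDistributeOverlap(dm, 1, NULL, &dmOverlap);
      if (dmOverlap) {DMDestroy(&dm); dm = dmOverlap;}
    }
*/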


1873: /*@C
1874:   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1875:   root process of the original's communicator.

1877:   Collective on dm

1879:   Input Parameter:
1880: . dm - the original DMPlex object

1882:   Output Parameters:
1883: + sf - the PetscSF used for point distribution (optional)
1884: - gatherMesh - the gathered DM object, or NULL

1886:   Level: intermediate

1888: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1889: @*/
1890: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
1891: {
1892:   MPI_Comm       comm;
1893:   PetscMPIInt    size;
1894:   PetscPartitioner oldPart, gatherPart;

1900:   *gatherMesh = NULL;
1901:   if (sf) *sf = NULL;
1902:   comm = PetscObjectComm((PetscObject)dm);
1903:   MPI_Comm_size(comm,&size);
1904:   if (size == 1) return(0);
1905:   DMPlexGetPartitioner(dm,&oldPart);
1906:   PetscObjectReference((PetscObject)oldPart);
1907:   PetscPartitionerCreate(comm,&gatherPart);
1908:   PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1909:   DMPlexSetPartitioner(dm,gatherPart);
1910:   DMPlexDistribute(dm,0,sf,gatherMesh);

1912:   DMPlexSetPartitioner(dm,oldPart);
1913:   PetscPartitionerDestroy(&gatherPart);
1914:   PetscPartitionerDestroy(&oldPart);
1915:   return(0);
1916: }
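
/*
  A usage sketch (illustrative only; the viewer option name is made up for the example): gather
  the mesh onto rank 0, e.g. to view it serially, then clean up.

    DM      gatherMesh = NULL;
    PetscSF gatherSF   = NULL;

    DMPlexGetGatherDM(dm, &gatherSF, &gatherMesh);
    if (gatherMesh) {
      DMViewFromOptions(gatherMesh, NULL, "-gathered_dm_view");
      DMDestroy(&gatherMesh);
    }
    PetscSFDestroy(&gatherSF);
*/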

1918: /*@C
1919:   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

1921:   Collective on dm

1923:   Input Parameter:
1924: . dm - the original DMPlex object

1926:   Output Parameters:
1927: + sf - the PetscSF used for point distribution (optional)
1928: - redundantMesh - the redundant DM object, or NULL

1930:   Level: intermediate

1932: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1933: @*/
1934: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
1935: {
1936:   MPI_Comm       comm;
1937:   PetscMPIInt    size, rank;
1938:   PetscInt       pStart, pEnd, p;
1939:   PetscInt       numPoints = -1;
1940:   PetscSF        migrationSF, sfPoint, gatherSF;
1941:   DM             gatherDM, dmCoord;
1942:   PetscSFNode    *points;

1948:   *redundantMesh = NULL;
1949:   comm = PetscObjectComm((PetscObject)dm);
1950:   MPI_Comm_size(comm,&size);
1951:   if (size == 1) {
1952:     PetscObjectReference((PetscObject) dm);
1953:     *redundantMesh = dm;
1954:     if (sf) *sf = NULL;
1955:     return(0);
1956:   }
1957:   DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);
1958:   if (!gatherDM) return(0);
1959:   MPI_Comm_rank(comm,&rank);
1960:   DMPlexGetChart(gatherDM,&pStart,&pEnd);
1961:   numPoints = pEnd - pStart;
1962:   MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1963:   PetscMalloc1(numPoints,&points);
1964:   PetscSFCreate(comm,&migrationSF);
1965:   for (p = 0; p < numPoints; p++) {
1966:     points[p].index = p;
1967:     points[p].rank  = 0;
1968:   }
1969:   PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1970:   DMPlexCreate(comm, redundantMesh);
1971:   PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1972:   DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1973:   DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1974:   DMSetPointSF(*redundantMesh, sfPoint);
1975:   DMGetCoordinateDM(*redundantMesh, &dmCoord);
1976:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1977:   PetscSFDestroy(&sfPoint);
1978:   if (sf) {
1979:     PetscSF tsf;

1981:     PetscSFCompose(gatherSF,migrationSF,&tsf);
1982:     DMPlexStratifyMigrationSF(dm, tsf, sf);
1983:     PetscSFDestroy(&tsf);
1984:   }
1985:   PetscSFDestroy(&migrationSF);
1986:   PetscSFDestroy(&gatherSF);
1987:   DMDestroy(&gatherDM);
1988:   return(0);
1989: }
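
/*
  A usage sketch (illustrative only): obtain a full copy of the mesh on every rank, so that each
  rank sees the complete chart [pStart, pEnd), and destroy it after the rank-local work is done.

    DM       redundantMesh = NULL;
    PetscInt pStart, pEnd;

    DMPlexGetRedundantDM(dm, NULL, &redundantMesh);
    if (redundantMesh) {
      DMPlexGetChart(redundantMesh, &pStart, &pEnd);
      DMDestroy(&redundantMesh);
    }
*/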

1991: /*@
1992:   DMPlexIsDistributed - Find out whether this DM is distributed, i.e. more than one rank owns some points.

1994:   Collective

1996:   Input Parameter:
1997: . dm      - The DM object

1999:   Output Parameter:
2000: . distributed - Flag whether the DM is distributed

2002:   Level: intermediate

2004:   Notes:
2005:   This currently finds out whether at least two ranks have any DAG points.
2006:   This involves MPI_Allreduce() with one integer.
2007:   The result is currently not cached, so every call to this routine incurs this global communication.

2009: .seealso: DMPlexDistribute(), DMPlexGetOverlap(), DMPlexIsInterpolated()
2010: @*/
2011: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2012: {
2013:   PetscInt          pStart, pEnd, count;
2014:   MPI_Comm          comm;
2015:   PetscErrorCode    ierr;

2020:   PetscObjectGetComm((PetscObject)dm,&comm);
2021:   DMPlexGetChart(dm, &pStart, &pEnd);
2022:   count = !!(pEnd - pStart);
2023:   MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm);
2024:   *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2025:   return(0);
2026: }
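
/*
  A usage sketch (illustrative only): distribute only if the mesh is not already distributed.
  Since DMPlexIsDistributed() is collective, all ranks must make this call.

    PetscBool distributed;

    DMPlexIsDistributed(dm, &distributed);
    if (!distributed) {
      DM dmDist = NULL;

      DMPlexDistribute(dm, 0, NULL, &dmDist);
      if (dmDist) {DMDestroy(&dm); dm = dmDist;}
    }
*/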