Actual source code: plexdistribute.c

  1: #include <petsc/private/dmpleximpl.h>
  2: #include <petsc/private/dmlabelimpl.h>

  4: /*@C
  5:   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback

  7:   Input Parameters:
  8: + dm      - The DM object
  9: . user    - The user callback, may be NULL (to clear the callback)
 10: - ctx     - context for callback evaluation, may be NULL

 12:   Level: advanced

 14:   Notes:
 15:      The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.

 17:      Any setting here overrides other configuration of DMPlex adjacency determination.

 19: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
 20: @*/
 21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
 22: {
 23:   DM_Plex *mesh = (DM_Plex *)dm->data;

 26:   mesh->useradjacency = user;
 27:   mesh->useradjacencyctx = ctx;
 28:   return 0;
 29: }

 31: /*@C
 32:   DMPlexGetAdjacencyUser - get the user-defined adjacency callback

 34:   Input Parameter:
 35: . dm      - The DM object

 37:   Output Parameters:
 38: + user    - The user callback
 39: - ctx     - context for callback evaluation

 41:   Level: advanced

 43: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
 44: @*/
 45: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
 46: {
 47:   DM_Plex *mesh = (DM_Plex *)dm->data;

 50:   if (user) *user = mesh->useradjacency;
 51:   if (ctx) *ctx = mesh->useradjacencyctx;
 52:   return 0;
 53: }
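
/* Illustrative usage sketch (an editor's addition, not part of the original plexdistribute.c):
   it shows the shape of a user adjacency callback and how to register it with
   DMPlexSetAdjacencyUser(). The names ExampleConeOnlyAdjacency and ExampleRegisterUserAdjacency
   are hypothetical. */
static PetscErrorCode ExampleConeOnlyAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
{
  const PetscInt *cone = NULL;
  PetscInt        coneSize, c, maxSize = *adjSize, n = 0;

  /* On input *adjSize is the capacity of adj[]; on output it is the number of points written */
  DMPlexGetConeSize(dm, p, &coneSize);
  DMPlexGetCone(dm, p, &cone);
  if (n < maxSize) adj[n++] = p;                               /* a point is adjacent to itself */
  for (c = 0; c < coneSize; ++c) if (n < maxSize) adj[n++] = cone[c];
  *adjSize = n;
  return 0;
}

PETSC_UNUSED static PetscErrorCode ExampleRegisterUserAdjacency(DM dm)
{
  DMPlexSetAdjacencyUser(dm, ExampleConeOnlyAdjacency, NULL);  /* no user context needed here */
  return 0;
}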

 55: /*@
 56:   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

 58:   Input Parameters:
 59: + dm      - The DM object
 60: - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 62:   Level: intermediate

 64: .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
 65: @*/
 66: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
 67: {
 68:   DM_Plex *mesh = (DM_Plex *) dm->data;

 71:   mesh->useAnchors = useAnchors;
 72:   return 0;
 73: }

 75: /*@
 76:   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

 78:   Input Parameter:
 79: . dm      - The DM object

 81:   Output Parameter:
 82: . useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 84:   Level: intermediate

 86: .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
 87: @*/
 88: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
 89: {
 90:   DM_Plex *mesh = (DM_Plex *) dm->data;

 94:   *useAnchors = mesh->useAnchors;
 95:   return 0;
 96: }
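
/* Illustrative sketch (an editor's addition, not part of the original source): enable
   anchor-based adjacency before preallocating an operator, assuming anchors were set with
   DMPlexSetAnchors(). The name ExampleEnableAnchorAdjacency is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleEnableAnchorAdjacency(DM dm)
{
  PetscBool useAnchors;

  DMPlexSetAdjacencyUseAnchors(dm, PETSC_TRUE);   /* replace constrained points by their anchors */
  DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);  /* query the flag back; useAnchors == PETSC_TRUE */
  return 0;
}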

 98: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
 99: {
100:   const PetscInt *cone = NULL;
101:   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;

104:   DMPlexGetConeSize(dm, p, &coneSize);
105:   DMPlexGetCone(dm, p, &cone);
106:   for (c = 0; c <= coneSize; ++c) {
107:     const PetscInt  point   = !c ? p : cone[c-1];
108:     const PetscInt *support = NULL;
109:     PetscInt        supportSize, s, q;

111:     DMPlexGetSupportSize(dm, point, &supportSize);
112:     DMPlexGetSupport(dm, point, &support);
113:     for (s = 0; s < supportSize; ++s) {
114:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
115:         if (support[s] == adj[q]) break;
116:       }
118:     }
119:   }
120:   *adjSize = numAdj;
121:   return 0;
122: }

124: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
125: {
126:   const PetscInt *support = NULL;
127:   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;

130:   DMPlexGetSupportSize(dm, p, &supportSize);
131:   DMPlexGetSupport(dm, p, &support);
132:   for (s = 0; s <= supportSize; ++s) {
133:     const PetscInt  point = !s ? p : support[s-1];
134:     const PetscInt *cone  = NULL;
135:     PetscInt        coneSize, c, q;

137:     DMPlexGetConeSize(dm, point, &coneSize);
138:     DMPlexGetCone(dm, point, &cone);
139:     for (c = 0; c < coneSize; ++c) {
140:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
141:         if (cone[c] == adj[q]) break;
142:       }
144:     }
145:   }
146:   *adjSize = numAdj;
147:   return 0;
148: }

150: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
151: {
152:   PetscInt      *star = NULL;
153:   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;

156:   DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
157:   for (s = 0; s < starSize*2; s += 2) {
158:     const PetscInt *closure = NULL;
159:     PetscInt        closureSize, c, q;

161:     DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
162:     for (c = 0; c < closureSize*2; c += 2) {
163:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
164:         if (closure[c] == adj[q]) break;
165:       }
167:     }
168:     DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
169:   }
170:   DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
171:   *adjSize = numAdj;
172:   return 0;
173: }

175: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
176: {
177:   static PetscInt asiz = 0;
178:   PetscInt maxAnchors = 1;
179:   PetscInt aStart = -1, aEnd = -1;
180:   PetscInt maxAdjSize;
181:   PetscSection aSec = NULL;
182:   IS aIS = NULL;
183:   const PetscInt *anchors;
184:   DM_Plex *mesh = (DM_Plex *)dm->data;

187:   if (useAnchors) {
188:     DMPlexGetAnchors(dm,&aSec,&aIS);
189:     if (aSec) {
190:       PetscSectionGetMaxDof(aSec,&maxAnchors);
191:       maxAnchors = PetscMax(1,maxAnchors);
192:       PetscSectionGetChart(aSec,&aStart,&aEnd);
193:       ISGetIndices(aIS,&anchors);
194:     }
195:   }
196:   if (!*adj) {
197:     PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

199:     DMPlexGetChart(dm, &pStart,&pEnd);
200:     DMPlexGetDepth(dm, &depth);
201:     depth = PetscMax(depth, -depth);
202:     DMPlexGetMaxSizes(dm, &maxC, &maxS);
203:     coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
204:     supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
205:     asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
206:     asiz *= maxAnchors;
207:     asiz  = PetscMin(asiz,pEnd-pStart);
208:     PetscMalloc1(asiz,adj);
209:   }
210:   if (*adjSize < 0) *adjSize = asiz;
211:   maxAdjSize = *adjSize;
212:   if (mesh->useradjacency) {
213:     (*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx);
214:   } else if (useTransitiveClosure) {
215:     DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
216:   } else if (useCone) {
217:     DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
218:   } else {
219:     DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
220:   }
221:   if (useAnchors && aSec) {
222:     PetscInt origSize = *adjSize;
223:     PetscInt numAdj = origSize;
224:     PetscInt i = 0, j;
225:     PetscInt *orig = *adj;

227:     while (i < origSize) {
228:       PetscInt p = orig[i];
229:       PetscInt aDof = 0;

231:       if (p >= aStart && p < aEnd) {
232:         PetscSectionGetDof(aSec,p,&aDof);
233:       }
234:       if (aDof) {
235:         PetscInt aOff;
236:         PetscInt s, q;

238:         for (j = i + 1; j < numAdj; j++) {
239:           orig[j - 1] = orig[j];
240:         }
241:         origSize--;
242:         numAdj--;
243:         PetscSectionGetOffset(aSec,p,&aOff);
244:         for (s = 0; s < aDof; ++s) {
245:           for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
246:             if (anchors[aOff+s] == orig[q]) break;
247:           }
249:         }
250:       }
251:       else {
252:         i++;
253:       }
254:     }
255:     *adjSize = numAdj;
256:     ISRestoreIndices(aIS,&anchors);
257:   }
258:   return 0;
259: }

261: /*@
262:   DMPlexGetAdjacency - Return all points adjacent to the given point

264:   Input Parameters:
265: + dm - The DM object
266: - p  - The point

268:   Input/Output Parameters:
269: + adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE;
270:             on output the number of adjacent points
271: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize;
272:         on output contains the adjacent points

274:   Level: advanced

276:   Notes:
277:     The user must PetscFree the adj array if it was not passed in.

279: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
280: @*/
281: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
282: {
283:   PetscBool      useCone, useClosure, useAnchors;

289:   DMGetBasicAdjacency(dm, &useCone, &useClosure);
290:   DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
291:   DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);
292:   return 0;
293: }
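
/* Illustrative usage sketch (an editor's addition, not part of the original source): compute the
   adjacency of every point in the chart, reusing the array that DMPlexGetAdjacency() allocates on
   the first call, then free it as the Notes above require. The name ExamplePrintAdjacency is
   hypothetical. */
PETSC_UNUSED static PetscErrorCode ExamplePrintAdjacency(DM dm)
{
  PetscInt *adj = NULL;            /* NULL lets DMPlexGetAdjacency() allocate the array */
  PetscInt  pStart, pEnd, p;

  DMPlexGetChart(dm, &pStart, &pEnd);
  for (p = pStart; p < pEnd; ++p) {
    PetscInt adjSize = PETSC_DETERMINE, a;

    DMPlexGetAdjacency(dm, p, &adjSize, &adj);
    for (a = 0; a < adjSize; ++a) PetscPrintf(PETSC_COMM_SELF, "point %D is adjacent to %D\n", p, adj[a]);
  }
  PetscFree(adj);                  /* the caller frees the array it did not pass in */
  return 0;
}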

295: /*@
296:   DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

298:   Collective on dm

300:   Input Parameters:
301: + dm      - The DM
302: . sfPoint - The PetscSF which encodes point connectivity
303: . rootRankSection - The number of leaves for a given root point
304: . rootRanks - The rank of each edge into the root point
305: . leafRankSection - The number of processes sharing a given leaf point
306: - leafRanks - The rank of each process sharing a leaf point

308:   Output Parameters:
309: + processRanks - A list of process neighbors, or NULL
310: - sfProcess    - An SF encoding the two-sided process connectivity, or NULL

312:   Level: developer

314: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
315: @*/
316: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
317: {
318:   const PetscSFNode *remotePoints;
319:   PetscInt          *localPointsNew;
320:   PetscSFNode       *remotePointsNew;
321:   const PetscInt    *nranks;
322:   PetscInt          *ranksNew;
323:   PetscBT            neighbors;
324:   PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
325:   PetscMPIInt        size, proc, rank;

331:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
332:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
333:   PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
334:   PetscBTCreate(size, &neighbors);
335:   PetscBTMemzero(size, neighbors);
336:   /* Compute root-to-leaf process connectivity */
337:   PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
338:   ISGetIndices(rootRanks, &nranks);
339:   for (p = pStart; p < pEnd; ++p) {
340:     PetscInt ndof, noff, n;

342:     PetscSectionGetDof(rootRankSection, p, &ndof);
343:     PetscSectionGetOffset(rootRankSection, p, &noff);
344:     for (n = 0; n < ndof; ++n) PetscBTSet(neighbors, nranks[noff+n]);
345:   }
346:   ISRestoreIndices(rootRanks, &nranks);
347:   /* Compute leaf-to-neighbor process connectivity */
348:   PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
349:   ISGetIndices(leafRanks, &nranks);
350:   for (p = pStart; p < pEnd; ++p) {
351:     PetscInt ndof, noff, n;

353:     PetscSectionGetDof(leafRankSection, p, &ndof);
354:     PetscSectionGetOffset(leafRankSection, p, &noff);
355:     for (n = 0; n < ndof; ++n) PetscBTSet(neighbors, nranks[noff+n]);
356:   }
357:   ISRestoreIndices(leafRanks, &nranks);
358:   /* Compute leaf-to-root process connectivity */
359:   for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
360:   /* Calculate edges */
361:   PetscBTClear(neighbors, rank);
362:   for (proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
363:   PetscMalloc1(numNeighbors, &ranksNew);
364:   PetscMalloc1(numNeighbors, &localPointsNew);
365:   PetscMalloc1(numNeighbors, &remotePointsNew);
366:   for (proc = 0, n = 0; proc < size; ++proc) {
367:     if (PetscBTLookup(neighbors, proc)) {
368:       ranksNew[n]              = proc;
369:       localPointsNew[n]        = proc;
370:       remotePointsNew[n].index = rank;
371:       remotePointsNew[n].rank  = proc;
372:       ++n;
373:     }
374:   }
375:   PetscBTDestroy(&neighbors);
376:   if (processRanks) ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);
377:   else              PetscFree(ranksNew);
378:   if (sfProcess) {
379:     PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
380:     PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
381:     PetscSFSetFromOptions(*sfProcess);
382:     PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
383:   }
384:   return 0;
385: }

387: /*@
 388:   DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided information for an SF.

390:   Collective on dm

392:   Input Parameter:
393: . dm - The DM

395:   Output Parameters:
396: + rootSection - The number of leaves for a given root point
397: . rootrank    - The rank of each edge into the root point
398: . leafSection - The number of processes sharing a given leaf point
399: - leafrank    - The rank of each process sharing a leaf point

401:   Level: developer

403: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistribute(), DMPlexDistributeOverlap()
404: @*/
405: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
406: {
407:   MPI_Comm        comm;
408:   PetscSF         sfPoint;
409:   const PetscInt *rootdegree;
410:   PetscInt       *myrank, *remoterank;
411:   PetscInt        pStart, pEnd, p, nedges;
412:   PetscMPIInt     rank;

414:   PetscObjectGetComm((PetscObject) dm, &comm);
415:   MPI_Comm_rank(comm, &rank);
416:   DMPlexGetChart(dm, &pStart, &pEnd);
417:   DMGetPointSF(dm, &sfPoint);
418:   /* Compute number of leaves for each root */
419:   PetscObjectSetName((PetscObject) rootSection, "Root Section");
420:   PetscSectionSetChart(rootSection, pStart, pEnd);
421:   PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
422:   PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
423:   for (p = pStart; p < pEnd; ++p) PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);
424:   PetscSectionSetUp(rootSection);
425:   /* Gather rank of each leaf to root */
426:   PetscSectionGetStorageSize(rootSection, &nedges);
427:   PetscMalloc1(pEnd-pStart, &myrank);
428:   PetscMalloc1(nedges,  &remoterank);
429:   for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
430:   PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
431:   PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
432:   PetscFree(myrank);
433:   ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
434:   /* Distribute remote ranks to leaves */
435:   PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
436:   DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
437:   return 0;
438: }
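
/* Illustrative usage sketch (an editor's addition, not part of the original source): the caller
   creates the two sections, DMPlexDistributeOwnership() fills them and returns the two index
   sets, and the caller destroys all four. The name ExampleComputeOwnership is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleComputeOwnership(DM dm)
{
  PetscSection rootSection, leafSection;
  IS           rootrank, leafrank;

  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);
  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);
  DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
  PetscSectionView(rootSection, PETSC_VIEWER_STDOUT_WORLD); /* number of leaves attached to each owned root */
  ISView(rootrank, PETSC_VIEWER_STDOUT_WORLD);              /* the ranks holding those leaves */
  ISDestroy(&rootrank);
  ISDestroy(&leafrank);
  PetscSectionDestroy(&rootSection);
  PetscSectionDestroy(&leafSection);
  return 0;
}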

440: /*@C
 441:   DMPlexCreateOverlapLabel - Compute a DMLabel marking the points that must be sent to other processes, as point/rank pairings, to build the requested number of overlap levels

443:   Collective on dm

445:   Input Parameters:
446: + dm          - The DM
447: . levels      - Number of overlap levels
448: . rootSection - The number of leaves for a given root point
449: . rootrank    - The rank of each edge into the root point
450: . leafSection - The number of processes sharing a given leaf point
451: - leafrank    - The rank of each process sharing a leaf point

453:   Output Parameter:
454: . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings

456:   Level: developer

458: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
459: @*/
460: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
461: {
462:   MPI_Comm           comm;
463:   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
464:   PetscSF            sfPoint;
465:   const PetscSFNode *remote;
466:   const PetscInt    *local;
467:   const PetscInt    *nrank, *rrank;
468:   PetscInt          *adj = NULL;
469:   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
470:   PetscMPIInt        rank, size;
471:   PetscBool          flg;

473:   *ovLabel = NULL;
474:   PetscObjectGetComm((PetscObject) dm, &comm);
475:   MPI_Comm_size(comm, &size);
476:   MPI_Comm_rank(comm, &rank);
477:   if (size == 1) return 0;
478:   DMGetPointSF(dm, &sfPoint);
479:   DMPlexGetChart(dm, &pStart, &pEnd);
480:   if (!levels) {
481:     /* Add owned points */
482:     DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
483:     for (p = pStart; p < pEnd; ++p) DMLabelSetValue(*ovLabel, p, rank);
484:     return 0;
485:   }
486:   PetscSectionGetChart(leafSection, &sStart, &sEnd);
487:   PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
488:   DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);
489:   /* Handle leaves: shared with the root point */
490:   for (l = 0; l < nleaves; ++l) {
491:     PetscInt adjSize = PETSC_DETERMINE, a;

493:     DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);
494:     for (a = 0; a < adjSize; ++a) DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);
495:   }
496:   ISGetIndices(rootrank, &rrank);
497:   ISGetIndices(leafrank, &nrank);
498:   /* Handle roots */
499:   for (p = pStart; p < pEnd; ++p) {
500:     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

502:     if ((p >= sStart) && (p < sEnd)) {
503:       /* Some leaves share a root with other leaves on different processes */
504:       PetscSectionGetDof(leafSection, p, &neighbors);
505:       if (neighbors) {
506:         PetscSectionGetOffset(leafSection, p, &noff);
507:         DMPlexGetAdjacency(dm, p, &adjSize, &adj);
508:         for (n = 0; n < neighbors; ++n) {
509:           const PetscInt remoteRank = nrank[noff+n];

511:           if (remoteRank == rank) continue;
512:           for (a = 0; a < adjSize; ++a) DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);
513:         }
514:       }
515:     }
516:     /* Roots are shared with leaves */
517:     PetscSectionGetDof(rootSection, p, &neighbors);
518:     if (!neighbors) continue;
519:     PetscSectionGetOffset(rootSection, p, &noff);
520:     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
521:     for (n = 0; n < neighbors; ++n) {
522:       const PetscInt remoteRank = rrank[noff+n];

524:       if (remoteRank == rank) continue;
525:       for (a = 0; a < adjSize; ++a) DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);
526:     }
527:   }
528:   PetscFree(adj);
529:   ISRestoreIndices(rootrank, &rrank);
530:   ISRestoreIndices(leafrank, &nrank);
531:   /* Add additional overlap levels */
532:   for (l = 1; l < levels; l++) {
533:     /* Propagate point donations over SF to capture remote connections */
534:     DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
535:     /* Add next level of point donations to the label */
536:     DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
537:   }
538:   /* We require the closure in the overlap */
539:   DMPlexPartitionLabelClosure(dm, ovAdjByRank);
540:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
541:   if (flg) {
542:     PetscViewer viewer;
543:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);
544:     DMLabelView(ovAdjByRank, viewer);
545:   }
546:   /* Invert sender to receiver label */
547:   DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
548:   DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);
549:   /* Add owned points, except for shared local points */
550:   for (p = pStart; p < pEnd; ++p) DMLabelSetValue(*ovLabel, p, rank);
551:   for (l = 0; l < nleaves; ++l) {
552:     DMLabelClearValue(*ovLabel, local[l], rank);
553:     DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
554:   }
555:   /* Clean up */
556:   DMLabelDestroy(&ovAdjByRank);
557:   return 0;
558: }
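
/* Illustrative usage sketch (an editor's addition, not part of the original source): chain
   DMPlexDistributeOwnership() into DMPlexCreateOverlapLabel() to obtain one level of overlap as
   point/rank pairings. The name ExampleCreateOneLevelOverlapLabel is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleCreateOneLevelOverlapLabel(DM dm, DMLabel *ovLabel)
{
  PetscSection rootSection, leafSection;
  IS           rootrank, leafrank;

  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);
  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);
  DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
  DMPlexCreateOverlapLabel(dm, 1, rootSection, rootrank, leafSection, leafrank, ovLabel);
  ISDestroy(&rootrank);
  ISDestroy(&leafrank);
  PetscSectionDestroy(&rootSection);
  PetscSectionDestroy(&leafSection);
  return 0;
}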

560: /*@C
 561:   DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution needed to create the overlap described by the input SF

563:   Collective on dm

565:   Input Parameters:
566: + dm          - The DM
567: - overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

569:   Output Parameter:
570: . migrationSF - An SF that maps original points in old locations to points in new locations

572:   Level: developer

574: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistributeOverlap(), DMPlexDistribute()
575: @*/
576: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
577: {
578:   MPI_Comm           comm;
579:   PetscMPIInt        rank, size;
580:   PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
581:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
582:   PetscInt          *depthRecv, *depthShift, *depthIdx;
583:   PetscSFNode       *iremote;
584:   PetscSF            pointSF;
585:   const PetscInt    *sharedLocal;
586:   const PetscSFNode *overlapRemote, *sharedRemote;

589:   PetscObjectGetComm((PetscObject)dm, &comm);
590:   MPI_Comm_rank(comm, &rank);
591:   MPI_Comm_size(comm, &size);
592:   DMGetDimension(dm, &dim);

594:   /* Before building the migration SF we need to know the new stratum offsets */
595:   PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
596:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
597:   for (d=0; d<dim+1; d++) {
598:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
599:     for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
600:   }
601:   for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
602:   PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE);
603:   PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE);

605:   /* Count received points in each stratum and compute the internal strata shift */
606:   PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
607:   for (d=0; d<dim+1; d++) depthRecv[d]=0;
608:   for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
609:   depthShift[dim] = 0;
610:   for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
611:   for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
612:   for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
613:   for (d=0; d<dim+1; d++) {
614:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
615:     depthIdx[d] = pStart + depthShift[d];
616:   }

618:   /* Build an SF that describes the full overlap migration */
619:   DMPlexGetChart(dm, &pStart, &pEnd);
620:   newLeaves = pEnd - pStart + nleaves;
621:   PetscMalloc1(newLeaves, &ilocal);
622:   PetscMalloc1(newLeaves, &iremote);
623:   /* First map local points to themselves */
624:   for (d=0; d<dim+1; d++) {
625:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
626:     for (p=pStart; p<pEnd; p++) {
627:       point = p + depthShift[d];
628:       ilocal[point] = point;
629:       iremote[point].index = p;
630:       iremote[point].rank = rank;
631:       depthIdx[d]++;
632:     }
633:   }

635:   /* Add in the remote roots for currently shared points */
636:   DMGetPointSF(dm, &pointSF);
637:   PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
638:   for (d=0; d<dim+1; d++) {
639:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
640:     for (p=0; p<numSharedPoints; p++) {
641:       if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
642:         point = sharedLocal[p] + depthShift[d];
643:         iremote[point].index = sharedRemote[p].index;
644:         iremote[point].rank = sharedRemote[p].rank;
645:       }
646:     }
647:   }

649:   /* Now add the incoming overlap points */
650:   for (p=0; p<nleaves; p++) {
651:     point = depthIdx[remoteDepths[p]];
652:     ilocal[point] = point;
653:     iremote[point].index = overlapRemote[p].index;
654:     iremote[point].rank = overlapRemote[p].rank;
655:     depthIdx[remoteDepths[p]]++;
656:   }
657:   PetscFree2(pointDepths,remoteDepths);

659:   PetscSFCreate(comm, migrationSF);
660:   PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
661:   PetscSFSetFromOptions(*migrationSF);
662:   DMPlexGetChart(dm, &pStart, &pEnd);
663:   PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);

665:   PetscFree3(depthRecv, depthShift, depthIdx);
666:   return 0;
667: }
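
/* Illustrative usage sketch (an editor's addition, not part of the original source): convert an
   overlap SF into a migration SF and apply it with DMPlexMigrate(), defined later in this file.
   The target DM dmOverlap is assumed to be an already created, empty DMPLEX. The name
   ExampleAddOverlap is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleAddOverlap(DM dm, PetscSF overlapSF, DM dmOverlap)
{
  PetscSF migrationSF;

  DMPlexCreateOverlapMigrationSF(dm, overlapSF, &migrationSF);
  DMPlexMigrate(dm, migrationSF, dmOverlap);   /* copy cones, labels, and coordinates over the SF */
  PetscSFDestroy(&migrationSF);
  return 0;
}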

669: /*@
670:   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF for stratification.

672:   Input Parameters:
673: + dm          - The DM
674: - sf          - A star forest with non-ordered leaves, usually defining a DM point migration

676:   Output Parameter:
677: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

679:   Note:
680:   This lexicographically sorts by (depth, cellType)

682:   Level: developer

684: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
685: @*/
686: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
687: {
688:   MPI_Comm           comm;
689:   PetscMPIInt        rank, size;
690:   PetscInt           d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
691:   PetscSFNode       *pointDepths, *remoteDepths;
692:   PetscInt          *ilocal;
693:   PetscInt          *depthRecv, *depthShift, *depthIdx;
694:   PetscInt          *ctRecv,    *ctShift,    *ctIdx;
695:   const PetscSFNode *iremote;

698:   PetscObjectGetComm((PetscObject) dm, &comm);
699:   MPI_Comm_rank(comm, &rank);
700:   MPI_Comm_size(comm, &size);
701:   DMPlexGetDepth(dm, &ldepth);
702:   DMGetDimension(dm, &dim);
703:   MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
705:   PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0);

707:   /* Before building the migration SF we need to know the new stratum offsets */
708:   PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
709:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
710:   for (d = 0; d < depth+1; ++d) {
711:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
712:     for (p = pStart; p < pEnd; ++p) {
713:       DMPolytopeType ct;

715:       DMPlexGetCellType(dm, p, &ct);
716:       pointDepths[p].index = d;
717:       pointDepths[p].rank  = ct;
718:     }
719:   }
720:   for (p = 0; p < nleaves; ++p) {remoteDepths[p].index = -1; remoteDepths[p].rank = -1;}
721:   PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE);
722:   PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE);
723:   /* Count received points in each stratum and compute the internal strata shift */
724:   PetscCalloc6(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx);
725:   for (p = 0; p < nleaves; ++p) {
726:     if (remoteDepths[p].rank < 0) {
727:       ++depthRecv[remoteDepths[p].index];
728:     } else {
729:       ++ctRecv[remoteDepths[p].rank];
730:     }
731:   }
732:   {
733:     PetscInt depths[4], dims[4], shift = 0, i, c;

735:     /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
736:          Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
737:      */
738:     depths[0] = depth; depths[1] = 0; depths[2] = depth-1; depths[3] = 1;
739:     dims[0]   = dim;   dims[1]   = 0; dims[2]   = dim-1;   dims[3]   = 1;
740:     for (i = 0; i <= depth; ++i) {
741:       const PetscInt dep = depths[i];
742:       const PetscInt dim = dims[i];

744:       for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
745:         if (DMPolytopeTypeGetDim((DMPolytopeType) c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
746:         ctShift[c] = shift;
747:         shift     += ctRecv[c];
748:       }
749:       depthShift[dep] = shift;
750:       shift          += depthRecv[dep];
751:     }
752:     for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
753:       const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType) c);

755:       if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
756:         ctShift[c] = shift;
757:         shift     += ctRecv[c];
758:       }
759:     }
760:   }
761:   /* Derive a new local permutation based on stratified indices */
762:   PetscMalloc1(nleaves, &ilocal);
763:   for (p = 0; p < nleaves; ++p) {
764:     const PetscInt       dep = remoteDepths[p].index;
765:     const DMPolytopeType ct  = (DMPolytopeType) remoteDepths[p].rank;

767:     if ((PetscInt) ct < 0) {
768:       ilocal[p] = depthShift[dep] + depthIdx[dep];
769:       ++depthIdx[dep];
770:     } else {
771:       ilocal[p] = ctShift[ct] + ctIdx[ct];
772:       ++ctIdx[ct];
773:     }
774:   }
775:   PetscSFCreate(comm, migrationSF);
776:   PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
777:   PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode*)iremote, PETSC_COPY_VALUES);
778:   PetscFree2(pointDepths,remoteDepths);
779:   PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx);
780:   PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0);
781:   return 0;
782: }
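
/* Illustrative usage sketch (an editor's addition, not part of the original source): insert the
   stratifying permutation between a raw migration SF and DMPlexMigrate(), so the migrated mesh
   comes out stratified. The name ExampleStratifiedMigrate is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleStratifiedMigrate(DM dm, PetscSF rawMigrationSF, DM dmTarget)
{
  PetscSF migrationSF;

  DMPlexStratifyMigrationSF(dm, rawMigrationSF, &migrationSF);  /* reorder leaves by (depth, cellType) */
  DMPlexMigrate(dm, migrationSF, dmTarget);
  PetscSFDestroy(&migrationSF);
  return 0;
}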

784: /*@
785:   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

787:   Collective on dm

789:   Input Parameters:
790: + dm - The DMPlex object
791: . pointSF - The PetscSF describing the communication pattern
792: . originalSection - The PetscSection for existing data layout
793: - originalVec - The existing data in a local vector

795:   Output Parameters:
796: + newSection - The PetscSection describing the new data layout
797: - newVec - The new data in a local vector

799:   Level: developer

801: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
802: @*/
803: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
804: {
805:   PetscSF        fieldSF;
806:   PetscInt      *remoteOffsets, fieldSize;
807:   PetscScalar   *originalValues, *newValues;

809:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
810:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

812:   PetscSectionGetStorageSize(newSection, &fieldSize);
813:   VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
814:   VecSetType(newVec,dm->vectype);

816:   VecGetArray(originalVec, &originalValues);
817:   VecGetArray(newVec, &newValues);
818:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
819:   PetscFree(remoteOffsets);
820:   PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE);
821:   PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE);
822:   PetscSFDestroy(&fieldSF);
823:   VecRestoreArray(newVec, &newValues);
824:   VecRestoreArray(originalVec, &originalValues);
825:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
826:   return 0;
827: }
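
/* Illustrative usage sketch (an editor's addition, not part of the original source): distribute a
   local field over a migration SF, mirroring what DMPlexDistributeCoordinates() below does for
   coordinates. newSection is assumed to have been created with PetscSectionCreate(); the sizes
   and type of the new vector are set inside DMPlexDistributeField(). The name
   ExampleDistributeLocalField is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleDistributeLocalField(DM dm, PetscSF migrationSF, PetscSection origSection, Vec origVec, PetscSection newSection, Vec *newVec)
{
  VecCreate(PETSC_COMM_SELF, newVec);
  DMPlexDistributeField(dm, migrationSF, origSection, origVec, newSection, *newVec);
  return 0;
}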

829: /*@
830:   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

832:   Collective on dm

834:   Input Parameters:
835: + dm - The DMPlex object
836: . pointSF - The PetscSF describing the communication pattern
837: . originalSection - The PetscSection for existing data layout
838: - originalIS - The existing data

840:   Output Parameters:
841: + newSection - The PetscSection describing the new data layout
842: - newIS - The new data

844:   Level: developer

846: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
847: @*/
848: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
849: {
850:   PetscSF         fieldSF;
851:   PetscInt       *newValues, *remoteOffsets, fieldSize;
852:   const PetscInt *originalValues;

854:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
855:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

857:   PetscSectionGetStorageSize(newSection, &fieldSize);
858:   PetscMalloc1(fieldSize, &newValues);

860:   ISGetIndices(originalIS, &originalValues);
861:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
862:   PetscFree(remoteOffsets);
863:   PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE);
864:   PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE);
865:   PetscSFDestroy(&fieldSF);
866:   ISRestoreIndices(originalIS, &originalValues);
867:   ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
868:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
869:   return 0;
870: }

872: /*@
873:   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

875:   Collective on dm

877:   Input Parameters:
878: + dm - The DMPlex object
879: . pointSF - The PetscSF describing the communication pattern
880: . originalSection - The PetscSection for existing data layout
881: . datatype - The type of data
882: - originalData - The existing data

884:   Output Parameters:
885: + newSection - The PetscSection describing the new data layout
886: - newData - The new data

888:   Level: developer

890: .seealso: DMPlexDistribute(), DMPlexDistributeField()
891: @*/
892: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
893: {
894:   PetscSF        fieldSF;
895:   PetscInt      *remoteOffsets, fieldSize;
896:   PetscMPIInt    dataSize;

898:   PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
899:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

901:   PetscSectionGetStorageSize(newSection, &fieldSize);
902:   MPI_Type_size(datatype, &dataSize);
903:   PetscMalloc(fieldSize * dataSize, newData);

905:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
906:   PetscFree(remoteOffsets);
907:   PetscSFBcastBegin(fieldSF, datatype, originalData, *newData,MPI_REPLACE);
908:   PetscSFBcastEnd(fieldSF, datatype, originalData, *newData,MPI_REPLACE);
909:   PetscSFDestroy(&fieldSF);
910:   PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
911:   return 0;
912: }
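
/* Illustrative usage sketch (an editor's addition, not part of the original source): distribute
   raw integer data attached to mesh points, letting DMPlexDistributeData() allocate the receive
   buffer. The name ExampleDistributePointData is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleDistributePointData(DM dm, PetscSF migrationSF, PetscSection origSection, PetscInt *origData)
{
  PetscSection newSection;
  PetscInt    *newData;

  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);
  DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origData, newSection, (void **) &newData);
  /* ... consume newSection/newData in the migrated layout ... */
  PetscFree(newData);
  PetscSectionDestroy(&newSection);
  return 0;
}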

914: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
915: {
916:   DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
917:   MPI_Comm               comm;
918:   PetscSF                coneSF;
919:   PetscSection           originalConeSection, newConeSection;
920:   PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
921:   PetscBool              flg;

925:   PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
926:   /* Distribute cone section */
927:   PetscObjectGetComm((PetscObject)dm, &comm);
928:   DMPlexGetConeSection(dm, &originalConeSection);
929:   DMPlexGetConeSection(dmParallel, &newConeSection);
930:   PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
931:   DMSetUp(dmParallel);
932:   /* Communicate and renumber cones */
933:   PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
934:   PetscFree(remoteOffsets);
935:   DMPlexGetCones(dm, &cones);
936:   if (original) {
937:     PetscInt numCones;

939:     PetscSectionGetStorageSize(originalConeSection,&numCones);
940:     PetscMalloc1(numCones,&globCones);
941:     ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
942:   } else {
943:     globCones = cones;
944:   }
945:   DMPlexGetCones(dmParallel, &newCones);
946:   PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE);
947:   PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE);
948:   if (original) {
949:     PetscFree(globCones);
950:   }
951:   PetscSectionGetStorageSize(newConeSection, &newConesSize);
952:   ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
953:   if (PetscDefined(USE_DEBUG)) {
954:     PetscInt  p;
955:     PetscBool valid = PETSC_TRUE;
956:     for (p = 0; p < newConesSize; ++p) {
957:       if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);}
958:     }
960:   }
961:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
962:   if (flg) {
963:     PetscPrintf(comm, "Serial Cone Section:\n");
964:     PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm));
965:     PetscPrintf(comm, "Parallel Cone Section:\n");
966:     PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm));
967:     PetscSFView(coneSF, NULL);
968:   }
969:   DMPlexGetConeOrientations(dm, &cones);
970:   DMPlexGetConeOrientations(dmParallel, &newCones);
971:   PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE);
972:   PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE);
973:   PetscSFDestroy(&coneSF);
974:   PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
975:   /* Create supports and stratify DMPlex */
976:   {
977:     PetscInt pStart, pEnd;

979:     PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
980:     PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
981:   }
982:   DMPlexSymmetrize(dmParallel);
983:   DMPlexStratify(dmParallel);
984:   {
985:     PetscBool useCone, useClosure, useAnchors;

987:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
988:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
989:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
990:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
991:   }
992:   return 0;
993: }

995: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
996: {
997:   MPI_Comm         comm;
998:   DM               cdm, cdmParallel;
999:   PetscSection     originalCoordSection, newCoordSection;
1000:   Vec              originalCoordinates, newCoordinates;
1001:   PetscInt         bs;
1002:   PetscBool        isper;
1003:   const char      *name;
1004:   const PetscReal *maxCell, *L;
1005:   const DMBoundaryType *bd;


1010:   PetscObjectGetComm((PetscObject)dm, &comm);
1011:   DMGetCoordinateSection(dm, &originalCoordSection);
1012:   DMGetCoordinateSection(dmParallel, &newCoordSection);
1013:   DMGetCoordinatesLocal(dm, &originalCoordinates);
1014:   if (originalCoordinates) {
1015:     VecCreate(PETSC_COMM_SELF, &newCoordinates);
1016:     PetscObjectGetName((PetscObject) originalCoordinates, &name);
1017:     PetscObjectSetName((PetscObject) newCoordinates, name);

1019:     DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1020:     DMSetCoordinatesLocal(dmParallel, newCoordinates);
1021:     VecGetBlockSize(originalCoordinates, &bs);
1022:     VecSetBlockSize(newCoordinates, bs);
1023:     VecDestroy(&newCoordinates);
1024:   }
1025:   DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);
1026:   DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);
1027:   DMGetCoordinateDM(dm, &cdm);
1028:   DMGetCoordinateDM(dmParallel, &cdmParallel);
1029:   DMCopyDisc(cdm, cdmParallel);
1030:   return 0;
1031: }

1033: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1034: {
1035:   DM_Plex         *mesh = (DM_Plex*) dm->data;
1036:   MPI_Comm         comm;
1037:   DMLabel          depthLabel;
1038:   PetscMPIInt      rank;
1039:   PetscInt         depth, d, numLabels, numLocalLabels, l;
1040:   PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1041:   PetscObjectState depthState = -1;


1046:   PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1047:   PetscObjectGetComm((PetscObject)dm, &comm);
1048:   MPI_Comm_rank(comm, &rank);

1050:   /* If the user has changed the depth label, communicate it instead */
1051:   DMPlexGetDepth(dm, &depth);
1052:   DMPlexGetDepthLabel(dm, &depthLabel);
1053:   if (depthLabel) PetscObjectStateGet((PetscObject) depthLabel, &depthState);
1054:   lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1055:   MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1056:   if (sendDepth) {
1057:     DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel);
1058:     DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE);
1059:   }
1060:   /* Everyone must have either the same number of labels, or none */
1061:   DMGetNumLabels(dm, &numLocalLabels);
1062:   numLabels = numLocalLabels;
1063:   MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1064:   if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1065:   for (l = 0; l < numLabels; ++l) {
1066:     DMLabel     label = NULL, labelNew = NULL;
1067:     PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
1068:     const char *name = NULL;

1070:     if (hasLabels) {
1071:       DMGetLabelByNum(dm, l, &label);
1072:       /* Skip "depth" because it is recreated */
1073:       PetscObjectGetName((PetscObject) label, &name);
1074:       PetscStrcmp(name, "depth", &isDepth);
1075:     }
1076:     MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);
1077:     if (isDepth && !sendDepth) continue;
1078:     DMLabelDistribute(label, migrationSF, &labelNew);
1079:     if (isDepth) {
1080:       /* Put in any missing strata which can occur if users are managing the depth label themselves */
1081:       PetscInt gdepth;

1083:       MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1085:       for (d = 0; d <= gdepth; ++d) {
1086:         PetscBool has;

1088:         DMLabelHasStratum(labelNew, d, &has);
1089:         if (!has) DMLabelAddStratum(labelNew, d);
1090:       }
1091:     }
1092:     DMAddLabel(dmParallel, labelNew);
1093:     /* Put the output flag in the new label */
1094:     if (hasLabels) DMGetLabelOutput(dm, name, &lisOutput);
1095:     MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);
1096:     PetscObjectGetName((PetscObject) labelNew, &name);
1097:     DMSetLabelOutput(dmParallel, name, isOutput);
1098:     DMLabelDestroy(&labelNew);
1099:   }
1100:   PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1101:   return 0;
1102: }

1104: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1105: {
1106:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1107:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1108:   MPI_Comm        comm;
1109:   DM              refTree;
1110:   PetscSection    origParentSection, newParentSection;
1111:   PetscInt        *origParents, *origChildIDs;
1112:   PetscBool       flg;

1116:   PetscObjectGetComm((PetscObject)dm, &comm);

1118:   /* Set up tree */
1119:   DMPlexGetReferenceTree(dm,&refTree);
1120:   DMPlexSetReferenceTree(dmParallel,refTree);
1121:   DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1122:   if (origParentSection) {
1123:     PetscInt        pStart, pEnd;
1124:     PetscInt        *newParents, *newChildIDs, *globParents;
1125:     PetscInt        *remoteOffsetsParents, newParentSize;
1126:     PetscSF         parentSF;

1128:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1129:     PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1130:     PetscSectionSetChart(newParentSection,pStart,pEnd);
1131:     PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1132:     PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1133:     PetscFree(remoteOffsetsParents);
1134:     PetscSectionGetStorageSize(newParentSection,&newParentSize);
1135:     PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1136:     if (original) {
1137:       PetscInt numParents;

1139:       PetscSectionGetStorageSize(origParentSection,&numParents);
1140:       PetscMalloc1(numParents,&globParents);
1141:       ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1142:     }
1143:     else {
1144:       globParents = origParents;
1145:     }
1146:     PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE);
1147:     PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE);
1148:     if (original) {
1149:       PetscFree(globParents);
1150:     }
1151:     PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE);
1152:     PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE);
1153:     ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1154:     if (PetscDefined(USE_DEBUG)) {
1155:       PetscInt  p;
1156:       PetscBool valid = PETSC_TRUE;
1157:       for (p = 0; p < newParentSize; ++p) {
1158:         if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %D not in overlap SF\n", p);}
1159:       }
1161:     }
1162:     PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1163:     if (flg) {
1164:       PetscPrintf(comm, "Serial Parent Section: \n");
1165:       PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm));
1166:       PetscPrintf(comm, "Parallel Parent Section: \n");
1167:       PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm));
1168:       PetscSFView(parentSF, NULL);
1169:     }
1170:     DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1171:     PetscSectionDestroy(&newParentSection);
1172:     PetscFree2(newParents,newChildIDs);
1173:     PetscSFDestroy(&parentSF);
1174:   }
1175:   pmesh->useAnchors = mesh->useAnchors;
1176:   return 0;
1177: }

1179: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1180: {
1181:   PetscMPIInt            rank, size;
1182:   MPI_Comm               comm;


1187:   /* Create point SF for parallel mesh */
1188:   PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1189:   PetscObjectGetComm((PetscObject)dm, &comm);
1190:   MPI_Comm_rank(comm, &rank);
1191:   MPI_Comm_size(comm, &size);
1192:   {
1193:     const PetscInt *leaves;
1194:     PetscSFNode    *remotePoints, *rowners, *lowners;
1195:     PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1196:     PetscInt        pStart, pEnd;

1198:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1199:     PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1200:     PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1201:     for (p=0; p<numRoots; p++) {
1202:       rowners[p].rank  = -1;
1203:       rowners[p].index = -1;
1204:     }
1205:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1206:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1207:     for (p = 0; p < numLeaves; ++p) {
1208:       if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1209:         lowners[p].rank  = rank;
1210:         lowners[p].index = leaves ? leaves[p] : p;
1211:       } else if (lowners[p].rank >= 0) { /* Point already claimed, so flag it so that MAXLOC does not listen to us */
1212:         lowners[p].rank  = -2;
1213:         lowners[p].index = -2;
1214:       }
1215:     }
1216:     for (p=0; p<numRoots; p++) { /* Roots must not participate in the reduction; flag them so that MAXLOC does not use them */
1217:       rowners[p].rank  = -3;
1218:       rowners[p].index = -3;
1219:     }
1220:     PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1221:     PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1222:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1223:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1224:     for (p = 0; p < numLeaves; ++p) {
1226:       if (lowners[p].rank != rank) ++numGhostPoints;
1227:     }
1228:     PetscMalloc1(numGhostPoints, &ghostPoints);
1229:     PetscMalloc1(numGhostPoints, &remotePoints);
1230:     for (p = 0, gp = 0; p < numLeaves; ++p) {
1231:       if (lowners[p].rank != rank) {
1232:         ghostPoints[gp]        = leaves ? leaves[p] : p;
1233:         remotePoints[gp].rank  = lowners[p].rank;
1234:         remotePoints[gp].index = lowners[p].index;
1235:         ++gp;
1236:       }
1237:     }
1238:     PetscFree2(rowners,lowners);
1239:     PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1240:     PetscSFSetFromOptions((dmParallel)->sf);
1241:   }
1242:   {
1243:     PetscBool useCone, useClosure, useAnchors;

1245:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
1246:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1247:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1248:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1249:   }
1250:   PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1251:   return 0;
1252: }

1254: /*@
1255:   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?

1257:   Input Parameters:
1258: + dm - The DMPlex object
1259: - flg - Balance the partition?

1261:   Level: intermediate

1263: .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1264: @*/
1265: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1266: {
1267:   DM_Plex *mesh = (DM_Plex *)dm->data;

1269:   mesh->partitionBalance = flg;
1270:   return 0;
1271: }

1273: /*@
1274:   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?

1276:   Input Parameter:
1277: . dm - The DMPlex object

1279:   Output Parameter:
1280: . flg - Balance the partition?

1282:   Level: intermediate

1284: .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1285: @*/
1286: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1287: {
1288:   DM_Plex *mesh = (DM_Plex *)dm->data;

1292:   *flg = mesh->partitionBalance;
1293:   return 0;
1294: }
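
/* Illustrative usage sketch (an editor's addition, not part of the original source): request a
   balanced shared-point partition before distributing with no overlap. The name
   ExampleBalancedDistribute is hypothetical. */
PETSC_UNUSED static PetscErrorCode ExampleBalancedDistribute(DM dm, DM *dmDist)
{
  PetscSF sfMigration;

  DMPlexSetPartitionBalance(dm, PETSC_TRUE);      /* spread ownership of shared points across ranks */
  DMPlexDistribute(dm, 0, &sfMigration, dmDist);  /* dmDist is NULL when run on a single rank */
  PetscSFDestroy(&sfMigration);
  return 0;
}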

1296: typedef struct {
1297:   PetscInt vote, rank, index;
1298: } Petsc3Int;

1300: /* MaxLoc, but carry a third piece of information around */
1301: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1302: {
1303:   Petsc3Int *a = (Petsc3Int *)inout_;
1304:   Petsc3Int *b = (Petsc3Int *)in_;
1305:   PetscInt i, len = *len_;
1306:   for (i = 0; i < len; i++) {
1307:     if (a[i].vote < b[i].vote) {
1308:       a[i].vote = b[i].vote;
1309:       a[i].rank = b[i].rank;
1310:       a[i].index = b[i].index;
1311:     } else if (a[i].vote <= b[i].vote) {
1312:       if (a[i].rank >= b[i].rank) {
1313:         a[i].rank = b[i].rank;
1314:         a[i].index = b[i].index;
1315:       }
1316:     }
1317:   }
1318: }

1320: /*@C
1321:   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

1323:   Input Parameters:
1324: + dm          - The source DMPlex object
1325: . migrationSF - The star forest that describes the parallel point remapping
1326: - ownership   - Flag causing a vote to determine point ownership

1328:   Output Parameter:
1329: . pointSF     - The star forest describing the point overlap in the remapped DM

1331:   Notes:
1332:   Output pointSF is guaranteed to have the array of local indices (leaves) sorted.

1334:   Level: developer

1336: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1337: @*/
1338: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1339: {
1340:   PetscMPIInt        rank, size;
1341:   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1342:   PetscInt          *pointLocal;
1343:   const PetscInt    *leaves;
1344:   const PetscSFNode *roots;
1345:   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1346:   Vec                shifts;
1347:   const PetscInt     numShifts = 13759;
1348:   const PetscScalar *shift = NULL;
1349:   const PetscBool    shiftDebug = PETSC_FALSE;
1350:   PetscBool          balance;

1353:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1354:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
1355:   PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0);

1357:   DMPlexGetPartitionBalance(dm, &balance);
1358:   PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1359:   PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1360:   if (ownership) {
1361:     MPI_Op       op;
1362:     MPI_Datatype datatype;
1363:     Petsc3Int   *rootVote = NULL, *leafVote = NULL;
1364:     /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1365:     if (balance) {
1366:       PetscRandom r;

1368:       PetscRandomCreate(PETSC_COMM_SELF, &r);
1369:       PetscRandomSetInterval(r, 0, 2467*size);
1370:       VecCreate(PETSC_COMM_SELF, &shifts);
1371:       VecSetSizes(shifts, numShifts, numShifts);
1372:       VecSetType(shifts, VECSTANDARD);
1373:       VecSetRandom(shifts, r);
1374:       PetscRandomDestroy(&r);
1375:       VecGetArrayRead(shifts, &shift);
1376:     }

1378:     PetscMalloc1(nroots, &rootVote);
1379:     PetscMalloc1(nleaves, &leafVote);
1380:     /* Point ownership vote: Process with highest rank owns shared points */
1381:     for (p = 0; p < nleaves; ++p) {
1382:       if (shiftDebug) {
1383:         PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);
1384:       }
1385:       /* Either put in a bid or we know we own it */
1386:       leafVote[p].vote  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
1387:       leafVote[p].rank = rank;
1388:       leafVote[p].index = p;
1389:     }
1390:     for (p = 0; p < nroots; p++) {
1391:       /* Roots must not participate in the reduction; flag them so that MAXLOC does not use them */
1392:       rootVote[p].vote  = -3;
1393:       rootVote[p].rank  = -3;
1394:       rootVote[p].index = -3;
1395:     }
1396:     MPI_Type_contiguous(3, MPIU_INT, &datatype);
1397:     MPI_Type_commit(&datatype);
1398:     MPI_Op_create(&MaxLocCarry, 1, &op);
1399:     PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op);
1400:     PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op);
1401:     MPI_Op_free(&op);
1402:     MPI_Type_free(&datatype);
1403:     for (p = 0; p < nroots; p++) {
1404:       rootNodes[p].rank = rootVote[p].rank;
1405:       rootNodes[p].index = rootVote[p].index;
1406:     }
1407:     PetscFree(leafVote);
1408:     PetscFree(rootVote);
1409:   } else {
1410:     for (p = 0; p < nroots; p++) {
1411:       rootNodes[p].index = -1;
1412:       rootNodes[p].rank = rank;
1413:     }
1414:     for (p = 0; p < nleaves; p++) {
1415:       /* Write new local id into old location */
1416:       if (roots[p].rank == rank) {
1417:         rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1418:       }
1419:     }
1420:   }
1421:   PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE);
1422:   PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE);

1424:   for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1425:     if (leafNodes[p].rank != rank) npointLeaves++;
1426:   }
1427:   PetscMalloc1(npointLeaves, &pointLocal);
1428:   PetscMalloc1(npointLeaves, &pointRemote);
1429:   for (idx = 0, p = 0; p < nleaves; p++) {
1430:     if (leafNodes[p].rank != rank) {
1431:       /* Note that pointLocal is automatically sorted as it is a sublist of 0, ..., nleaves-1 */
1432:       pointLocal[idx] = p;
1433:       pointRemote[idx] = leafNodes[p];
1434:       idx++;
1435:     }
1436:   }
1437:   if (shift) {
1438:     VecRestoreArrayRead(shifts, &shift);
1439:     VecDestroy(&shifts);
1440:   }
1441:   if (shiftDebug) PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);
1442:   PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1443:   PetscSFSetFromOptions(*pointSF);
1444:   PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1445:   PetscFree2(rootNodes, leafNodes);
1446:   PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0);
1447:   return 0;
1448: }
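
/*
  Worked example of the ownership vote above (added illustration, not part of the
  original source): with size = 4 and balancing enabled, suppose a shared root point
  is held as a leaf by ranks 1 and 3 and the random shift drawn for that point is 6.
  The votes are (1 + 6) % 4 = 3 and (3 + 6) % 4 = 1, so the MaxLocCarry reduction
  awards ownership to rank 1.  Without balancing the shift is 0, the vote equals the
  rank, and the highest rank always wins.
*/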

1450: /*@C
1451:   DMPlexMigrate  - Migrates internal DM data over the supplied star forest

1453:   Collective on dm

1455:   Input Parameters:
1456: + dm       - The source DMPlex object
1457: - sf       - The star forest communication context describing the migration pattern

1459:   Output Parameter:
1460: . targetDM - The target DMPlex object

1462:   Level: intermediate

1464: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1465: @*/
1466: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1467: {
1468:   MPI_Comm               comm;
1469:   PetscInt               dim, cdim, nroots;
1470:   PetscSF                sfPoint;
1471:   ISLocalToGlobalMapping ltogMigration;
1472:   ISLocalToGlobalMapping ltogOriginal = NULL;
1473:   PetscBool              flg;

1476:   PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1477:   PetscObjectGetComm((PetscObject) dm, &comm);
1478:   DMGetDimension(dm, &dim);
1479:   DMSetDimension(targetDM, dim);
1480:   DMGetCoordinateDim(dm, &cdim);
1481:   DMSetCoordinateDim(targetDM, cdim);

1483:   /* Check for a one-to-all distribution pattern */
1484:   DMGetPointSF(dm, &sfPoint);
1485:   PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1486:   if (nroots >= 0) {
1487:     IS        isOriginal;
1488:     PetscInt  n, size, nleaves;
1489:     PetscInt  *numbering_orig, *numbering_new;

1491:     /* Get the original point numbering */
1492:     DMPlexCreatePointNumbering(dm, &isOriginal);
1493:     ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);
1494:     ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1495:     ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1496:     /* Convert to positive global numbers */
1497:     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1498:     /* Derive the new local-to-global mapping from the old one */
1499:     PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1500:     PetscMalloc1(nleaves, &numbering_new);
1501:     PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE);
1502:     PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE);
1503:     ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1504:     ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1505:     ISDestroy(&isOriginal);
1506:   } else {
1507:     /* One-to-all distribution pattern: We can derive LToG from SF */
1508:     ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1509:   }
1510:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1511:   if (flg) {
1512:     PetscPrintf(comm, "Point renumbering for DM migration:\n");
1513:     ISLocalToGlobalMappingView(ltogMigration, NULL);
1514:   }
1515:   /* Migrate DM data to target DM */
1516:   DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1517:   DMPlexDistributeLabels(dm, sf, targetDM);
1518:   DMPlexDistributeCoordinates(dm, sf, targetDM);
1519:   DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1520:   ISLocalToGlobalMappingDestroy(&ltogOriginal);
1521:   ISLocalToGlobalMappingDestroy(&ltogMigration);
1522:   PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1523:   return 0;
1524: }
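
/*
  Illustrative usage (added sketch, not part of the original source): the typical
  migration pattern, mirroring what DMPlexDistribute() does below.  Error checking
  is omitted to match the listing; "dm" and "sfMigration" are assumed to exist.

    DM      dmTarget;
    PetscSF sfPoint;

    DMPlexCreate(PetscObjectComm((PetscObject) dm), &dmTarget);
    DMPlexMigrate(dm, sfMigration, dmTarget);
    DMPlexCreatePointSF(dmTarget, sfMigration, PETSC_TRUE, &sfPoint);
    DMSetPointSF(dmTarget, sfPoint);
    PetscSFDestroy(&sfPoint);
*/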

1526: /*@C
1527:   DMPlexDistribute - Distributes the mesh and any associated sections.

1529:   Collective on dm

1531:   Input Parameters:
1532: + dm  - The original DMPlex object
1533: - overlap - The overlap of partitions, 0 is the default

1535:   Output Parameters:
1536: + sf - The PetscSF used for point distribution, or NULL if not needed
1537: - dmParallel - The distributed DMPlex object

1539:   Note: If no distribution takes place (for example, when the communicator has only one rank), the output dmParallel is set to NULL.

1541:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1542:   representation on the mesh.

1544:   Level: intermediate

1546: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexGetOverlap()
1547: @*/
1548: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1549: {
1550:   MPI_Comm               comm;
1551:   PetscPartitioner       partitioner;
1552:   IS                     cellPart;
1553:   PetscSection           cellPartSection;
1554:   DM                     dmCoord;
1555:   DMLabel                lblPartition, lblMigration;
1556:   PetscSF                sfMigration, sfStratified, sfPoint;
1557:   PetscBool              flg, balance;
1558:   PetscMPIInt            rank, size;


1565:   if (sf) *sf = NULL;
1566:   *dmParallel = NULL;
1567:   PetscObjectGetComm((PetscObject)dm,&comm);
1568:   MPI_Comm_rank(comm, &rank);
1569:   MPI_Comm_size(comm, &size);
1570:   if (size == 1) return 0;

1572:   PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1573:   /* Create cell partition */
1574:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1575:   PetscSectionCreate(comm, &cellPartSection);
1576:   DMPlexGetPartitioner(dm, &partitioner);
1577:   PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart);
1578:   PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0);
1579:   {
1580:     /* Convert partition to DMLabel */
1581:     IS             is;
1582:     PetscHSetI     ht;
1583:     const PetscInt *points;
1584:     PetscInt       *iranks;
1585:     PetscInt       pStart, pEnd, proc, npoints, poff = 0, nranks;

1587:     DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);
1588:     /* Preallocate strata */
1589:     PetscHSetICreate(&ht);
1590:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1591:     for (proc = pStart; proc < pEnd; proc++) {
1592:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1593:       if (npoints) PetscHSetIAdd(ht, proc);
1594:     }
1595:     PetscHSetIGetSize(ht, &nranks);
1596:     PetscMalloc1(nranks, &iranks);
1597:     PetscHSetIGetElems(ht, &poff, iranks);
1598:     PetscHSetIDestroy(&ht);
1599:     DMLabelAddStrata(lblPartition, nranks, iranks);
1600:     PetscFree(iranks);
1601:     /* Inline DMPlexPartitionLabelClosure() */
1602:     ISGetIndices(cellPart, &points);
1603:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1604:     for (proc = pStart; proc < pEnd; proc++) {
1605:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1606:       if (!npoints) continue;
1607:       PetscSectionGetOffset(cellPartSection, proc, &poff);
1608:       DMPlexClosurePoints_Private(dm, npoints, points+poff, &is);
1609:       DMLabelSetStratumIS(lblPartition, proc, is);
1610:       ISDestroy(&is);
1611:     }
1612:     ISRestoreIndices(cellPart, &points);
1613:   }
1614:   PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0);

1616:   DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);
1617:   DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);
1618:   DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1619:   DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1620:   PetscSFDestroy(&sfMigration);
1621:   sfMigration = sfStratified;
1622:   PetscSFSetUp(sfMigration);
1623:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1624:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1625:   if (flg) {
1626:     DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm));
1627:     PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm));
1628:   }

1630:   /* Create non-overlapping parallel DM and migrate internal data */
1631:   DMPlexCreate(comm, dmParallel);
1632:   PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1633:   DMPlexMigrate(dm, sfMigration, *dmParallel);

1635:   /* Build the point SF without overlap */
1636:   DMPlexGetPartitionBalance(dm, &balance);
1637:   DMPlexSetPartitionBalance(*dmParallel, balance);
1638:   DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1639:   DMSetPointSF(*dmParallel, sfPoint);
1640:   DMGetCoordinateDM(*dmParallel, &dmCoord);
1641:   if (dmCoord) DMSetPointSF(dmCoord, sfPoint);
1642:   if (flg) PetscSFView(sfPoint, NULL);

1644:   if (overlap > 0) {
1645:     DM                 dmOverlap;
1646:     PetscInt           nroots, nleaves, noldleaves, l;
1647:     const PetscInt    *oldLeaves;
1648:     PetscSFNode       *newRemote, *permRemote;
1649:     const PetscSFNode *oldRemote;
1650:     PetscSF            sfOverlap, sfOverlapPoint;

1652:     /* Add the partition overlap to the distributed DM */
1653:     DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1654:     DMDestroy(dmParallel);
1655:     *dmParallel = dmOverlap;
1656:     if (flg) {
1657:       PetscPrintf(comm, "Overlap Migration SF:\n");
1658:       PetscSFView(sfOverlap, NULL);
1659:     }

1661:     /* Re-map the migration SF to establish the full migration pattern */
1662:     PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote);
1663:     PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1664:     PetscMalloc1(nleaves, &newRemote);
1665:     /* oldRemote: original root point mapping to original leaf point
1666:        newRemote: original leaf point mapping to overlapped leaf point */
1667:     if (oldLeaves) {
1668:       /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1669:       PetscMalloc1(noldleaves, &permRemote);
1670:       for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1671:       oldRemote = permRemote;
1672:     }
1673:     PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE);
1674:     PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE);
1675:     if (oldLeaves) PetscFree(oldRemote);
1676:     PetscSFCreate(comm, &sfOverlapPoint);
1677:     PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1678:     PetscSFDestroy(&sfOverlap);
1679:     PetscSFDestroy(&sfMigration);
1680:     sfMigration = sfOverlapPoint;
1681:   }
1682:   /* Cleanup Partition */
1683:   DMLabelDestroy(&lblPartition);
1684:   DMLabelDestroy(&lblMigration);
1685:   PetscSectionDestroy(&cellPartSection);
1686:   ISDestroy(&cellPart);
1687:   /* Copy discretization */
1688:   DMCopyDisc(dm, *dmParallel);
1689:   /* Create sfNatural */
1690:   if (dm->useNatural) {
1691:     PetscSection section;

1693:     DMGetLocalSection(dm, &section);
1694:     DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1695:     DMSetUseNatural(*dmParallel, PETSC_TRUE);
1696:   }
1697:   DMPlexCopy_Internal(dm, PETSC_TRUE, *dmParallel);
1698:   /* Cleanup */
1699:   if (sf) {*sf = sfMigration;}
1700:   else    PetscSFDestroy(&sfMigration);
1701:   PetscSFDestroy(&sfPoint);
1702:   PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1703:   return 0;
1704: }
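
/*
  Illustrative usage (added sketch, not part of the original source): distributing a
  mesh in place with no overlap, as commonly done right after mesh creation.  Error
  checking is omitted; "dm" is an assumed, already-created DMPlex.

    DM dmDist = NULL;

    DMPlexDistribute(dm, 0, NULL, &dmDist);
    if (dmDist) {
      DMDestroy(&dm);
      dm = dmDist;
    }
*/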

1706: /*@C
1707:   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

1709:   Collective on dm

1711:   Input Parameters:
1712: + dm  - The non-overlapping distributed DMPlex object
1713: - overlap - The overlap of partitions (the same on all ranks)

1715:   Output Parameters:
1716: + sf - The PetscSF used for point distribution, or NULL if not needed
1717: - dmOverlap - The overlapping distributed DMPlex object, or NULL

1719:   Notes:
1720:   If the mesh is not distributed (the communicator has only one rank), the output dmOverlap is set to NULL.

1722:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1723:   representation on the mesh.

1725:   Level: advanced

1727: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexDistribute(), DMPlexCreateOverlapLabel(), DMPlexGetOverlap()
1728: @*/
1729: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1730: {
1731:   MPI_Comm               comm;
1732:   PetscMPIInt            size, rank;
1733:   PetscSection           rootSection, leafSection;
1734:   IS                     rootrank, leafrank;
1735:   DM                     dmCoord;
1736:   DMLabel                lblOverlap;
1737:   PetscSF                sfOverlap, sfStratified, sfPoint;


1744:   if (sf) *sf = NULL;
1745:   *dmOverlap  = NULL;
1746:   PetscObjectGetComm((PetscObject)dm,&comm);
1747:   MPI_Comm_size(comm, &size);
1748:   MPI_Comm_rank(comm, &rank);
1749:   if (size == 1) return 0;

1751:   PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1752:   /* Compute point overlap with neighbouring processes on the distributed DM */
1753:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1754:   PetscSectionCreate(comm, &rootSection);
1755:   PetscSectionCreate(comm, &leafSection);
1756:   DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1757:   DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1758:   /* Convert overlap label to stratified migration SF */
1759:   DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1760:   DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1761:   PetscSFDestroy(&sfOverlap);
1762:   sfOverlap = sfStratified;
1763:   PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1764:   PetscSFSetFromOptions(sfOverlap);

1766:   PetscSectionDestroy(&rootSection);
1767:   PetscSectionDestroy(&leafSection);
1768:   ISDestroy(&rootrank);
1769:   ISDestroy(&leafrank);
1770:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);

1772:   /* Build the overlapping DM */
1773:   DMPlexCreate(comm, dmOverlap);
1774:   PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1775:   DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1776:   /* Store the overlap in the new DM */
1777:   ((DM_Plex*)(*dmOverlap)->data)->overlap = overlap + ((DM_Plex*)dm->data)->overlap;
1778:   /* Build the new point SF */
1779:   DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1780:   DMSetPointSF(*dmOverlap, sfPoint);
1781:   DMGetCoordinateDM(*dmOverlap, &dmCoord);
1782:   if (dmCoord) DMSetPointSF(dmCoord, sfPoint);
1783:   PetscSFDestroy(&sfPoint);
1784:   /* Cleanup overlap partition */
1785:   DMLabelDestroy(&lblOverlap);
1786:   if (sf) *sf = sfOverlap;
1787:   else    PetscSFDestroy(&sfOverlap);
1788:   PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1789:   return 0;
1790: }
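
/*
  Illustrative usage (added sketch, not part of the original source): adding one
  level of overlap to an already distributed, non-overlapping DM and replacing it
  in place.  Error checking is omitted; "dm" is an assumed distributed DMPlex.

    DM dmOverlap = NULL;

    DMPlexDistributeOverlap(dm, 1, NULL, &dmOverlap);
    if (dmOverlap) {
      DMDestroy(&dm);
      dm = dmOverlap;
    }
*/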

1792: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
1793: {
1794:   DM_Plex        *mesh  = (DM_Plex*) dm->data;

1796:   *overlap = mesh->overlap;
1797:   return 0;
1798: }

1800: /*@
1801:   DMPlexGetOverlap - Get the DMPlex partition overlap.

1803:   Not collective

1805:   Input Parameter:
1806: . dm - The DM

1808:   Output Parameter:
1809: . overlap - The overlap of this DM (the accumulated overlap passed to DMPlexDistribute()/DMPlexDistributeOverlap(); 0 if none)

1811:   Level: intermediate

1813: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap(), DMPlexCreateOverlapLabel()
1814: @*/
1815: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
1816: {
1818:   PetscUseMethod(dm,"DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap));
1819:   return 0;
1820: }
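
/*
  Illustrative usage (added sketch, not part of the original source): querying the
  stored overlap, e.g. to check whether ghost cells are available.

    PetscInt ovl;

    DMPlexGetOverlap(dm, &ovl);
    if (ovl > 0) {
      ... the local mesh contains an overlap (ghost) region ...
    }
*/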

1822: PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
1823: {
1824:   DM_Plex *mesh = (DM_Plex *) dm->data;

1826:   mesh->distDefault = dist;
1827:   return 0;
1828: }

1830: /*@
1831:   DMPlexDistributeSetDefault - Set flag indicating whether the DM should be distributed by default

1833:   Logically collective

1835:   Input Parameters:
1836: + dm   - The DM
1837: - dist - Flag indicating whether the DM should be distributed by default

1839:   Level: intermediate

1841: .seealso: DMPlexDistributeGetDefault(), DMPlexDistribute()
1842: @*/
1843: PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
1844: {
1847:   PetscTryMethod(dm,"DMPlexDistributeSetDefault_C",(DM,PetscBool),(dm,dist));
1848:   return 0;
1849: }
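
/*
  Illustrative usage (added sketch, not part of the original source): disabling the
  default distribution, e.g. when the application intends to call DMPlexDistribute()
  itself with a custom partitioner or overlap.

    DMPlexDistributeSetDefault(dm, PETSC_FALSE);
*/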

1851: PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
1852: {
1853:   DM_Plex *mesh = (DM_Plex *) dm->data;

1855:   *dist = mesh->distDefault;
1856:   return 0;
1857: }

1859: /*@
1860:   DMPlexDistributeGetDefault - Get flag indicating whether the DM should be distributed by default

1862:   Not collective

1864:   Input Parameter:
1865: . dm   - The DM

1867:   Output Parameter:
1868: . dist - Flag indicating whether the DM is distributed by default

1870:   Level: intermediate

1872: .seealso: DMPlexDistributeSetDefault(), DMPlexDistribute()
1873: @*/
1874: PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
1875: {
1878:   PetscUseMethod(dm,"DMPlexDistributeGetDefault_C",(DM,PetscBool*),(dm,dist));
1879:   return 0;
1880: }
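
/*
  Illustrative usage (added sketch, not part of the original source): reading the
  default-distribution flag back.

    PetscBool distDefault;

    DMPlexDistributeGetDefault(dm, &distDefault);
    if (!distDefault) {
      ... the mesh will not be distributed automatically ...
    }
*/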

1882: /*@C
1883:   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1884:   root process of the original's communicator.

1886:   Collective on dm

1888:   Input Parameter:
1889: . dm - the original DMPlex object

1891:   Output Parameters:
1892: + sf - the PetscSF used for point distribution (optional)
1893: - gatherMesh - the gathered DM object, or NULL

1895:   Level: intermediate

1897: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1898: @*/
1899: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
1900: {
1901:   MPI_Comm       comm;
1902:   PetscMPIInt    size;
1903:   PetscPartitioner oldPart, gatherPart;

1907:   *gatherMesh = NULL;
1908:   if (sf) *sf = NULL;
1909:   comm = PetscObjectComm((PetscObject)dm);
1910:   MPI_Comm_size(comm,&size);
1911:   if (size == 1) return 0;
1912:   DMPlexGetPartitioner(dm,&oldPart);
1913:   PetscObjectReference((PetscObject)oldPart);
1914:   PetscPartitionerCreate(comm,&gatherPart);
1915:   PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1916:   DMPlexSetPartitioner(dm,gatherPart);
1917:   DMPlexDistribute(dm,0,sf,gatherMesh);

1919:   DMPlexSetPartitioner(dm,oldPart);
1920:   PetscPartitionerDestroy(&gatherPart);
1921:   PetscPartitionerDestroy(&oldPart);
1922:   return 0;
1923: }
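
/*
  Illustrative usage (added sketch, not part of the original source): gathering the
  mesh onto the root rank, e.g. for serial output, then discarding the copy.

    DM gatherDM = NULL;

    DMPlexGetGatherDM(dm, NULL, &gatherDM);
    if (gatherDM) {
      ... view or write gatherDM (all points live on rank 0) ...
      DMDestroy(&gatherDM);
    }
*/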

1925: /*@C
1926:   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

1928:   Collective on dm

1930:   Input Parameter:
1931: . dm - the original DMPlex object

1933:   Output Parameters:
1934: + sf - the PetscSF used for point distribution (optional)
1935: - redundantMesh - the redundant DM object, or NULL

1937:   Level: intermediate

1939: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1940: @*/
1941: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
1942: {
1943:   MPI_Comm       comm;
1944:   PetscMPIInt    size, rank;
1945:   PetscInt       pStart, pEnd, p;
1946:   PetscInt       numPoints = -1;
1947:   PetscSF        migrationSF, sfPoint, gatherSF;
1948:   DM             gatherDM, dmCoord;
1949:   PetscSFNode    *points;

1953:   *redundantMesh = NULL;
1954:   comm = PetscObjectComm((PetscObject)dm);
1955:   MPI_Comm_size(comm,&size);
1956:   if (size == 1) {
1957:     PetscObjectReference((PetscObject) dm);
1958:     *redundantMesh = dm;
1959:     if (sf) *sf = NULL;
1960:     return 0;
1961:   }
1962:   DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);
1963:   if (!gatherDM) return 0;
1964:   MPI_Comm_rank(comm,&rank);
1965:   DMPlexGetChart(gatherDM,&pStart,&pEnd);
1966:   numPoints = pEnd - pStart;
1967:   MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1968:   PetscMalloc1(numPoints,&points);
1969:   PetscSFCreate(comm,&migrationSF);
1970:   for (p = 0; p < numPoints; p++) {
1971:     points[p].index = p;
1972:     points[p].rank  = 0;
1973:   }
1974:   PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1975:   DMPlexCreate(comm, redundantMesh);
1976:   PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1977:   DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1978:   DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1979:   DMSetPointSF(*redundantMesh, sfPoint);
1980:   DMGetCoordinateDM(*redundantMesh, &dmCoord);
1981:   if (dmCoord) DMSetPointSF(dmCoord, sfPoint);
1982:   PetscSFDestroy(&sfPoint);
1983:   if (sf) {
1984:     PetscSF tsf;

1986:     PetscSFCompose(gatherSF,migrationSF,&tsf);
1987:     DMPlexStratifyMigrationSF(dm, tsf, sf);
1988:     PetscSFDestroy(&tsf);
1989:   }
1990:   PetscSFDestroy(&migrationSF);
1991:   PetscSFDestroy(&gatherSF);
1992:   DMDestroy(&gatherDM);
1993:   DMCopyDisc(dm, *redundantMesh);
1994:   DMPlexCopy_Internal(dm, PETSC_TRUE, *redundantMesh);
1995:   return 0;
1996: }
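
/*
  Illustrative usage (added sketch, not part of the original source): obtaining a
  mesh that is fully replicated on every rank, e.g. for a redundantly solved coarse
  problem.

    DM redundantDM = NULL;

    DMPlexGetRedundantDM(dm, NULL, &redundantDM);
    if (redundantDM) {
      ... every rank now holds the complete mesh ...
      DMDestroy(&redundantDM);
    }
*/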

1998: /*@
1999:   DMPlexIsDistributed - Find out whether this DM is distributed, i.e. more than one rank owns some points.

2001:   Collective

2003:   Input Parameter:
2004: . dm      - The DM object

2006:   Output Parameter:
2007: . distributed - Flag whether the DM is distributed

2009:   Level: intermediate

2011:   Notes:
2012:   This currently checks whether at least two ranks have any DAG points.
2013:   It involves an MPI_Allreduce() of a single integer.
2014:   The result is not cached, so every call to this routine incurs this global communication.

2016: .seealso: DMPlexDistribute(), DMPlexGetOverlap(), DMPlexIsInterpolated()
2017: @*/
2018: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2019: {
2020:   PetscInt          pStart, pEnd, count;
2021:   MPI_Comm          comm;
2022:   PetscMPIInt       size;

2026:   PetscObjectGetComm((PetscObject)dm,&comm);
2027:   MPI_Comm_size(comm,&size);
2028:   if (size == 1) { *distributed = PETSC_FALSE; return 0; }
2029:   DMPlexGetChart(dm, &pStart, &pEnd);
2030:   count = (pEnd - pStart) > 0 ? 1 : 0;
2031:   MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm);
2032:   *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2033:   return 0;
2034: }
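
/*
  Illustrative usage (added sketch, not part of the original source): since the
  result is not cached, query once and reuse the flag.

    PetscBool distributed;

    DMPlexIsDistributed(dm, &distributed);
    PetscPrintf(PetscObjectComm((PetscObject) dm), "Mesh is %s\n", distributed ? "distributed" : "serial");
*/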