Actual source code: plexdistribute.c

petsc-3.14.6 2021-03-30
  1: #include <petsc/private/dmpleximpl.h>
  2: #include <petsc/private/dmlabelimpl.h>

  4: /*@C
  5:   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback

  7:   Input Parameters:
  8: + dm      - The DM object
  9: . user    - The user callback, may be NULL (to clear the callback)
 10: - ctx     - context for callback evaluation, may be NULL

 12:   Level: advanced

 14:   Notes:
 15:      The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.

 17:      Any setting here overrides other configuration of DMPlex adjacency determination.
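
  Sample usage (a minimal sketch; the callback below is purely illustrative, not a useful adjacency):
.vb
  static PetscErrorCode MyAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
  {
    /* On entry *adjSize is the capacity of adj[]; on exit it must hold the number of points written */
    adj[0]   = p; /* declare every point adjacent only to itself */
    *adjSize = 1;
    return 0;
  }

  DMPlexSetAdjacencyUser(dm, MyAdjacency, NULL);
.ve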

 19: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
 20: @*/
 21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
 22: {
 23:   DM_Plex *mesh = (DM_Plex *)dm->data;

 27:   mesh->useradjacency = user;
 28:   mesh->useradjacencyctx = ctx;
 29:   return(0);
 30: }

 32: /*@C
 33:   DMPlexGetAdjacencyUser - get the user-defined adjacency callback

 35:   Input Parameter:
 36: . dm      - The DM object

 38:   Output Parameters:
 39: + user    - The user callback
 40: - ctx     - context for callback evaluation

 42:   Level: advanced

 44: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
 45: @*/
 46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
 47: {
 48:   DM_Plex *mesh = (DM_Plex *)dm->data;

 52:   if (user) *user = mesh->useradjacency;
 53:   if (ctx) *ctx = mesh->useradjacencyctx;
 54:   return(0);
 55: }

 57: /*@
 58:   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

 60:   Input Parameters:
 61: + dm      - The DM object
 62: - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 64:   Level: intermediate

 66: .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
 67: @*/
 68: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
 69: {
 70:   DM_Plex *mesh = (DM_Plex *) dm->data;

 74:   mesh->useAnchors = useAnchors;
 75:   return(0);
 76: }

 78: /*@
 79:   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

 81:   Input Parameter:
 82: . dm      - The DM object

 84:   Output Parameter:
 85: . useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 87:   Level: intermediate

 89: .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
 90: @*/
 91: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
 92: {
 93:   DM_Plex *mesh = (DM_Plex *) dm->data;

 98:   *useAnchors = mesh->useAnchors;
 99:   return(0);
100: }

102: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103: {
104:   const PetscInt *cone = NULL;
105:   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
106:   PetscErrorCode  ierr;

109:   DMPlexGetConeSize(dm, p, &coneSize);
110:   DMPlexGetCone(dm, p, &cone);
111:   for (c = 0; c <= coneSize; ++c) {
112:     const PetscInt  point   = !c ? p : cone[c-1];
113:     const PetscInt *support = NULL;
114:     PetscInt        supportSize, s, q;

116:     DMPlexGetSupportSize(dm, point, &supportSize);
117:     DMPlexGetSupport(dm, point, &support);
118:     for (s = 0; s < supportSize; ++s) {
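      /* Linear scan for duplicates: if support[s] is not already in adj[], the comma expression in the loop condition appends it (the same idiom recurs in the routines below) */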
119:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
120:         if (support[s] == adj[q]) break;
121:       }
122:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
123:     }
124:   }
125:   *adjSize = numAdj;
126:   return(0);
127: }

129: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
130: {
131:   const PetscInt *support = NULL;
132:   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;
133:   PetscErrorCode  ierr;

136:   DMPlexGetSupportSize(dm, p, &supportSize);
137:   DMPlexGetSupport(dm, p, &support);
138:   for (s = 0; s <= supportSize; ++s) {
139:     const PetscInt  point = !s ? p : support[s-1];
140:     const PetscInt *cone  = NULL;
141:     PetscInt        coneSize, c, q;

143:     DMPlexGetConeSize(dm, point, &coneSize);
144:     DMPlexGetCone(dm, point, &cone);
145:     for (c = 0; c < coneSize; ++c) {
146:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
147:         if (cone[c] == adj[q]) break;
148:       }
149:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
150:     }
151:   }
152:   *adjSize = numAdj;
153:   return(0);
154: }

156: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
157: {
158:   PetscInt      *star = NULL;
159:   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;

163:   DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
164:   for (s = 0; s < starSize*2; s += 2) {
165:     const PetscInt *closure = NULL;
166:     PetscInt        closureSize, c, q;

168:     DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
169:     for (c = 0; c < closureSize*2; c += 2) {
170:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
171:         if (closure[c] == adj[q]) break;
172:       }
173:       if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
174:     }
175:     DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
176:   }
177:   DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
178:   *adjSize = numAdj;
179:   return(0);
180: }

182: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
183: {
184:   static PetscInt asiz = 0;
185:   PetscInt maxAnchors = 1;
186:   PetscInt aStart = -1, aEnd = -1;
187:   PetscInt maxAdjSize;
188:   PetscSection aSec = NULL;
189:   IS aIS = NULL;
190:   const PetscInt *anchors;
191:   DM_Plex *mesh = (DM_Plex *)dm->data;
192:   PetscErrorCode  ierr;

195:   if (useAnchors) {
196:     DMPlexGetAnchors(dm,&aSec,&aIS);
197:     if (aSec) {
198:       PetscSectionGetMaxDof(aSec,&maxAnchors);
199:       maxAnchors = PetscMax(1,maxAnchors);
200:       PetscSectionGetChart(aSec,&aStart,&aEnd);
201:       ISGetIndices(aIS,&anchors);
202:     }
203:   }
204:   if (!*adj) {
205:     PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

207:     DMPlexGetChart(dm, &pStart,&pEnd);
208:     DMPlexGetDepth(dm, &depth);
209:     depth = PetscMax(depth, -depth);
210:     DMPlexGetMaxSizes(dm, &maxC, &maxS);
211:     coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
212:     supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
213:     asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
214:     asiz *= maxAnchors;
215:     asiz  = PetscMin(asiz,pEnd-pStart);
216:     PetscMalloc1(asiz,adj);
217:   }
218:   if (*adjSize < 0) *adjSize = asiz;
219:   maxAdjSize = *adjSize;
220:   if (mesh->useradjacency) {
221:     (*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx);
222:   } else if (useTransitiveClosure) {
223:     DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
224:   } else if (useCone) {
225:     DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
226:   } else {
227:     DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
228:   }
229:   if (useAnchors && aSec) {
230:     PetscInt origSize = *adjSize;
231:     PetscInt numAdj = origSize;
232:     PetscInt i = 0, j;
233:     PetscInt *orig = *adj;

235:     while (i < origSize) {
236:       PetscInt p = orig[i];
237:       PetscInt aDof = 0;

239:       if (p >= aStart && p < aEnd) {
240:         PetscSectionGetDof(aSec,p,&aDof);
241:       }
242:       if (aDof) {
243:         PetscInt aOff;
244:         PetscInt s, q;

246:         for (j = i + 1; j < numAdj; j++) {
247:           orig[j - 1] = orig[j];
248:         }
249:         origSize--;
250:         numAdj--;
251:         PetscSectionGetOffset(aSec,p,&aOff);
252:         for (s = 0; s < aDof; ++s) {
253:           for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
254:             if (anchors[aOff+s] == orig[q]) break;
255:           }
256:           if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
257:         }
258:       }
259:       else {
260:         i++;
261:       }
262:     }
263:     *adjSize = numAdj;
264:     ISRestoreIndices(aIS,&anchors);
265:   }
266:   return(0);
267: }

269: /*@
270:   DMPlexGetAdjacency - Return all points adjacent to the given point

272:   Input Parameters:
273: + dm - The DM object
274: . p  - The point
 275: . adjSize - The maximum size of adj if adj is non-NULL, or PETSC_DETERMINE
276: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize

278:   Output Parameters:
279: + adjSize - The number of adjacent points
280: - adj - The adjacent points

282:   Level: advanced

284:   Notes:
285:     The user must PetscFree the adj array if it was not passed in.
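
  Sample usage (a sketch; error checking omitted):
.vb
  PetscInt *adj = NULL, adjSize = PETSC_DETERMINE;

  DMPlexGetAdjacency(dm, p, &adjSize, &adj);
  /* ... use adj[0], ..., adj[adjSize-1] ... */
  PetscFree(adj);
.ve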

287: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
288: @*/
289: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
290: {
291:   PetscBool      useCone, useClosure, useAnchors;

298:   DMGetBasicAdjacency(dm, &useCone, &useClosure);
299:   DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
300:   DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);
301:   return(0);
302: }

304: /*@
305:   DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

307:   Collective on dm

309:   Input Parameters:
310: + dm      - The DM
 311: . sfPoint - The PetscSF which encodes point connectivity
. rootRankSection - The number of leaves for a given root point (see DMPlexDistributeOwnership())
. rootRanks - The rank of each edge into the root point
. leafRankSection - The number of processes sharing a given leaf point
- leafRanks - The rank of each process sharing a leaf point

313:   Output Parameters:
314: + processRanks - A list of process neighbors, or NULL
315: - sfProcess    - An SF encoding the two-sided process connectivity, or NULL

317:   Level: developer

319: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
320: @*/
321: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
322: {
323:   const PetscSFNode *remotePoints;
324:   PetscInt          *localPointsNew;
325:   PetscSFNode       *remotePointsNew;
326:   const PetscInt    *nranks;
327:   PetscInt          *ranksNew;
328:   PetscBT            neighbors;
329:   PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
330:   PetscMPIInt        size, proc, rank;
331:   PetscErrorCode     ierr;

338:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
339:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
340:   PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
341:   PetscBTCreate(size, &neighbors);
342:   PetscBTMemzero(size, neighbors);
343:   /* Compute root-to-leaf process connectivity */
344:   PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
345:   ISGetIndices(rootRanks, &nranks);
346:   for (p = pStart; p < pEnd; ++p) {
347:     PetscInt ndof, noff, n;

349:     PetscSectionGetDof(rootRankSection, p, &ndof);
350:     PetscSectionGetOffset(rootRankSection, p, &noff);
351:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
352:   }
353:   ISRestoreIndices(rootRanks, &nranks);
354:   /* Compute leaf-to-neighbor process connectivity */
355:   PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
356:   ISGetIndices(leafRanks, &nranks);
357:   for (p = pStart; p < pEnd; ++p) {
358:     PetscInt ndof, noff, n;

360:     PetscSectionGetDof(leafRankSection, p, &ndof);
361:     PetscSectionGetOffset(leafRankSection, p, &noff);
362:     for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
363:   }
364:   ISRestoreIndices(leafRanks, &nranks);
365:   /* Compute leaf-to-root process connectivity */
366:   for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
367:   /* Calculate edges */
368:   PetscBTClear(neighbors, rank);
369:   for (proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
370:   PetscMalloc1(numNeighbors, &ranksNew);
371:   PetscMalloc1(numNeighbors, &localPointsNew);
372:   PetscMalloc1(numNeighbors, &remotePointsNew);
373:   for (proc = 0, n = 0; proc < size; ++proc) {
374:     if (PetscBTLookup(neighbors, proc)) {
375:       ranksNew[n]              = proc;
376:       localPointsNew[n]        = proc;
377:       remotePointsNew[n].index = rank;
378:       remotePointsNew[n].rank  = proc;
379:       ++n;
380:     }
381:   }
382:   PetscBTDestroy(&neighbors);
383:   if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
384:   else              {PetscFree(ranksNew);}
385:   if (sfProcess) {
386:     PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
387:     PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
388:     PetscSFSetFromOptions(*sfProcess);
389:     PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
390:   }
391:   return(0);
392: }

394: /*@
 395:   DMPlexDistributeOwnership - Compute owner information for shared points. This effectively makes the one-sided point SF two-sided, recording the ranks that share each root and leaf point.

397:   Collective on dm

399:   Input Parameter:
400: . dm - The DM

402:   Output Parameters:
403: + rootSection - The number of leaves for a given root point
404: . rootrank    - The rank of each edge into the root point
405: . leafSection - The number of processes sharing a given leaf point
406: - leafrank    - The rank of each process sharing a leaf point
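
  Sample usage (a sketch of the typical call sequence; error checking omitted):
.vb
  PetscSection rootSection, leafSection;
  IS           rootrank, leafrank;

  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);
  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);
  DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
  /* ... e.g. feed the results to DMPlexCreateOverlapLabel() ... */
  ISDestroy(&rootrank);
  ISDestroy(&leafrank);
  PetscSectionDestroy(&rootSection);
  PetscSectionDestroy(&leafSection);
.ve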

408:   Level: developer

410: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistribute(), DMPlexDistributeOverlap()
411: @*/
412: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
413: {
414:   MPI_Comm        comm;
415:   PetscSF         sfPoint;
416:   const PetscInt *rootdegree;
417:   PetscInt       *myrank, *remoterank;
418:   PetscInt        pStart, pEnd, p, nedges;
419:   PetscMPIInt     rank;
420:   PetscErrorCode  ierr;

423:   PetscObjectGetComm((PetscObject) dm, &comm);
424:   MPI_Comm_rank(comm, &rank);
425:   DMPlexGetChart(dm, &pStart, &pEnd);
426:   DMGetPointSF(dm, &sfPoint);
427:   /* Compute number of leaves for each root */
428:   PetscObjectSetName((PetscObject) rootSection, "Root Section");
429:   PetscSectionSetChart(rootSection, pStart, pEnd);
430:   PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
431:   PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
432:   for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
433:   PetscSectionSetUp(rootSection);
434:   /* Gather rank of each leaf to root */
435:   PetscSectionGetStorageSize(rootSection, &nedges);
436:   PetscMalloc1(pEnd-pStart, &myrank);
437:   PetscMalloc1(nedges,  &remoterank);
438:   for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
439:   PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
440:   PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
441:   PetscFree(myrank);
442:   ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
443:   /* Distribute remote ranks to leaves */
444:   PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
445:   DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
446:   return(0);
447: }

449: /*@C
 450:   DMPlexCreateOverlapLabel - Compute a label indicating which remote ranks need each local point in order to build the requested overlap

452:   Collective on dm

454:   Input Parameters:
455: + dm          - The DM
456: . levels      - Number of overlap levels
457: . rootSection - The number of leaves for a given root point
458: . rootrank    - The rank of each edge into the root point
459: . leafSection - The number of processes sharing a given leaf point
460: - leafrank    - The rank of each process sharing a leaf point

462:   Output Parameter:
463: . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings
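
  Sample usage (a sketch continuing from DMPlexDistributeOwnership(); error checking omitted):
.vb
  DMLabel ovLabel;

  DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
  DMPlexCreateOverlapLabel(dm, 1, rootSection, rootrank, leafSection, leafrank, &ovLabel);
  /* ... convert the label into a migration SF ... */
  DMLabelDestroy(&ovLabel);
.ve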

465:   Level: developer

467: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
468: @*/
469: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
470: {
471:   MPI_Comm           comm;
472:   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
473:   PetscSF            sfPoint;
474:   const PetscSFNode *remote;
475:   const PetscInt    *local;
476:   const PetscInt    *nrank, *rrank;
477:   PetscInt          *adj = NULL;
478:   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
479:   PetscMPIInt        rank, size;
480:   PetscBool          flg;
481:   PetscErrorCode     ierr;

484:   *ovLabel = NULL;
485:   PetscObjectGetComm((PetscObject) dm, &comm);
486:   MPI_Comm_size(comm, &size);
487:   MPI_Comm_rank(comm, &rank);
488:   if (size == 1) return(0);
489:   DMGetPointSF(dm, &sfPoint);
490:   DMPlexGetChart(dm, &pStart, &pEnd);
491:   PetscSectionGetChart(leafSection, &sStart, &sEnd);
492:   PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
493:   DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);
494:   /* Handle leaves: shared with the root point */
495:   for (l = 0; l < nleaves; ++l) {
496:     PetscInt adjSize = PETSC_DETERMINE, a;

498:     DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);
499:     for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
500:   }
501:   ISGetIndices(rootrank, &rrank);
502:   ISGetIndices(leafrank, &nrank);
503:   /* Handle roots */
504:   for (p = pStart; p < pEnd; ++p) {
505:     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

507:     if ((p >= sStart) && (p < sEnd)) {
508:       /* Some leaves share a root with other leaves on different processes */
509:       PetscSectionGetDof(leafSection, p, &neighbors);
510:       if (neighbors) {
511:         PetscSectionGetOffset(leafSection, p, &noff);
512:         DMPlexGetAdjacency(dm, p, &adjSize, &adj);
513:         for (n = 0; n < neighbors; ++n) {
514:           const PetscInt remoteRank = nrank[noff+n];

516:           if (remoteRank == rank) continue;
517:           for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
518:         }
519:       }
520:     }
521:     /* Roots are shared with leaves */
522:     PetscSectionGetDof(rootSection, p, &neighbors);
523:     if (!neighbors) continue;
524:     PetscSectionGetOffset(rootSection, p, &noff);
525:     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
526:     for (n = 0; n < neighbors; ++n) {
527:       const PetscInt remoteRank = rrank[noff+n];

529:       if (remoteRank == rank) continue;
530:       for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
531:     }
532:   }
533:   PetscFree(adj);
534:   ISRestoreIndices(rootrank, &rrank);
535:   ISRestoreIndices(leafrank, &nrank);
536:   /* Add additional overlap levels */
537:   for (l = 1; l < levels; l++) {
538:     /* Propagate point donations over SF to capture remote connections */
539:     DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
540:     /* Add next level of point donations to the label */
541:     DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
542:   }
543:   /* We require the closure in the overlap */
544:   DMPlexPartitionLabelClosure(dm, ovAdjByRank);
545:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
546:   if (flg) {
547:     PetscViewer viewer;
548:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);
549:     DMLabelView(ovAdjByRank, viewer);
550:   }
551:   /* Invert sender to receiver label */
552:   DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
553:   DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);
554:   /* Add owned points, except for shared local points */
555:   for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
556:   for (l = 0; l < nleaves; ++l) {
557:     DMLabelClearValue(*ovLabel, local[l], rank);
558:     DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
559:   }
560:   /* Clean up */
561:   DMLabelDestroy(&ovAdjByRank);
562:   return(0);
563: }

565: /*@C
 566:   DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution that realizes the overlap described by the input SF

568:   Collective on dm

570:   Input Parameters:
571: + dm          - The DM
572: - overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

574:   Output Parameter:
575: . migrationSF - An SF that maps original points in old locations to points in new locations

577:   Level: developer

579: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistributeOverlap(), DMPlexDistribute()
580: @*/
581: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
582: {
583:   MPI_Comm           comm;
584:   PetscMPIInt        rank, size;
585:   PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
586:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
587:   PetscInt          *depthRecv, *depthShift, *depthIdx;
588:   PetscSFNode       *iremote;
589:   PetscSF            pointSF;
590:   const PetscInt    *sharedLocal;
591:   const PetscSFNode *overlapRemote, *sharedRemote;
592:   PetscErrorCode     ierr;

596:   PetscObjectGetComm((PetscObject)dm, &comm);
597:   MPI_Comm_rank(comm, &rank);
598:   MPI_Comm_size(comm, &size);
599:   DMGetDimension(dm, &dim);

601:   /* Before building the migration SF we need to know the new stratum offsets */
602:   PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
603:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
604:   for (d=0; d<dim+1; d++) {
605:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
606:     for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
607:   }
608:   for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
609:   PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
610:   PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);

612:   /* Count received points in each stratum and compute the internal strata shift */
613:   PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
614:   for (d=0; d<dim+1; d++) depthRecv[d]=0;
615:   for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
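  /* New ordering is cells (depth dim), then vertices (depth 0), then faces, then edges: compute each stratum's shift under this interleaving */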
616:   depthShift[dim] = 0;
617:   for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
618:   for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
619:   for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
620:   for (d=0; d<dim+1; d++) {
621:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
622:     depthIdx[d] = pStart + depthShift[d];
623:   }

 625:   /* Build an SF that describes the full overlap migration */
626:   DMPlexGetChart(dm, &pStart, &pEnd);
627:   newLeaves = pEnd - pStart + nleaves;
628:   PetscMalloc1(newLeaves, &ilocal);
629:   PetscMalloc1(newLeaves, &iremote);
630:   /* First map local points to themselves */
631:   for (d=0; d<dim+1; d++) {
632:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
633:     for (p=pStart; p<pEnd; p++) {
634:       point = p + depthShift[d];
635:       ilocal[point] = point;
636:       iremote[point].index = p;
637:       iremote[point].rank = rank;
638:       depthIdx[d]++;
639:     }
640:   }

642:   /* Add in the remote roots for currently shared points */
643:   DMGetPointSF(dm, &pointSF);
644:   PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
645:   for (d=0; d<dim+1; d++) {
646:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
647:     for (p=0; p<numSharedPoints; p++) {
648:       if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
649:         point = sharedLocal[p] + depthShift[d];
650:         iremote[point].index = sharedRemote[p].index;
651:         iremote[point].rank = sharedRemote[p].rank;
652:       }
653:     }
654:   }

656:   /* Now add the incoming overlap points */
657:   for (p=0; p<nleaves; p++) {
658:     point = depthIdx[remoteDepths[p]];
659:     ilocal[point] = point;
660:     iremote[point].index = overlapRemote[p].index;
661:     iremote[point].rank = overlapRemote[p].rank;
662:     depthIdx[remoteDepths[p]]++;
663:   }
664:   PetscFree2(pointDepths,remoteDepths);

666:   PetscSFCreate(comm, migrationSF);
667:   PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
668:   PetscSFSetFromOptions(*migrationSF);
669:   DMPlexGetChart(dm, &pStart, &pEnd);
670:   PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);

672:   PetscFree3(depthRecv, depthShift, depthIdx);
673:   return(0);
674: }

676: /*@
 677:   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF for stratification.

679:   Input Parameters:
680: + dm          - The DM
681: - sf          - A star forest with non-ordered leaves, usually defining a DM point migration

683:   Output Parameter:
684: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

686:   Note:
687:   This lexicographically sorts by (depth, cellType)
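
  Sample usage (a sketch; the stratified SF replaces the original migration SF, error checking omitted):
.vb
  PetscSF sfStrat;

  DMPlexStratifyMigrationSF(dm, sfMigration, &sfStrat);
  PetscSFDestroy(&sfMigration);
  sfMigration = sfStrat;
.ve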

689:   Level: developer

691: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
692: @*/
693: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
694: {
695:   MPI_Comm           comm;
696:   PetscMPIInt        rank, size;
697:   PetscInt           d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
698:   PetscSFNode       *pointDepths, *remoteDepths;
699:   PetscInt          *ilocal;
700:   PetscInt          *depthRecv, *depthShift, *depthIdx;
701:   PetscInt          *ctRecv,    *ctShift,    *ctIdx;
702:   const PetscSFNode *iremote;
703:   PetscErrorCode     ierr;

707:   PetscObjectGetComm((PetscObject) dm, &comm);
708:   MPI_Comm_rank(comm, &rank);
709:   MPI_Comm_size(comm, &size);
710:   DMPlexGetDepth(dm, &ldepth);
711:   DMGetDimension(dm, &dim);
712:   MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
 713:   if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %D != %D", ldepth, depth);
714:   PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0);

716:   /* Before building the migration SF we need to know the new stratum offsets */
717:   PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
718:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
719:   for (d = 0; d < depth+1; ++d) {
720:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
721:     for (p = pStart; p < pEnd; ++p) {
722:       DMPolytopeType ct;

724:       DMPlexGetCellType(dm, p, &ct);
725:       pointDepths[p].index = d;
726:       pointDepths[p].rank  = ct;
727:     }
728:   }
729:   for (p = 0; p < nleaves; ++p) {remoteDepths[p].index = -1; remoteDepths[p].rank = -1;}
730:   PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths);
731:   PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths);
732:   /* Count received points in each stratum and compute the internal strata shift */
733:   PetscCalloc6(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx);
734:   for (p = 0; p < nleaves; ++p) {
735:     if (remoteDepths[p].rank < 0) {
736:       ++depthRecv[remoteDepths[p].index];
737:     } else {
738:       ++ctRecv[remoteDepths[p].rank];
739:     }
740:   }
741:   {
742:     PetscInt depths[4], dims[4], shift = 0, i, c;

744:     /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
745:          Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
746:      */
747:     depths[0] = depth; depths[1] = 0; depths[2] = depth-1; depths[3] = 1;
748:     dims[0]   = dim;   dims[1]   = 0; dims[2]   = dim-1;   dims[3]   = 1;
749:     for (i = 0; i <= depth; ++i) {
750:       const PetscInt dep = depths[i];
751:       const PetscInt dim = dims[i];

753:       for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
754:         if (DMPolytopeTypeGetDim((DMPolytopeType) c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
755:         ctShift[c] = shift;
756:         shift     += ctRecv[c];
757:       }
758:       depthShift[dep] = shift;
759:       shift          += depthRecv[dep];
760:     }
761:     for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
762:       const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType) c);

764:       if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
765:         ctShift[c] = shift;
766:         shift     += ctRecv[c];
767:       }
768:     }
769:   }
770:   /* Derive a new local permutation based on stratified indices */
771:   PetscMalloc1(nleaves, &ilocal);
772:   for (p = 0; p < nleaves; ++p) {
773:     const PetscInt       dep = remoteDepths[p].index;
774:     const DMPolytopeType ct  = (DMPolytopeType) remoteDepths[p].rank;

776:     if ((PetscInt) ct < 0) {
777:       ilocal[p] = depthShift[dep] + depthIdx[dep];
778:       ++depthIdx[dep];
779:     } else {
780:       ilocal[p] = ctShift[ct] + ctIdx[ct];
781:       ++ctIdx[ct];
782:     }
783:   }
784:   PetscSFCreate(comm, migrationSF);
785:   PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
786:   PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
787:   PetscFree2(pointDepths,remoteDepths);
788:   PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx);
789:   PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0);
790:   return(0);
791: }

793: /*@
794:   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

796:   Collective on dm

798:   Input Parameters:
799: + dm - The DMPlex object
800: . pointSF - The PetscSF describing the communication pattern
801: . originalSection - The PetscSection for existing data layout
802: - originalVec - The existing data in a local vector

804:   Output Parameters:
 805: + newSection - The PetscSection describing the new data layout
806: - newVec - The new data in a local vector
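
  Sample usage (a sketch; the caller creates newSection and newVec beforehand, error checking omitted):
.vb
  PetscSection newSection;
  Vec          newVec;

  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);
  VecCreate(PETSC_COMM_SELF, &newVec);
  DMPlexDistributeField(dm, pointSF, originalSection, originalVec, newSection, newVec);
.ve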

808:   Level: developer

810: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
811: @*/
812: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
813: {
814:   PetscSF        fieldSF;
815:   PetscInt      *remoteOffsets, fieldSize;
816:   PetscScalar   *originalValues, *newValues;

820:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
821:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

823:   PetscSectionGetStorageSize(newSection, &fieldSize);
824:   VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
825:   VecSetType(newVec,dm->vectype);

827:   VecGetArray(originalVec, &originalValues);
828:   VecGetArray(newVec, &newValues);
829:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
830:   PetscFree(remoteOffsets);
831:   PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
832:   PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
833:   PetscSFDestroy(&fieldSF);
834:   VecRestoreArray(newVec, &newValues);
835:   VecRestoreArray(originalVec, &originalValues);
836:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
837:   return(0);
838: }

840: /*@
841:   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

843:   Collective on dm

845:   Input Parameters:
846: + dm - The DMPlex object
847: . pointSF - The PetscSF describing the communication pattern
848: . originalSection - The PetscSection for existing data layout
849: - originalIS - The existing data

851:   Output Parameters:
 852: + newSection - The PetscSection describing the new data layout
853: - newIS - The new data

855:   Level: developer

857: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
858: @*/
859: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
860: {
861:   PetscSF         fieldSF;
862:   PetscInt       *newValues, *remoteOffsets, fieldSize;
863:   const PetscInt *originalValues;
864:   PetscErrorCode  ierr;

867:   PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
868:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

870:   PetscSectionGetStorageSize(newSection, &fieldSize);
871:   PetscMalloc1(fieldSize, &newValues);

873:   ISGetIndices(originalIS, &originalValues);
874:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
875:   PetscFree(remoteOffsets);
876:   PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
877:   PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
878:   PetscSFDestroy(&fieldSF);
879:   ISRestoreIndices(originalIS, &originalValues);
880:   ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
881:   PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
882:   return(0);
883: }

885: /*@
886:   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

888:   Collective on dm

890:   Input Parameters:
891: + dm - The DMPlex object
892: . pointSF - The PetscSF describing the communication pattern
893: . originalSection - The PetscSection for existing data layout
894: . datatype - The type of data
895: - originalData - The existing data

897:   Output Parameters:
898: + newSection - The PetscSection describing the new data layout
899: - newData - The new data
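
  Sample usage (a sketch for PetscInt-valued data; newData is allocated by the routine and must be freed with PetscFree()):
.vb
  PetscSection newSection;
  PetscInt    *newData;

  PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);
  DMPlexDistributeData(dm, pointSF, originalSection, MPIU_INT, originalData, newSection, (void **) &newData);
.ve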

901:   Level: developer

903: .seealso: DMPlexDistribute(), DMPlexDistributeField()
904: @*/
905: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
906: {
907:   PetscSF        fieldSF;
908:   PetscInt      *remoteOffsets, fieldSize;
909:   PetscMPIInt    dataSize;

913:   PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
914:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

916:   PetscSectionGetStorageSize(newSection, &fieldSize);
917:   MPI_Type_size(datatype, &dataSize);
918:   PetscMalloc(fieldSize * dataSize, newData);

920:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
921:   PetscFree(remoteOffsets);
922:   PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
923:   PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
924:   PetscSFDestroy(&fieldSF);
925:   PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
926:   return(0);
927: }

929: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
930: {
931:   DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
932:   MPI_Comm               comm;
933:   PetscSF                coneSF;
934:   PetscSection           originalConeSection, newConeSection;
935:   PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
936:   PetscBool              flg;
937:   PetscErrorCode         ierr;

942:   PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
943:   /* Distribute cone section */
944:   PetscObjectGetComm((PetscObject)dm, &comm);
945:   DMPlexGetConeSection(dm, &originalConeSection);
946:   DMPlexGetConeSection(dmParallel, &newConeSection);
947:   PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
948:   DMSetUp(dmParallel);
949:   {
950:     PetscInt pStart, pEnd, p;

952:     PetscSectionGetChart(newConeSection, &pStart, &pEnd);
953:     for (p = pStart; p < pEnd; ++p) {
954:       PetscInt coneSize;
955:       PetscSectionGetDof(newConeSection, p, &coneSize);
956:       pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
957:     }
958:   }
959:   /* Communicate and renumber cones */
960:   PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
961:   PetscFree(remoteOffsets);
962:   DMPlexGetCones(dm, &cones);
963:   if (original) {
964:     PetscInt numCones;

966:     PetscSectionGetStorageSize(originalConeSection,&numCones);
967:     PetscMalloc1(numCones,&globCones);
968:     ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
969:   } else {
970:     globCones = cones;
971:   }
972:   DMPlexGetCones(dmParallel, &newCones);
973:   PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);
974:   PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);
975:   if (original) {
976:     PetscFree(globCones);
977:   }
978:   PetscSectionGetStorageSize(newConeSection, &newConesSize);
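  /* IS_GTOLM_MASK sends points missing from the renumbering to -1; the debug check below reports any such point */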
979:   ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
980:   if (PetscDefined(USE_DEBUG)) {
981:     PetscInt  p;
982:     PetscBool valid = PETSC_TRUE;
983:     for (p = 0; p < newConesSize; ++p) {
984:       if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);}
985:     }
986:     if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
987:   }
988:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
989:   if (flg) {
990:     PetscPrintf(comm, "Serial Cone Section:\n");
991:     PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm));
992:     PetscPrintf(comm, "Parallel Cone Section:\n");
993:     PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm));
994:     PetscSFView(coneSF, NULL);
995:   }
996:   DMPlexGetConeOrientations(dm, &cones);
997:   DMPlexGetConeOrientations(dmParallel, &newCones);
998:   PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
999:   PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
1000:   PetscSFDestroy(&coneSF);
1001:   PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
1002:   /* Create supports and stratify DMPlex */
1003:   {
1004:     PetscInt pStart, pEnd;

1006:     PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
1007:     PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
1008:   }
1009:   DMPlexSymmetrize(dmParallel);
1010:   DMPlexStratify(dmParallel);
1011:   {
1012:     PetscBool useCone, useClosure, useAnchors;

1014:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
1015:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1016:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1017:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1018:   }
1019:   return(0);
1020: }

1022: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1023: {
1024:   MPI_Comm         comm;
1025:   PetscSection     originalCoordSection, newCoordSection;
1026:   Vec              originalCoordinates, newCoordinates;
1027:   PetscInt         bs;
1028:   PetscBool        isper;
1029:   const char      *name;
1030:   const PetscReal *maxCell, *L;
1031:   const DMBoundaryType *bd;
1032:   PetscErrorCode   ierr;


1038:   PetscObjectGetComm((PetscObject)dm, &comm);
1039:   DMGetCoordinateSection(dm, &originalCoordSection);
1040:   DMGetCoordinateSection(dmParallel, &newCoordSection);
1041:   DMGetCoordinatesLocal(dm, &originalCoordinates);
1042:   if (originalCoordinates) {
1043:     VecCreate(PETSC_COMM_SELF, &newCoordinates);
1044:     PetscObjectGetName((PetscObject) originalCoordinates, &name);
1045:     PetscObjectSetName((PetscObject) newCoordinates, name);

1047:     DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1048:     DMSetCoordinatesLocal(dmParallel, newCoordinates);
1049:     VecGetBlockSize(originalCoordinates, &bs);
1050:     VecSetBlockSize(newCoordinates, bs);
1051:     VecDestroy(&newCoordinates);
1052:   }
1053:   DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);
1054:   DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);
1055:   return(0);
1056: }

1058: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1059: {
1060:   DM_Plex         *mesh = (DM_Plex*) dm->data;
1061:   MPI_Comm         comm;
1062:   DMLabel          depthLabel;
1063:   PetscMPIInt      rank;
1064:   PetscInt         depth, d, numLabels, numLocalLabels, l;
1065:   PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1066:   PetscObjectState depthState = -1;
1067:   PetscErrorCode   ierr;


1073:   PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1074:   PetscObjectGetComm((PetscObject)dm, &comm);
1075:   MPI_Comm_rank(comm, &rank);

1077:   /* If the user has changed the depth label, communicate it instead */
1078:   DMPlexGetDepth(dm, &depth);
1079:   DMPlexGetDepthLabel(dm, &depthLabel);
1080:   if (depthLabel) {PetscObjectStateGet((PetscObject) depthLabel, &depthState);}
1081:   lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1082:   MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1083:   if (sendDepth) {
1084:     DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel);
1085:     DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE);
1086:   }
1087:   /* Everyone must have either the same number of labels, or none */
1088:   DMGetNumLabels(dm, &numLocalLabels);
1089:   numLabels = numLocalLabels;
1090:   MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1091:   if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1092:   for (l = 0; l < numLabels; ++l) {
1093:     DMLabel     label = NULL, labelNew = NULL;
1094:     PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
1095:     const char *name = NULL;

1097:     if (hasLabels) {
1098:       DMGetLabelByNum(dm, l, &label);
1099:       /* Skip "depth" because it is recreated */
1100:       PetscObjectGetName((PetscObject) label, &name);
1101:       PetscStrcmp(name, "depth", &isDepth);
1102:     }
1103:     MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);
1104:     if (isDepth && !sendDepth) continue;
1105:     DMLabelDistribute(label, migrationSF, &labelNew);
1106:     if (isDepth) {
1107:       /* Put in any missing strata which can occur if users are managing the depth label themselves */
1108:       PetscInt gdepth;

1110:       MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1111:       if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %D != %D", depth, gdepth);
1112:       for (d = 0; d <= gdepth; ++d) {
1113:         PetscBool has;

1115:         DMLabelHasStratum(labelNew, d, &has);
1116:         if (!has) {DMLabelAddStratum(labelNew, d);}
1117:       }
1118:     }
1119:     DMAddLabel(dmParallel, labelNew);
1120:     /* Put the output flag in the new label */
1121:     if (hasLabels) {DMGetLabelOutput(dm, name, &lisOutput);}
1122:     MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);
1123:     PetscObjectGetName((PetscObject) labelNew, &name);
1124:     DMSetLabelOutput(dmParallel, name, isOutput);
1125:     DMLabelDestroy(&labelNew);
1126:   }
1127:   PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1128:   return(0);
1129: }

1131: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1132: {
1133:   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1134:   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1135:   MPI_Comm        comm;
1136:   DM              refTree;
1137:   PetscSection    origParentSection, newParentSection;
1138:   PetscInt        *origParents, *origChildIDs;
1139:   PetscBool       flg;
1140:   PetscErrorCode  ierr;

1145:   PetscObjectGetComm((PetscObject)dm, &comm);

1147:   /* Set up tree */
1148:   DMPlexGetReferenceTree(dm,&refTree);
1149:   DMPlexSetReferenceTree(dmParallel,refTree);
1150:   DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1151:   if (origParentSection) {
1152:     PetscInt        pStart, pEnd;
1153:     PetscInt        *newParents, *newChildIDs, *globParents;
1154:     PetscInt        *remoteOffsetsParents, newParentSize;
1155:     PetscSF         parentSF;

1157:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1158:     PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1159:     PetscSectionSetChart(newParentSection,pStart,pEnd);
1160:     PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1161:     PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1162:     PetscFree(remoteOffsetsParents);
1163:     PetscSectionGetStorageSize(newParentSection,&newParentSize);
1164:     PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1165:     if (original) {
1166:       PetscInt numParents;

1168:       PetscSectionGetStorageSize(origParentSection,&numParents);
1169:       PetscMalloc1(numParents,&globParents);
1170:       ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1171:     }
1172:     else {
1173:       globParents = origParents;
1174:     }
1175:     PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);
1176:     PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);
1177:     if (original) {
1178:       PetscFree(globParents);
1179:     }
1180:     PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1181:     PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1182:     ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1183:     if (PetscDefined(USE_DEBUG)) {
1184:       PetscInt  p;
1185:       PetscBool valid = PETSC_TRUE;
1186:       for (p = 0; p < newParentSize; ++p) {
1187:         if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %D not in overlap SF\n", p);}
1188:       }
1189:       if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1190:     }
1191:     PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1192:     if (flg) {
1193:       PetscPrintf(comm, "Serial Parent Section: \n");
1194:       PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm));
1195:       PetscPrintf(comm, "Parallel Parent Section: \n");
1196:       PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm));
1197:       PetscSFView(parentSF, NULL);
1198:     }
1199:     DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1200:     PetscSectionDestroy(&newParentSection);
1201:     PetscFree2(newParents,newChildIDs);
1202:     PetscSFDestroy(&parentSF);
1203:   }
1204:   pmesh->useAnchors = mesh->useAnchors;
1205:   return(0);
1206: }

1208: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1209: {
1210:   PetscMPIInt            rank, size;
1211:   MPI_Comm               comm;
1212:   PetscErrorCode         ierr;


1218:   /* Create point SF for parallel mesh */
1219:   PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1220:   PetscObjectGetComm((PetscObject)dm, &comm);
1221:   MPI_Comm_rank(comm, &rank);
1222:   MPI_Comm_size(comm, &size);
1223:   {
1224:     const PetscInt *leaves;
1225:     PetscSFNode    *remotePoints, *rowners, *lowners;
1226:     PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1227:     PetscInt        pStart, pEnd;

1229:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1230:     PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1231:     PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1232:     for (p=0; p<numRoots; p++) {
1233:       rowners[p].rank  = -1;
1234:       rowners[p].index = -1;
1235:     }
1236:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1237:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1238:     for (p = 0; p < numLeaves; ++p) {
1239:       if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1240:         lowners[p].rank  = rank;
1241:         lowners[p].index = leaves ? leaves[p] : p;
1242:       } else if (lowners[p].rank >= 0) { /* Point already claimed, so flag it so that MAXLOC does not listen to us */
1243:         lowners[p].rank  = -2;
1244:         lowners[p].index = -2;
1245:       }
1246:     }
1247:     for (p=0; p<numRoots; p++) { /* Roots must not participate in the reduction; flag them so that MAXLOC does not use them */
1248:       rowners[p].rank  = -3;
1249:       rowners[p].index = -3;
1250:     }
1251:     PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1252:     PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1253:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1254:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1255:     for (p = 0; p < numLeaves; ++p) {
1256:       if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1257:       if (lowners[p].rank != rank) ++numGhostPoints;
1258:     }
1259:     PetscMalloc1(numGhostPoints, &ghostPoints);
1260:     PetscMalloc1(numGhostPoints, &remotePoints);
1261:     for (p = 0, gp = 0; p < numLeaves; ++p) {
1262:       if (lowners[p].rank != rank) {
1263:         ghostPoints[gp]        = leaves ? leaves[p] : p;
1264:         remotePoints[gp].rank  = lowners[p].rank;
1265:         remotePoints[gp].index = lowners[p].index;
1266:         ++gp;
1267:       }
1268:     }
1269:     PetscFree2(rowners,lowners);
1270:     PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1271:     PetscSFSetFromOptions((dmParallel)->sf);
1272:   }
1273:   {
1274:     PetscBool useCone, useClosure, useAnchors;

1276:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
1277:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1278:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1279:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1280:   }
1281:   PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1282:   return(0);
1283: }

1285: /*@
1286:   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?

1288:   Input Parameters:
1289: + dm - The DMPlex object
1290: - flg - Balance the partition?

1292:   Level: intermediate

1294: .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1295: @*/
1296: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1297: {
1298:   DM_Plex *mesh = (DM_Plex *)dm->data;

1301:   mesh->partitionBalance = flg;
1302:   return(0);
1303: }

1305: /*@
1306:   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?

1308:   Input Parameter:
1309: . dm - The DMPlex object

1311:   Output Parameter:
1312: . flg - Balance the partition?

1314:   Level: intermediate

1316: .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1317: @*/
1318: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1319: {
1320:   DM_Plex *mesh = (DM_Plex *)dm->data;

1325:   *flg = mesh->partitionBalance;
1326:   return(0);
1327: }

1329: typedef struct {
1330:   PetscInt vote, rank, index;
1331: } Petsc3Int;

1333: /* MaxLoc, but carry a third piece of information around */
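/* On equal votes the lower rank (with its accompanying index) wins, making the ownership result deterministic */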
1334: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1335: {
1336:   Petsc3Int *a = (Petsc3Int *)inout_;
1337:   Petsc3Int *b = (Petsc3Int *)in_;
1338:   PetscInt i, len = *len_;
1339:   for (i = 0; i < len; i++) {
1340:     if (a[i].vote < b[i].vote) {
1341:       a[i].vote = b[i].vote;
1342:       a[i].rank = b[i].rank;
1343:       a[i].index = b[i].index;
1344:     } else if (a[i].vote <= b[i].vote) {
1345:       if (a[i].rank >= b[i].rank) {
1346:         a[i].rank = b[i].rank;
1347:         a[i].index = b[i].index;
1348:       }
1349:     }
1350:   }
1351: }

1353: /*@C
1354:   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

1356:   Input Parameters:
1357: + dm          - The source DMPlex object
1358: . migrationSF - The star forest that describes the parallel point remapping
1359: - ownership   - Flag causing a vote to determine point ownership

1361:   Output Parameter:
1362: . pointSF     - The star forest describing the point overlap in the remapped DM

1364:   Notes:
1365:   Output pointSF is guaranteed to have the array of local indices (leaves) sorted.
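
  Sample usage (a sketch mirroring its use during redistribution; names are illustrative, error checking omitted):
.vb
  PetscSF pointSF;

  DMPlexCreatePointSF(dmParallel, migrationSF, PETSC_TRUE, &pointSF);
  DMSetPointSF(dmParallel, pointSF);
  PetscSFDestroy(&pointSF);
.ve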

1367:   Level: developer

1369: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1370: @*/
1371: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1372: {
1373:   PetscMPIInt        rank, size;
1374:   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1375:   PetscInt          *pointLocal;
1376:   const PetscInt    *leaves;
1377:   const PetscSFNode *roots;
1378:   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1379:   Vec                shifts;
1380:   const PetscInt     numShifts = 13759;
1381:   const PetscScalar *shift = NULL;
1382:   const PetscBool    shiftDebug = PETSC_FALSE;
1383:   PetscBool          balance;
1384:   PetscErrorCode     ierr;

1388:   MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1389:   MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
1390:   PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0);

1392:   DMPlexGetPartitionBalance(dm, &balance);
1393:   PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1394:   PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1395:   if (ownership) {
1396:     MPI_Op       op;
1397:     MPI_Datatype datatype;
1398:     Petsc3Int   *rootVote = NULL, *leafVote = NULL;
1399:     /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1400:     if (balance) {
1401:       PetscRandom r;

1403:       PetscRandomCreate(PETSC_COMM_SELF, &r);
1404:       PetscRandomSetInterval(r, 0, 2467*size);
1405:       VecCreate(PETSC_COMM_SELF, &shifts);
1406:       VecSetSizes(shifts, numShifts, numShifts);
1407:       VecSetType(shifts, VECSTANDARD);
1408:       VecSetRandom(shifts, r);
1409:       PetscRandomDestroy(&r);
1410:       VecGetArrayRead(shifts, &shift);
1411:     }

1413:     PetscMalloc1(nroots, &rootVote);
1414:     PetscMalloc1(nleaves, &leafVote);
1415:     /* Point ownership vote: Process with highest rank owns shared points */
1416:     for (p = 0; p < nleaves; ++p) {
1417:       if (shiftDebug) {
1418:         PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);
1419:       }
1420:       /* Either put in a bid or we know we own it */
1421:       leafVote[p].vote  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
1422:       leafVote[p].rank = rank;
1423:       leafVote[p].index = p;
1424:     }
1425:     for (p = 0; p < nroots; p++) {
1426:       /* Roots must not participate in the reduction; flag them with negative values so the MAXLOC-style vote never selects them */
1427:       rootVote[p].vote  = -3;
1428:       rootVote[p].rank  = -3;
1429:       rootVote[p].index = -3;
1430:     }
1431:     MPI_Type_contiguous(3, MPIU_INT, &datatype);
1432:     MPI_Type_commit(&datatype);
1433:     MPI_Op_create(&MaxLocCarry, 1, &op);
1434:     PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op);
1435:     PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op);
1436:     MPI_Op_free(&op);
1437:     MPI_Type_free(&datatype);
1438:     for (p = 0; p < nroots; p++) {
1439:       rootNodes[p].rank = rootVote[p].rank;
1440:       rootNodes[p].index = rootVote[p].index;
1441:     }
1442:     PetscFree(leafVote);
1443:     PetscFree(rootVote);
1444:   } else {
1445:     for (p = 0; p < nroots; p++) {
1446:       rootNodes[p].index = -1;
1447:       rootNodes[p].rank = rank;
1448:     }
1449:     for (p = 0; p < nleaves; p++) {
1450:       /* Write new local id into old location */
1451:       if (roots[p].rank == rank) {
1452:         rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1453:       }
1454:     }
1455:   }
1456:   PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1457:   PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);

1459:   for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1460:     if (leafNodes[p].rank != rank) npointLeaves++;
1461:   }
1462:   PetscMalloc1(npointLeaves, &pointLocal);
1463:   PetscMalloc1(npointLeaves, &pointRemote);
1464:   for (idx = 0, p = 0; p < nleaves; p++) {
1465:     if (leafNodes[p].rank != rank) {
1466:       /* Note that pointLocal is automatically sorted as it is a sublist of 0, ..., nleaves-1 */
1467:       pointLocal[idx] = p;
1468:       pointRemote[idx] = leafNodes[p];
1469:       idx++;
1470:     }
1471:   }
1472:   if (shift) {
1473:     VecRestoreArrayRead(shifts, &shift);
1474:     VecDestroy(&shifts);
1475:   }
1476:   if (shiftDebug) {PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);}
1477:   PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1478:   PetscSFSetFromOptions(*pointSF);
1479:   PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1480:   PetscFree2(rootNodes, leafNodes);
1481:   PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0);
1482:   return(0);
1483: }
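/* A minimal usage sketch (hypothetical caller code, mirroring the calls made in
   DMPlexDistribute() below; "dmParallel" and "sfMigration" are assumed to come from
   an earlier migration step):

     PetscSF sfPoint;

     DMPlexCreatePointSF(dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
     DMSetPointSF(dmParallel, sfPoint);
     PetscSFDestroy(&sfPoint);
*/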

1485: /*@C
1486:   DMPlexMigrate - Migrates internal DM data over the supplied star forest

1488:   Collective on dm

1490:   Input Parameters:
1491: + dm       - The source DMPlex object
1492: - sf       - The star forest communication context describing the migration pattern

1494:   Output Parameter:
1495: . targetDM - The target DMPlex object

1497:   Level: intermediate

1499: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1500: @*/
1501: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1502: {
1503:   MPI_Comm               comm;
1504:   PetscInt               dim, cdim, nroots;
1505:   PetscSF                sfPoint;
1506:   ISLocalToGlobalMapping ltogMigration;
1507:   ISLocalToGlobalMapping ltogOriginal = NULL;
1508:   PetscBool              flg;
1509:   PetscErrorCode         ierr;

1513:   PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1514:   PetscObjectGetComm((PetscObject) dm, &comm);
1515:   DMGetDimension(dm, &dim);
1516:   DMSetDimension(targetDM, dim);
1517:   DMGetCoordinateDim(dm, &cdim);
1518:   DMSetCoordinateDim(targetDM, cdim);

1520:   /* Check for a one-to-all distribution pattern */
1521:   DMGetPointSF(dm, &sfPoint);
1522:   PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1523:   if (nroots >= 0) {
1524:     IS        isOriginal;
1525:     PetscInt  n, size, nleaves;
1526:     PetscInt  *numbering_orig, *numbering_new;

1528:     /* Get the original point numbering */
1529:     DMPlexCreatePointNumbering(dm, &isOriginal);
1530:     ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);
1531:     ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1532:     ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1533:     /* Convert to positive global numbers */
1534:     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1535:     /* Derive the new local-to-global mapping from the old one */
1536:     PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1537:     PetscMalloc1(nleaves, &numbering_new);
1538:     PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new);
1539:     PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new);
1540:     ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1541:     ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1542:     ISDestroy(&isOriginal);
1543:   } else {
1544:     /* One-to-all distribution pattern: We can derive LToG from SF */
1545:     ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1546:   }
1547:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1548:   if (flg) {
1549:     PetscPrintf(comm, "Point renumbering for DM migration:\n");
1550:     ISLocalToGlobalMappingView(ltogMigration, NULL);
1551:   }
1552:   /* Migrate DM data to target DM */
1553:   DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1554:   DMPlexDistributeLabels(dm, sf, targetDM);
1555:   DMPlexDistributeCoordinates(dm, sf, targetDM);
1556:   DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1557:   ISLocalToGlobalMappingDestroy(&ltogOriginal);
1558:   ISLocalToGlobalMappingDestroy(&ltogMigration);
1559:   PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1560:   return(0);
1561: }
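/* A minimal usage sketch (hypothetical caller code; "sfMigration" is assumed to be a
   migration star forest, e.g. built by DMPlexPartitionLabelCreateSF(), and "comm" the
   DM's communicator):

     DM dmTarget;

     DMPlexCreate(comm, &dmTarget);
     DMPlexMigrate(dm, sfMigration, dmTarget);

   after which dmTarget holds the migrated cones, labels, and coordinates. */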

1563: /*@C
1564:   DMPlexDistribute - Distributes the mesh and any associated sections.

1566:   Collective on dm

1568:   Input Parameters:
1569: + dm  - The original DMPlex object
1570: - overlap - The overlap of partitions, 0 is the default

1572:   Output Parameters:
1573: + sf - The PetscSF used for point distribution, or NULL if not needed
1574: - dmParallel - The distributed DMPlex object

1576:   Note: If the mesh is not distributed (the communicator has a single rank), the output dmParallel is NULL.

1578:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1579:   representation on the mesh.

1581:   Level: intermediate

1583: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexGetOverlap()
1584: @*/
1585: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1586: {
1587:   MPI_Comm               comm;
1588:   PetscPartitioner       partitioner;
1589:   IS                     cellPart;
1590:   PetscSection           cellPartSection;
1591:   DM                     dmCoord;
1592:   DMLabel                lblPartition, lblMigration;
1593:   PetscSF                sfMigration, sfStratified, sfPoint;
1594:   PetscBool              flg, balance;
1595:   PetscMPIInt            rank, size;
1596:   PetscErrorCode         ierr;


1604:   if (sf) *sf = NULL;
1605:   *dmParallel = NULL;
1606:   PetscObjectGetComm((PetscObject)dm,&comm);
1607:   MPI_Comm_rank(comm, &rank);
1608:   MPI_Comm_size(comm, &size);
1609:   if (size == 1) return(0);

1611:   PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1612:   /* Create cell partition */
1613:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1614:   PetscSectionCreate(comm, &cellPartSection);
1615:   DMPlexGetPartitioner(dm, &partitioner);
1616:   PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart);
1617:   PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0);
1618:   {
1619:     /* Convert partition to DMLabel */
1620:     IS             is;
1621:     PetscHSetI     ht;
1622:     const PetscInt *points;
1623:     PetscInt       *iranks;
1624:     PetscInt       pStart, pEnd, proc, npoints, poff = 0, nranks;

1626:     DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);
1627:     /* Preallocate strata */
1628:     PetscHSetICreate(&ht);
1629:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1630:     for (proc = pStart; proc < pEnd; proc++) {
1631:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1632:       if (npoints) {PetscHSetIAdd(ht, proc);}
1633:     }
1634:     PetscHSetIGetSize(ht, &nranks);
1635:     PetscMalloc1(nranks, &iranks);
1636:     PetscHSetIGetElems(ht, &poff, iranks);
1637:     PetscHSetIDestroy(&ht);
1638:     DMLabelAddStrata(lblPartition, nranks, iranks);
1639:     PetscFree(iranks);
1640:     /* Inline DMPlexPartitionLabelClosure() */
1641:     ISGetIndices(cellPart, &points);
1642:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1643:     for (proc = pStart; proc < pEnd; proc++) {
1644:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1645:       if (!npoints) continue;
1646:       PetscSectionGetOffset(cellPartSection, proc, &poff);
1647:       DMPlexClosurePoints_Private(dm, npoints, points+poff, &is);
1648:       DMLabelSetStratumIS(lblPartition, proc, is);
1649:       ISDestroy(&is);
1650:     }
1651:     ISRestoreIndices(cellPart, &points);
1652:   }
1653:   PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0);

1655:   DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);
1656:   DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);
1657:   DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1658:   DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1659:   PetscSFDestroy(&sfMigration);
1660:   sfMigration = sfStratified;
1661:   PetscSFSetUp(sfMigration);
1662:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1663:   PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1664:   if (flg) {
1665:     DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm));
1666:     PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm));
1667:   }

1669:   /* Create non-overlapping parallel DM and migrate internal data */
1670:   DMPlexCreate(comm, dmParallel);
1671:   PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1672:   DMPlexMigrate(dm, sfMigration, *dmParallel);

1674:   /* Build the point SF without overlap */
1675:   DMPlexGetPartitionBalance(dm, &balance);
1676:   DMPlexSetPartitionBalance(*dmParallel, balance);
1677:   DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1678:   DMSetPointSF(*dmParallel, sfPoint);
1679:   DMGetCoordinateDM(*dmParallel, &dmCoord);
1680:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1681:   if (flg) {PetscSFView(sfPoint, NULL);}

1683:   if (overlap > 0) {
1684:     DM                 dmOverlap;
1685:     PetscInt           nroots, nleaves, noldleaves, l;
1686:     const PetscInt    *oldLeaves;
1687:     PetscSFNode       *newRemote, *permRemote;
1688:     const PetscSFNode *oldRemote;
1689:     PetscSF            sfOverlap, sfOverlapPoint;

1691:     /* Add the partition overlap to the distributed DM */
1692:     DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1693:     DMDestroy(dmParallel);
1694:     *dmParallel = dmOverlap;
1695:     if (flg) {
1696:       PetscPrintf(comm, "Overlap Migration SF:\n");
1697:       PetscSFView(sfOverlap, NULL);
1698:     }

1700:     /* Re-map the migration SF to establish the full migration pattern */
1701:     PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote);
1702:     PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1703:     PetscMalloc1(nleaves, &newRemote);
1704:     /* oldRemote: original root point mapping to original leaf point
1705:        newRemote: original leaf point mapping to overlapped leaf point */
1706:     if (oldLeaves) {
1707:       /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1708:       PetscMalloc1(noldleaves, &permRemote);
1709:       for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1710:       oldRemote = permRemote;
1711:     }
1712:     PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1713:     PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1714:     if (oldLeaves) {PetscFree(oldRemote);}
1715:     PetscSFCreate(comm, &sfOverlapPoint);
1716:     PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1717:     PetscSFDestroy(&sfOverlap);
1718:     PetscSFDestroy(&sfMigration);
1719:     sfMigration = sfOverlapPoint;
1720:   }
1721:   /* Cleanup Partition */
1722:   DMLabelDestroy(&lblPartition);
1723:   DMLabelDestroy(&lblMigration);
1724:   PetscSectionDestroy(&cellPartSection);
1725:   ISDestroy(&cellPart);
1726:   /* Copy BC */
1727:   DMCopyBoundary(dm, *dmParallel);
1728:   /* Create sfNatural */
1729:   if (dm->useNatural) {
1730:     PetscSection section;

1732:     DMGetLocalSection(dm, &section);
1733:     DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1734:     DMSetUseNatural(*dmParallel, PETSC_TRUE);
1735:   }
1736:   /* Cleanup */
1737:   if (sf) {*sf = sfMigration;}
1738:   else    {PetscSFDestroy(&sfMigration);}
1739:   PetscSFDestroy(&sfPoint);
1740:   PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1741:   return(0);
1742: }
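/* A minimal usage sketch (hypothetical caller code) of the usual distribute-and-replace
   pattern; since dmParallel is NULL on a single rank, the original DM is kept there:

     DM      dmDist = NULL;
     PetscSF sf     = NULL;

     DMPlexDistribute(dm, 0, &sf, &dmDist);
     if (dmDist) {
       DMDestroy(&dm);
       dm = dmDist;
     }
     PetscSFDestroy(&sf);

   PetscSFDestroy() accepts a NULL SF, so the cleanup is safe on a single rank. */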

1744: /*@C
1745:   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

1747:   Collective on dm

1749:   Input Parameters:
1750: + dm  - The non-overlapping distributed DMPlex object
1751: - overlap - The overlap of partitions (the same on all ranks)

1753:   Output Parameters:
1754: + sf - The PetscSF used for point distribution
1755: - dmOverlap - The overlapping distributed DMPlex object, or NULL

1757:   Notes:
1758:   If the mesh was not distributed, dmOverlap is returned as NULL.

1760:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1761:   representation on the mesh.

1763:   Level: advanced

1765: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexDistribute(), DMPlexCreateOverlapLabel(), DMPlexGetOverlap()
1766: @*/
1767: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1768: {
1769:   MPI_Comm               comm;
1770:   PetscMPIInt            size, rank;
1771:   PetscSection           rootSection, leafSection;
1772:   IS                     rootrank, leafrank;
1773:   DM                     dmCoord;
1774:   DMLabel                lblOverlap;
1775:   PetscSF                sfOverlap, sfStratified, sfPoint;
1776:   PetscErrorCode         ierr;


1784:   if (sf) *sf = NULL;
1785:   *dmOverlap  = NULL;
1786:   PetscObjectGetComm((PetscObject)dm,&comm);
1787:   MPI_Comm_size(comm, &size);
1788:   MPI_Comm_rank(comm, &rank);
1789:   if (size == 1) return(0);

1791:   PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1792:   /* Compute point overlap with neighbouring processes on the distributed DM */
1793:   PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1794:   PetscSectionCreate(comm, &rootSection);
1795:   PetscSectionCreate(comm, &leafSection);
1796:   DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1797:   DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1798:   /* Convert overlap label to stratified migration SF */
1799:   DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1800:   DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1801:   PetscSFDestroy(&sfOverlap);
1802:   sfOverlap = sfStratified;
1803:   PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1804:   PetscSFSetFromOptions(sfOverlap);

1806:   PetscSectionDestroy(&rootSection);
1807:   PetscSectionDestroy(&leafSection);
1808:   ISDestroy(&rootrank);
1809:   ISDestroy(&leafrank);
1810:   PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);

1812:   /* Build the overlapping DM */
1813:   DMPlexCreate(comm, dmOverlap);
1814:   PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1815:   DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1816:   /* Store the overlap in the new DM */
1817:   ((DM_Plex*)(*dmOverlap)->data)->overlap = overlap + ((DM_Plex*)dm->data)->overlap;
1818:   /* Build the new point SF */
1819:   DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1820:   DMSetPointSF(*dmOverlap, sfPoint);
1821:   DMGetCoordinateDM(*dmOverlap, &dmCoord);
1822:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1823:   PetscSFDestroy(&sfPoint);
1824:   /* Cleanup overlap partition */
1825:   DMLabelDestroy(&lblOverlap);
1826:   if (sf) *sf = sfOverlap;
1827:   else    {PetscSFDestroy(&sfOverlap);}
1828:   PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1829:   return(0);
1830: }
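/* A minimal usage sketch (hypothetical caller code) adding one level of overlap to an
   already distributed, non-overlapping mesh:

     DM dmOverlap = NULL;

     DMPlexDistributeOverlap(dm, 1, NULL, &dmOverlap);
     if (dmOverlap) {
       DMDestroy(&dm);
       dm = dmOverlap;
     }
*/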

1832: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
1833: {
1834:   DM_Plex        *mesh  = (DM_Plex*) dm->data;

1837:   *overlap = mesh->overlap;
1838:   return(0);
1839: }

1841: /*@
1842:   DMPlexGetOverlap - Get the DMPlex partition overlap.

1844:   Not collective

1846:   Input Parameter:
1847: . dm - The DM

1849:   Output Parameter:
1850: . overlap - The overlap of this DM

1852:   Level: intermediate

1854: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap(), DMPlexCreateOverlapLabel()
1855: @*/
1856: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
1857: {
1858:   PetscErrorCode     ierr;

1862:   PetscUseMethod(dm,"DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap));
1863:   return(0);
1864: }


1867: /*@C
1868:   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1869:   root process of the original's communicator.

1871:   Collective on dm

1873:   Input Parameter:
1874: . dm - the original DMPlex object

1876:   Output Parameters:
1877: + sf - the PetscSF used for point distribution (optional)
1878: - gatherMesh - the gathered DM object, or NULL

1880:   Level: intermediate

1882: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1883: @*/
1884: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
1885: {
1886:   MPI_Comm       comm;
1887:   PetscMPIInt    size;
1888:   PetscPartitioner oldPart, gatherPart;

1894:   *gatherMesh = NULL;
1895:   if (sf) *sf = NULL;
1896:   comm = PetscObjectComm((PetscObject)dm);
1897:   MPI_Comm_size(comm,&size);
1898:   if (size == 1) return(0);
1899:   DMPlexGetPartitioner(dm,&oldPart);
1900:   PetscObjectReference((PetscObject)oldPart);
1901:   PetscPartitionerCreate(comm,&gatherPart);
1902:   PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1903:   DMPlexSetPartitioner(dm,gatherPart);
1904:   DMPlexDistribute(dm,0,sf,gatherMesh);

1906:   DMPlexSetPartitioner(dm,oldPart);
1907:   PetscPartitionerDestroy(&gatherPart);
1908:   PetscPartitionerDestroy(&oldPart);
1909:   return(0);
1910: }
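/* A minimal usage sketch (hypothetical caller code) gathering a distributed mesh onto
   rank 0, e.g. before serial output; the sf argument is optional and NULL here:

     DM gatherMesh = NULL;

     DMPlexGetGatherDM(dm, NULL, &gatherMesh);
     if (gatherMesh) {
       ... use the gathered mesh on rank 0 ...
       DMDestroy(&gatherMesh);
     }
*/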

1912: /*@C
1913:   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

1915:   Collective on dm

1917:   Input Parameter:
1918: . dm - the original DMPlex object

1920:   Output Parameters:
1921: + sf - the PetscSF used for point distribution (optional)
1922: - redundantMesh - the redundant DM object, or NULL

1924:   Level: intermediate

1926: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1927: @*/
1928: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
1929: {
1930:   MPI_Comm       comm;
1931:   PetscMPIInt    size, rank;
1932:   PetscInt       pStart, pEnd, p;
1933:   PetscInt       numPoints = -1;
1934:   PetscSF        migrationSF, sfPoint, gatherSF;
1935:   DM             gatherDM, dmCoord;
1936:   PetscSFNode    *points;

1942:   *redundantMesh = NULL;
1943:   comm = PetscObjectComm((PetscObject)dm);
1944:   MPI_Comm_size(comm,&size);
1945:   if (size == 1) {
1946:     PetscObjectReference((PetscObject) dm);
1947:     *redundantMesh = dm;
1948:     if (sf) *sf = NULL;
1949:     return(0);
1950:   }
1951:   DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);
1952:   if (!gatherDM) return(0);
1953:   MPI_Comm_rank(comm,&rank);
1954:   DMPlexGetChart(gatherDM,&pStart,&pEnd);
1955:   numPoints = pEnd - pStart;
1956:   MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1957:   PetscMalloc1(numPoints,&points);
1958:   PetscSFCreate(comm,&migrationSF);
1959:   for (p = 0; p < numPoints; p++) {
1960:     points[p].index = p;
1961:     points[p].rank  = 0;
1962:   }
1963:   PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1964:   DMPlexCreate(comm, redundantMesh);
1965:   PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1966:   DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1967:   DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1968:   DMSetPointSF(*redundantMesh, sfPoint);
1969:   DMGetCoordinateDM(*redundantMesh, &dmCoord);
1970:   if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1971:   PetscSFDestroy(&sfPoint);
1972:   if (sf) {
1973:     PetscSF tsf;

1975:     PetscSFCompose(gatherSF,migrationSF,&tsf);
1976:     DMPlexStratifyMigrationSF(dm, tsf, sf);
1977:     PetscSFDestroy(&tsf);
1978:   }
1979:   PetscSFDestroy(&migrationSF);
1980:   PetscSFDestroy(&gatherSF);
1981:   DMDestroy(&gatherDM);
1982:   return(0);
1983: }
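/* A minimal usage sketch (hypothetical caller code); afterwards every rank holds a
   full copy of the mesh:

     DM redundantMesh = NULL;

     DMPlexGetRedundantDM(dm, NULL, &redundantMesh);
     if (redundantMesh) {
       ... work with the redundant copy ...
       DMDestroy(&redundantMesh);
     }
*/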

1985: /*@
1986:   DMPlexIsDistributed - Find out whether this DM is distributed, i.e., whether more than one rank owns some points.

1988:   Collective

1990:   Input Parameter:
1991: . dm      - The DM object

1993:   Output Parameter:
1994: . distributed - Flag whether the DM is distributed

1996:   Level: intermediate

1998:   Notes:
1999:   This currently checks whether at least two ranks have any DAG points.
2000:   It involves an MPI_Allreduce() of a single integer.
2001:   The result is not cached, so every call to this routine incurs this global communication.

2003: .seealso: DMPlexDistribute(), DMPlexGetOverlap(), DMPlexIsInterpolated()
2004: @*/
2005: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2006: {
2007:   PetscInt          pStart, pEnd, count;
2008:   MPI_Comm          comm;
2009:   PetscErrorCode    ierr;

2014:   PetscObjectGetComm((PetscObject)dm,&comm);
2015:   DMPlexGetChart(dm, &pStart, &pEnd);
2016:   count = !!(pEnd - pStart);
2017:   MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm);
2018:   *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2019:   return(0);
2020: }
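/* A minimal usage sketch (hypothetical caller code); note that the call is collective,
   so all ranks must make it together:

     PetscBool distributed;

     DMPlexIsDistributed(dm, &distributed);
     if (!distributed) {
       ... e.g. call DMPlexDistribute() now ...
     }
*/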