Actual source code: plexdistribute.c
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsc/private/dmlabelimpl.h>
4: /*@C
5: DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
7: Input Parameters:
8: + dm - The DM object
9: . user - The user callback, may be `NULL` (to clear the callback)
10: - ctx - context for callback evaluation, may be `NULL`
12: Level: advanced
14: Notes:
15: The caller of `DMPlexGetAdjacency()` may need to arrange that a large enough array is available for the adjacency.
17: Any setting here overrides other configuration of `DMPLEX` adjacency determination.
19: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexGetAdjacencyUser()`
20: @*/
21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm, PetscErrorCode (*user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void *ctx)
22: {
23: DM_Plex *mesh = (DM_Plex *)dm->data;
25: PetscFunctionBegin;
27: mesh->useradjacency = user;
28: mesh->useradjacencyctx = ctx;
29: PetscFunctionReturn(PETSC_SUCCESS);
30: }
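/*
  Editor's note: illustrative sketch, not part of PETSc. It shows a callback with the signature
  expected by DMPlexSetAdjacencyUser() that declares each point adjacent to itself and its cone
  only, and how it would be registered. The names ExampleConeOnlyAdjacency and
  ExampleUseConeOnlyAdjacency are hypothetical and would normally live in application code.
*/
static PetscErrorCode ExampleConeOnlyAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
{
  const PetscInt *cone;
  PetscInt        coneSize, c;

  PetscFunctionBegin;
  PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
  PetscCall(DMPlexGetCone(dm, p, &cone));
  /* On input *adjSize is the capacity of adj[]; on output it is the number of adjacent points */
  PetscCheck(coneSize + 1 <= *adjSize, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Adjacency array too small for point %" PetscInt_FMT, p);
  adj[0] = p;
  for (c = 0; c < coneSize; ++c) adj[c + 1] = cone[c];
  *adjSize = coneSize + 1;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode ExampleUseConeOnlyAdjacency(DM dm)
{
  PetscFunctionBegin;
  /* Register the callback; calling DMPlexSetAdjacencyUser(dm, NULL, NULL) would clear it again */
  PetscCall(DMPlexSetAdjacencyUser(dm, ExampleConeOnlyAdjacency, NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}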
32: /*@C
33: DMPlexGetAdjacencyUser - Get the user-defined adjacency callback
35: Input Parameter:
36: . dm - The `DM` object
38: Output Parameters:
39: + user - The callback
40: - ctx - context for callback evaluation
42: Level: advanced
44: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexSetAdjacencyUser()`
45: @*/
46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void **ctx)
47: {
48: DM_Plex *mesh = (DM_Plex *)dm->data;
50: PetscFunctionBegin;
52: if (user) *user = mesh->useradjacency;
53: if (ctx) *ctx = mesh->useradjacencyctx;
54: PetscFunctionReturn(PETSC_SUCCESS);
55: }
57: /*@
58: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
60: Input Parameters:
61: + dm - The `DM` object
62: - useAnchors - Flag to use the constraints. If `PETSC_TRUE`, then constrained points are omitted from `DMPlexGetAdjacency()`, and their anchor points appear in their place.
64: Level: intermediate
66: .seealso: `DMPLEX`, `DMGetAdjacency()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
67: @*/
68: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
69: {
70: DM_Plex *mesh = (DM_Plex *)dm->data;
72: PetscFunctionBegin;
74: mesh->useAnchors = useAnchors;
75: PetscFunctionReturn(PETSC_SUCCESS);
76: }
78: /*@
79: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
81: Input Parameter:
82: . dm - The `DM` object
84: Output Parameter:
85: . useAnchors - Flag to use the constraints. If `PETSC_TRUE`, then constrained points are omitted from `DMPlexGetAdjacency()`, and their anchor points appear in their place.
87: Level: intermediate
89: .seealso: `DMPLEX`, `DMPlexSetAdjacencyUseAnchors()`, `DMSetAdjacency()`, `DMGetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
90: @*/
91: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
92: {
93: DM_Plex *mesh = (DM_Plex *)dm->data;
95: PetscFunctionBegin;
97: PetscAssertPointer(useAnchors, 2);
98: *useAnchors = mesh->useAnchors;
99: PetscFunctionReturn(PETSC_SUCCESS);
100: }
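/*
  Editor's note: illustrative sketch, not part of PETSc. It shows how the anchor flag above is
  typically combined with DMSetBasicAdjacency() before preallocation or distribution. The name
  ExampleConfigureAdjacency is hypothetical.
*/
static PetscErrorCode ExampleConfigureAdjacency(DM dm)
{
  PetscFunctionBegin;
  /* FEM-style adjacency: use the closure of the star of each point */
  PetscCall(DMSetBasicAdjacency(dm, PETSC_FALSE, PETSC_TRUE));
  /* Replace constrained points by their anchors in DMPlexGetAdjacency() */
  PetscCall(DMPlexSetAdjacencyUseAnchors(dm, PETSC_TRUE));
  PetscFunctionReturn(PETSC_SUCCESS);
}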
102: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103: {
104: const PetscInt *cone = NULL;
105: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
107: PetscFunctionBeginHot;
108: PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
109: PetscCall(DMPlexGetCone(dm, p, &cone));
110: for (c = 0; c <= coneSize; ++c) {
111: const PetscInt point = !c ? p : cone[c - 1];
112: const PetscInt *support = NULL;
113: PetscInt supportSize, s, q;
115: PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
116: PetscCall(DMPlexGetSupport(dm, point, &support));
117: for (s = 0; s < supportSize; ++s) {
118: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]), 0); ++q) {
119: if (support[s] == adj[q]) break;
120: }
121: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
122: }
123: }
124: *adjSize = numAdj;
125: PetscFunctionReturn(PETSC_SUCCESS);
126: }
128: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
129: {
130: const PetscInt *support = NULL;
131: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
133: PetscFunctionBeginHot;
134: PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
135: PetscCall(DMPlexGetSupport(dm, p, &support));
136: for (s = 0; s <= supportSize; ++s) {
137: const PetscInt point = !s ? p : support[s - 1];
138: const PetscInt *cone = NULL;
139: PetscInt coneSize, c, q;
141: PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
142: PetscCall(DMPlexGetCone(dm, point, &cone));
143: for (c = 0; c < coneSize; ++c) {
144: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]), 0); ++q) {
145: if (cone[c] == adj[q]) break;
146: }
147: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
148: }
149: }
150: *adjSize = numAdj;
151: PetscFunctionReturn(PETSC_SUCCESS);
152: }
154: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
155: {
156: PetscInt *star = NULL;
157: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
159: PetscFunctionBeginHot;
160: PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
161: for (s = 0; s < starSize * 2; s += 2) {
162: const PetscInt *closure = NULL;
163: PetscInt closureSize, c, q;
165: PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
166: for (c = 0; c < closureSize * 2; c += 2) {
167: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]), 0); ++q) {
168: if (closure[c] == adj[q]) break;
169: }
170: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
171: }
172: PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
173: }
174: PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
175: *adjSize = numAdj;
176: PetscFunctionReturn(PETSC_SUCCESS);
177: }
179: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
180: {
181: static PetscInt asiz = 0;
182: PetscInt maxAnchors = 1;
183: PetscInt aStart = -1, aEnd = -1;
184: PetscInt maxAdjSize;
185: PetscSection aSec = NULL;
186: IS aIS = NULL;
187: const PetscInt *anchors;
188: DM_Plex *mesh = (DM_Plex *)dm->data;
190: PetscFunctionBeginHot;
191: if (useAnchors) {
192: PetscCall(DMPlexGetAnchors(dm, &aSec, &aIS));
193: if (aSec) {
194: PetscCall(PetscSectionGetMaxDof(aSec, &maxAnchors));
195: maxAnchors = PetscMax(1, maxAnchors);
196: PetscCall(PetscSectionGetChart(aSec, &aStart, &aEnd));
197: PetscCall(ISGetIndices(aIS, &anchors));
198: }
199: }
200: if (!*adj) {
201: PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;
203: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
204: PetscCall(DMPlexGetDepth(dm, &depth));
205: depth = PetscMax(depth, -depth);
206: PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
207: coneSeries = (maxC > 1) ? ((PetscPowInt(maxC, depth + 1) - 1) / (maxC - 1)) : depth + 1;
208: supportSeries = (maxS > 1) ? ((PetscPowInt(maxS, depth + 1) - 1) / (maxS - 1)) : depth + 1;
209: asiz = PetscMax(PetscPowInt(maxS, depth) * coneSeries, PetscPowInt(maxC, depth) * supportSeries);
210: asiz *= maxAnchors;
211: asiz = PetscMin(asiz, pEnd - pStart);
212: PetscCall(PetscMalloc1(asiz, adj));
213: }
214: if (*adjSize < 0) *adjSize = asiz;
215: maxAdjSize = *adjSize;
216: if (mesh->useradjacency) {
217: PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
218: } else if (useTransitiveClosure) {
219: PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
220: } else if (useCone) {
221: PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
222: } else {
223: PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
224: }
225: if (useAnchors && aSec) {
226: PetscInt origSize = *adjSize;
227: PetscInt numAdj = origSize;
228: PetscInt i = 0, j;
229: PetscInt *orig = *adj;
231: while (i < origSize) {
232: PetscInt p = orig[i];
233: PetscInt aDof = 0;
235: if (p >= aStart && p < aEnd) PetscCall(PetscSectionGetDof(aSec, p, &aDof));
236: if (aDof) {
237: PetscInt aOff;
238: PetscInt s, q;
240: for (j = i + 1; j < numAdj; j++) orig[j - 1] = orig[j];
241: origSize--;
242: numAdj--;
243: PetscCall(PetscSectionGetOffset(aSec, p, &aOff));
244: for (s = 0; s < aDof; ++s) {
245: for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff + s]), 0); ++q) {
246: if (anchors[aOff + s] == orig[q]) break;
247: }
248: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
249: }
250: } else {
251: i++;
252: }
253: }
254: *adjSize = numAdj;
255: PetscCall(ISRestoreIndices(aIS, &anchors));
256: }
257: PetscFunctionReturn(PETSC_SUCCESS);
258: }
260: /*@
261: DMPlexGetAdjacency - Return all points adjacent to the given point
263: Input Parameters:
264: + dm - The `DM` object
265: - p - The point
267: Input/Output Parameters:
268: + adjSize - The maximum size of `adj` if it is non-`NULL`, or `PETSC_DETERMINE`;
269: on output the number of adjacent points
270: - adj - Either `NULL` so that the array is allocated, or an existing array with size `adjSize`;
271: on output contains the adjacent points
273: Level: advanced
275: Notes:
276: The user must `PetscFree()` the `adj` array if it was not passed in.
278: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMCreateMatrix()`, `DMPlexPreallocateOperator()`
279: @*/
280: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
281: {
282: PetscBool useCone, useClosure, useAnchors;
284: PetscFunctionBeginHot;
286: PetscAssertPointer(adjSize, 3);
287: PetscAssertPointer(adj, 4);
288: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
289: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
290: PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
291: PetscFunctionReturn(PETSC_SUCCESS);
292: }
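/*
  Editor's note: illustrative sketch, not part of PETSc. It shows the allocation pattern described
  in the manual page above: pass adj = NULL so DMPlexGetAdjacency() allocates the array, reuse it
  for every point, and free it once at the end. The name ExampleVisitAdjacency is hypothetical.
*/
static PetscErrorCode ExampleVisitAdjacency(DM dm)
{
  PetscInt *adj = NULL;
  PetscInt  pStart, pEnd, p;

  PetscFunctionBegin;
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  for (p = pStart; p < pEnd; ++p) {
    PetscInt adjSize = PETSC_DETERMINE;

    PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
    /* adj[0], ..., adj[adjSize-1] now hold the points adjacent to p under the current settings */
  }
  PetscCall(PetscFree(adj));
  PetscFunctionReturn(PETSC_SUCCESS);
}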
294: /*@
295: DMPlexCreateTwoSidedProcessSF - Create a `PetscSF` that encodes just the process connectivity
297: Collective
299: Input Parameters:
300: + dm - The `DM`
301: . sfPoint - The `PetscSF` which encodes point connectivity
302: . rootRankSection - The number of leaves for a given root point (see `DMPlexDistributeOwnership()`)
303: . rootRanks - The rank of each edge into the root point
304: . leafRankSection - The number of processes sharing a given leaf point
305: - leafRanks - The rank of each process sharing a leaf point
307: Output Parameters:
308: + processRanks - A list of process neighbors, or `NULL`
309: - sfProcess - A `PetscSF` encoding the two-sided process connectivity, or `NULL`
311: Level: developer
313: .seealso: `DMPLEX`, `PetscSFCreate()`, `DMPlexCreateProcessSF()`
314: @*/
315: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
316: {
317: const PetscSFNode *remotePoints;
318: PetscInt *localPointsNew;
319: PetscSFNode *remotePointsNew;
320: const PetscInt *nranks;
321: PetscInt *ranksNew;
322: PetscBT neighbors;
323: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
324: PetscMPIInt size, proc, rank;
326: PetscFunctionBegin;
329: if (processRanks) PetscAssertPointer(processRanks, 7);
330: if (sfProcess) PetscAssertPointer(sfProcess, 8);
331: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
332: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
333: PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
334: PetscCall(PetscBTCreate(size, &neighbors));
335: PetscCall(PetscBTMemzero(size, neighbors));
336: /* Compute root-to-leaf process connectivity */
337: PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
338: PetscCall(ISGetIndices(rootRanks, &nranks));
339: for (p = pStart; p < pEnd; ++p) {
340: PetscInt ndof, noff, n;
342: PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
343: PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
344: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
345: }
346: PetscCall(ISRestoreIndices(rootRanks, &nranks));
347: /* Compute leaf-to-neighbor process connectivity */
348: PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
349: PetscCall(ISGetIndices(leafRanks, &nranks));
350: for (p = pStart; p < pEnd; ++p) {
351: PetscInt ndof, noff, n;
353: PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
354: PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
355: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
356: }
357: PetscCall(ISRestoreIndices(leafRanks, &nranks));
358: /* Compute leaf-to-root process connectivity */
359: for (l = 0; l < numLeaves; ++l) PetscCall(PetscBTSet(neighbors, remotePoints[l].rank));
360: /* Calculate edges */
361: PetscCall(PetscBTClear(neighbors, rank));
362: for (proc = 0, numNeighbors = 0; proc < size; ++proc) {
363: if (PetscBTLookup(neighbors, proc)) ++numNeighbors;
364: }
365: PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
366: PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
367: PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
368: for (proc = 0, n = 0; proc < size; ++proc) {
369: if (PetscBTLookup(neighbors, proc)) {
370: ranksNew[n] = proc;
371: localPointsNew[n] = proc;
372: remotePointsNew[n].index = rank;
373: remotePointsNew[n].rank = proc;
374: ++n;
375: }
376: }
377: PetscCall(PetscBTDestroy(&neighbors));
378: if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
379: else PetscCall(PetscFree(ranksNew));
380: if (sfProcess) {
381: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
382: PetscCall(PetscObjectSetName((PetscObject)*sfProcess, "Two-Sided Process SF"));
383: PetscCall(PetscSFSetFromOptions(*sfProcess));
384: PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
385: }
386: PetscFunctionReturn(PETSC_SUCCESS);
387: }
389: /*@
390: DMPlexDistributeOwnership - Compute owner information for shared points. This effectively builds the two-sided (root and leaf) information for the point `PetscSF`.
392: Collective
394: Input Parameter:
395: . dm - The `DM`
397: Output Parameters:
398: + rootSection - The number of leaves for a given root point
399: . rootrank - The rank of each edge into the root point
400: . leafSection - The number of processes sharing a given leaf point
401: - leafrank - The rank of each process sharing a leaf point
403: Level: developer
405: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
406: @*/
407: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
408: {
409: MPI_Comm comm;
410: PetscSF sfPoint;
411: const PetscInt *rootdegree;
412: PetscInt *myrank, *remoterank;
413: PetscInt pStart, pEnd, p, nedges;
414: PetscMPIInt rank;
416: PetscFunctionBegin;
417: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
418: PetscCallMPI(MPI_Comm_rank(comm, &rank));
419: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
420: PetscCall(DMGetPointSF(dm, &sfPoint));
421: /* Compute number of leaves for each root */
422: PetscCall(PetscObjectSetName((PetscObject)rootSection, "Root Section"));
423: PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
424: PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
425: PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
426: for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p - pStart]));
427: PetscCall(PetscSectionSetUp(rootSection));
428: /* Gather rank of each leaf to root */
429: PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
430: PetscCall(PetscMalloc1(pEnd - pStart, &myrank));
431: PetscCall(PetscMalloc1(nedges, &remoterank));
432: for (p = 0; p < pEnd - pStart; ++p) myrank[p] = rank;
433: PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
434: PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
435: PetscCall(PetscFree(myrank));
436: PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
437: /* Distribute remote ranks to leaves */
438: PetscCall(PetscObjectSetName((PetscObject)leafSection, "Leaf Section"));
439: PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
440: PetscFunctionReturn(PETSC_SUCCESS);
441: }
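/*
  Editor's note: illustrative sketch, not part of PETSc. It shows the calling sequence for
  DMPlexDistributeOwnership(): the caller creates the two sections, the routine creates the two
  index sets, and everything is destroyed by the caller afterwards. The name
  ExampleComputeOwnership is hypothetical.
*/
static PetscErrorCode ExampleComputeOwnership(DM dm)
{
  PetscSection rootSection, leafSection;
  IS           rootrank, leafrank;

  PetscFunctionBegin;
  PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &rootSection));
  PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &leafSection));
  PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
  /* ... use the ownership information, e.g. in DMPlexCreateOverlapLabel() ... */
  PetscCall(ISDestroy(&rootrank));
  PetscCall(ISDestroy(&leafrank));
  PetscCall(PetscSectionDestroy(&rootSection));
  PetscCall(PetscSectionDestroy(&leafSection));
  PetscFunctionReturn(PETSC_SUCCESS);
}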
443: /*@C
444: DMPlexCreateOverlapLabel - Compute a label indicating which overlap points should be sent to new processes
446: Collective
448: Input Parameters:
449: + dm - The `DM`
450: . levels - Number of overlap levels
451: . rootSection - The number of leaves for a given root point
452: . rootrank - The rank of each edge into the root point
453: . leafSection - The number of processes sharing a given leaf point
454: - leafrank - The rank of each process sharing a leaf point
456: Output Parameter:
457: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
459: Level: developer
461: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabelFromLabels()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
462: @*/
463: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
464: {
465: MPI_Comm comm;
466: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
467: PetscSF sfPoint;
468: const PetscSFNode *remote;
469: const PetscInt *local;
470: const PetscInt *nrank, *rrank;
471: PetscInt *adj = NULL;
472: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
473: PetscMPIInt rank, size;
474: PetscBool flg;
476: PetscFunctionBegin;
477: *ovLabel = NULL;
478: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
479: PetscCallMPI(MPI_Comm_size(comm, &size));
480: PetscCallMPI(MPI_Comm_rank(comm, &rank));
481: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
482: PetscCall(DMGetPointSF(dm, &sfPoint));
483: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
484: if (!levels) {
485: /* Add owned points */
486: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
487: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
488: PetscFunctionReturn(PETSC_SUCCESS);
489: }
490: PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
491: PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
492: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
493: /* Handle leaves: shared with the root point */
494: for (l = 0; l < nleaves; ++l) {
495: PetscInt adjSize = PETSC_DETERMINE, a;
497: PetscCall(DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj));
498: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank));
499: }
500: PetscCall(ISGetIndices(rootrank, &rrank));
501: PetscCall(ISGetIndices(leafrank, &nrank));
502: /* Handle roots */
503: for (p = pStart; p < pEnd; ++p) {
504: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
506: if ((p >= sStart) && (p < sEnd)) {
507: /* Some leaves share a root with other leaves on different processes */
508: PetscCall(PetscSectionGetDof(leafSection, p, &neighbors));
509: if (neighbors) {
510: PetscCall(PetscSectionGetOffset(leafSection, p, &noff));
511: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
512: for (n = 0; n < neighbors; ++n) {
513: const PetscInt remoteRank = nrank[noff + n];
515: if (remoteRank == rank) continue;
516: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
517: }
518: }
519: }
520: /* Roots are shared with leaves */
521: PetscCall(PetscSectionGetDof(rootSection, p, &neighbors));
522: if (!neighbors) continue;
523: PetscCall(PetscSectionGetOffset(rootSection, p, &noff));
524: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
525: for (n = 0; n < neighbors; ++n) {
526: const PetscInt remoteRank = rrank[noff + n];
528: if (remoteRank == rank) continue;
529: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
530: }
531: }
532: PetscCall(PetscFree(adj));
533: PetscCall(ISRestoreIndices(rootrank, &rrank));
534: PetscCall(ISRestoreIndices(leafrank, &nrank));
535: /* Add additional overlap levels */
536: for (l = 1; l < levels; l++) {
537: /* Propagate point donations over SF to capture remote connections */
538: PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank));
539: /* Add next level of point donations to the label */
540: PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank));
541: }
542: /* We require the closure in the overlap */
543: PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
544: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
545: if (flg) {
546: PetscViewer viewer;
547: PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
548: PetscCall(DMLabelView(ovAdjByRank, viewer));
549: }
550: /* Invert sender to receiver label */
551: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
552: PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
553: /* Add owned points, except for shared local points */
554: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
555: for (l = 0; l < nleaves; ++l) {
556: PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
557: PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
558: }
559: /* Clean up */
560: PetscCall(DMLabelDestroy(&ovAdjByRank));
561: PetscFunctionReturn(PETSC_SUCCESS);
562: }
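/*
  Editor's note: illustrative sketch, not part of PETSc. It shows how an overlap label is built
  from the ownership data produced by DMPlexDistributeOwnership(), the same sequence used inside
  DMPlexDistributeOverlap(). The name ExampleBuildOverlapLabel is hypothetical.
*/
static PetscErrorCode ExampleBuildOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank)
{
  DMLabel ovLabel;

  PetscFunctionBegin;
  PetscCall(DMPlexCreateOverlapLabel(dm, levels, rootSection, rootrank, leafSection, leafrank, &ovLabel));
  /* On a single process ovLabel is NULL; otherwise it pairs each point with the receiving rank */
  if (ovLabel) {
    /* ... e.g. turn the label into a migration SF and distribute the overlap ... */
    PetscCall(DMLabelDestroy(&ovLabel));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}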
564: static PetscErrorCode HandlePoint_Private(DM dm, PetscInt p, PetscSection section, const PetscInt ranks[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], DMLabel ovAdjByRank)
565: {
566: PetscInt neighbors, el;
568: PetscFunctionBegin;
569: PetscCall(PetscSectionGetDof(section, p, &neighbors));
570: if (neighbors) {
571: PetscInt *adj = NULL;
572: PetscInt adjSize = PETSC_DETERMINE, noff, n, a;
573: PetscMPIInt rank;
575: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
576: PetscCall(PetscSectionGetOffset(section, p, &noff));
577: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
578: for (n = 0; n < neighbors; ++n) {
579: const PetscInt remoteRank = ranks[noff + n];
581: if (remoteRank == rank) continue;
582: for (a = 0; a < adjSize; ++a) {
583: PetscBool insert = PETSC_TRUE;
585: for (el = 0; el < numExLabels; ++el) {
586: PetscInt exVal;
587: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
588: if (exVal == exValue[el]) {
589: insert = PETSC_FALSE;
590: break;
591: }
592: }
593: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
594: }
595: }
596: PetscCall(PetscFree(adj));
597: }
598: PetscFunctionReturn(PETSC_SUCCESS);
599: }
601: /*@C
602: DMPlexCreateOverlapLabelFromLabels - Compute a label indicating which overlap points should be sent to new processes
604: Collective
606: Input Parameters:
607: + dm - The `DM`
608: . numLabels - The number of labels to draw candidate points from
609: . label - An array of labels containing candidate points
610: . value - An array of label values marking the candidate points
611: . numExLabels - The number of labels to use for exclusion
612: . exLabel - An array of labels indicating points to be excluded, or `NULL`
613: . exValue - An array of label values to be excluded, or `NULL`
614: . rootSection - The number of leaves for a given root point
615: . rootrank - The rank of each edge into the root point
616: . leafSection - The number of processes sharing a given leaf point
617: - leafrank - The rank of each process sharing a leaf point
619: Output Parameter:
620: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
622: Level: developer
624: Note:
625: The candidate points are only added to the overlap if they are adjacent to a shared point
627: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
628: @*/
629: PetscErrorCode DMPlexCreateOverlapLabelFromLabels(DM dm, PetscInt numLabels, const DMLabel label[], const PetscInt value[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
630: {
631: MPI_Comm comm;
632: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
633: PetscSF sfPoint;
634: const PetscSFNode *remote;
635: const PetscInt *local;
636: const PetscInt *nrank, *rrank;
637: PetscInt *adj = NULL;
638: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l, el;
639: PetscMPIInt rank, size;
640: PetscBool flg;
642: PetscFunctionBegin;
643: *ovLabel = NULL;
644: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
645: PetscCallMPI(MPI_Comm_size(comm, &size));
646: PetscCallMPI(MPI_Comm_rank(comm, &rank));
647: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
648: PetscCall(DMGetPointSF(dm, &sfPoint));
649: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
650: PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
651: PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
652: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
653: PetscCall(ISGetIndices(rootrank, &rrank));
654: PetscCall(ISGetIndices(leafrank, &nrank));
655: for (l = 0; l < numLabels; ++l) {
656: IS valIS;
657: const PetscInt *points;
658: PetscInt n;
660: PetscCall(DMLabelGetStratumIS(label[l], value[l], &valIS));
661: if (!valIS) continue;
662: PetscCall(ISGetIndices(valIS, &points));
663: PetscCall(ISGetLocalSize(valIS, &n));
664: for (PetscInt i = 0; i < n; ++i) {
665: const PetscInt p = points[i];
667: if ((p >= sStart) && (p < sEnd)) {
668: PetscInt loc, adjSize = PETSC_DETERMINE;
670: /* Handle leaves: shared with the root point */
671: if (local) PetscCall(PetscFindInt(p, nleaves, local, &loc));
672: else loc = (p >= 0 && p < nleaves) ? p : -1;
673: if (loc >= 0) {
674: const PetscInt remoteRank = remote[loc].rank;
676: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
677: for (PetscInt a = 0; a < adjSize; ++a) {
678: PetscBool insert = PETSC_TRUE;
680: for (el = 0; el < numExLabels; ++el) {
681: PetscInt exVal;
682: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
683: if (exVal == exValue[el]) {
684: insert = PETSC_FALSE;
685: break;
686: }
687: }
688: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
689: }
690: }
691: /* Some leaves share a root with other leaves on different processes */
692: PetscCall(HandlePoint_Private(dm, p, leafSection, nrank, numExLabels, exLabel, exValue, ovAdjByRank));
693: }
694: /* Roots are shared with leaves */
695: PetscCall(HandlePoint_Private(dm, p, rootSection, rrank, numExLabels, exLabel, exValue, ovAdjByRank));
696: }
697: PetscCall(ISRestoreIndices(valIS, &points));
698: PetscCall(ISDestroy(&valIS));
699: }
700: PetscCall(PetscFree(adj));
701: PetscCall(ISRestoreIndices(rootrank, &rrank));
702: PetscCall(ISRestoreIndices(leafrank, &nrank));
703: /* We require the closure in the overlap */
704: PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
705: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
706: if (flg) {
707: PetscViewer viewer;
708: PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
709: PetscCall(DMLabelView(ovAdjByRank, viewer));
710: }
711: /* Invert sender to receiver label */
712: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
713: PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
714: /* Add owned points, except for shared local points */
715: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
716: for (l = 0; l < nleaves; ++l) {
717: PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
718: PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
719: }
720: /* Clean up */
721: PetscCall(DMLabelDestroy(&ovAdjByRank));
722: PetscFunctionReturn(PETSC_SUCCESS);
723: }
725: /*@C
726: DMPlexCreateOverlapMigrationSF - Create a `PetscSF` describing the new mesh distribution that realizes the overlap described by the input `PetscSF`
728: Collective
730: Input Parameters:
731: + dm - The `DM`
732: - overlapSF - The `PetscSF` mapping ghost points in overlap to owner points on other processes
734: Output Parameter:
735: . migrationSF - A `PetscSF` that maps original points in old locations to points in new locations
737: Level: developer
739: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistributeOverlap()`, `DMPlexDistribute()`
740: @*/
741: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
742: {
743: MPI_Comm comm;
744: PetscMPIInt rank, size;
745: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
746: PetscInt *pointDepths, *remoteDepths, *ilocal;
747: PetscInt *depthRecv, *depthShift, *depthIdx;
748: PetscSFNode *iremote;
749: PetscSF pointSF;
750: const PetscInt *sharedLocal;
751: const PetscSFNode *overlapRemote, *sharedRemote;
753: PetscFunctionBegin;
755: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
756: PetscCallMPI(MPI_Comm_rank(comm, &rank));
757: PetscCallMPI(MPI_Comm_size(comm, &size));
758: PetscCall(DMGetDimension(dm, &dim));
760: /* Before building the migration SF we need to know the new stratum offsets */
761: PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
762: PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
763: for (d = 0; d < dim + 1; d++) {
764: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
765: for (p = pStart; p < pEnd; p++) pointDepths[p] = d;
766: }
767: for (p = 0; p < nleaves; p++) remoteDepths[p] = -1;
768: PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
769: PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
771: /* Count received points in each stratum and compute the internal strata shift */
772: PetscCall(PetscMalloc3(dim + 1, &depthRecv, dim + 1, &depthShift, dim + 1, &depthIdx));
773: for (d = 0; d < dim + 1; d++) depthRecv[d] = 0;
774: for (p = 0; p < nleaves; p++) depthRecv[remoteDepths[p]]++;
775: depthShift[dim] = 0;
776: for (d = 0; d < dim; d++) depthShift[d] = depthRecv[dim];
777: for (d = 1; d < dim; d++) depthShift[d] += depthRecv[0];
778: for (d = dim - 2; d > 0; d--) depthShift[d] += depthRecv[d + 1];
779: for (d = 0; d < dim + 1; d++) {
780: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
781: depthIdx[d] = pStart + depthShift[d];
782: }
784: /* Build an SF that describes the full overlap migration */
785: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
786: newLeaves = pEnd - pStart + nleaves;
787: PetscCall(PetscMalloc1(newLeaves, &ilocal));
788: PetscCall(PetscMalloc1(newLeaves, &iremote));
789: /* First map local points to themselves */
790: for (d = 0; d < dim + 1; d++) {
791: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
792: for (p = pStart; p < pEnd; p++) {
793: point = p + depthShift[d];
794: ilocal[point] = point;
795: iremote[point].index = p;
796: iremote[point].rank = rank;
797: depthIdx[d]++;
798: }
799: }
801: /* Add in the remote roots for currently shared points */
802: PetscCall(DMGetPointSF(dm, &pointSF));
803: PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
804: for (d = 0; d < dim + 1; d++) {
805: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
806: for (p = 0; p < numSharedPoints; p++) {
807: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
808: point = sharedLocal[p] + depthShift[d];
809: iremote[point].index = sharedRemote[p].index;
810: iremote[point].rank = sharedRemote[p].rank;
811: }
812: }
813: }
815: /* Now add the incoming overlap points */
816: for (p = 0; p < nleaves; p++) {
817: point = depthIdx[remoteDepths[p]];
818: ilocal[point] = point;
819: iremote[point].index = overlapRemote[p].index;
820: iremote[point].rank = overlapRemote[p].rank;
821: depthIdx[remoteDepths[p]]++;
822: }
823: PetscCall(PetscFree2(pointDepths, remoteDepths));
825: PetscCall(PetscSFCreate(comm, migrationSF));
826: PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Overlap Migration SF"));
827: PetscCall(PetscSFSetFromOptions(*migrationSF));
828: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
829: PetscCall(PetscSFSetGraph(*migrationSF, pEnd - pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
831: PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
832: PetscFunctionReturn(PETSC_SUCCESS);
833: }
835: /*@
836: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration `PetscSF` for stratification.
838: Input Parameters:
839: + dm - The `DM`
840: - sf - A star forest with non-ordered leaves, usually defining a `DM` point migration
842: Output Parameter:
843: . migrationSF - A star forest with added leaf indirection that ensures the resulting `DM` is stratified
845: Level: developer
847: Note:
848: This lexicographically sorts by (depth, cellType)
850: .seealso: `DMPLEX`, `DMPlexPartitionLabelCreateSF()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
851: @*/
852: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
853: {
854: MPI_Comm comm;
855: PetscMPIInt rank, size;
856: PetscInt d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
857: PetscSFNode *pointDepths, *remoteDepths;
858: PetscInt *ilocal;
859: PetscInt *depthRecv, *depthShift, *depthIdx;
860: PetscInt *ctRecv, *ctShift, *ctIdx;
861: const PetscSFNode *iremote;
863: PetscFunctionBegin;
865: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
866: PetscCallMPI(MPI_Comm_rank(comm, &rank));
867: PetscCallMPI(MPI_Comm_size(comm, &size));
868: PetscCall(DMPlexGetDepth(dm, &ldepth));
869: PetscCall(DMGetDimension(dm, &dim));
870: PetscCall(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
871: PetscCheck(!(ldepth >= 0) || !(depth != ldepth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, ldepth, depth);
872: PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF, dm, 0, 0, 0));
874: /* Before building the migration SF we need to know the new stratum offsets */
875: PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
876: PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
877: for (d = 0; d < depth + 1; ++d) {
878: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
879: for (p = pStart; p < pEnd; ++p) {
880: DMPolytopeType ct;
882: PetscCall(DMPlexGetCellType(dm, p, &ct));
883: pointDepths[p].index = d;
884: pointDepths[p].rank = ct;
885: }
886: }
887: for (p = 0; p < nleaves; ++p) {
888: remoteDepths[p].index = -1;
889: remoteDepths[p].rank = -1;
890: }
891: PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths, MPI_REPLACE));
892: PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths, MPI_REPLACE));
893: /* Count received points in each stratum and compute the internal strata shift */
894: PetscCall(PetscCalloc6(depth + 1, &depthRecv, depth + 1, &depthShift, depth + 1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
895: for (p = 0; p < nleaves; ++p) {
896: if (remoteDepths[p].rank < 0) {
897: ++depthRecv[remoteDepths[p].index];
898: } else {
899: ++ctRecv[remoteDepths[p].rank];
900: }
901: }
902: {
903: PetscInt depths[4], dims[4], shift = 0, i, c;
905: /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
906: Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
907: */
908: depths[0] = depth;
909: depths[1] = 0;
910: depths[2] = depth - 1;
911: depths[3] = 1;
912: dims[0] = dim;
913: dims[1] = 0;
914: dims[2] = dim - 1;
915: dims[3] = 1;
916: for (i = 0; i <= depth; ++i) {
917: const PetscInt dep = depths[i];
918: const PetscInt dim = dims[i];
920: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
921: if (DMPolytopeTypeGetDim((DMPolytopeType)c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
922: ctShift[c] = shift;
923: shift += ctRecv[c];
924: }
925: depthShift[dep] = shift;
926: shift += depthRecv[dep];
927: }
928: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
929: const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType)c);
931: if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
932: ctShift[c] = shift;
933: shift += ctRecv[c];
934: }
935: }
936: }
937: /* Derive a new local permutation based on stratified indices */
938: PetscCall(PetscMalloc1(nleaves, &ilocal));
939: for (p = 0; p < nleaves; ++p) {
940: const PetscInt dep = remoteDepths[p].index;
941: const DMPolytopeType ct = (DMPolytopeType)remoteDepths[p].rank;
943: if ((PetscInt)ct < 0) {
944: ilocal[p] = depthShift[dep] + depthIdx[dep];
945: ++depthIdx[dep];
946: } else {
947: ilocal[p] = ctShift[ct] + ctIdx[ct];
948: ++ctIdx[ct];
949: }
950: }
951: PetscCall(PetscSFCreate(comm, migrationSF));
952: PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Migration SF"));
953: PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode *)iremote, PETSC_COPY_VALUES));
954: PetscCall(PetscFree2(pointDepths, remoteDepths));
955: PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
956: PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF, dm, 0, 0, 0));
957: PetscFunctionReturn(PETSC_SUCCESS);
958: }
960: /*@
961: DMPlexDistributeField - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
963: Collective
965: Input Parameters:
966: + dm - The `DMPLEX` object
967: . pointSF - The `PetscSF` describing the communication pattern
968: . originalSection - The `PetscSection` for existing data layout
969: - originalVec - The existing data in a local vector
971: Output Parameters:
972: + newSection - The `PetscSection` describing the new data layout
973: - newVec - The new data in a local vector
975: Level: developer
977: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeFieldIS()`, `DMPlexDistributeData()`
978: @*/
979: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
980: {
981: PetscSF fieldSF;
982: PetscInt *remoteOffsets, fieldSize;
983: PetscScalar *originalValues, *newValues;
985: PetscFunctionBegin;
986: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
987: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
989: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
990: PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE));
991: PetscCall(VecSetType(newVec, dm->vectype));
993: PetscCall(VecGetArray(originalVec, &originalValues));
994: PetscCall(VecGetArray(newVec, &newValues));
995: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
996: PetscCall(PetscFree(remoteOffsets));
997: PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
998: PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
999: PetscCall(PetscSFDestroy(&fieldSF));
1000: PetscCall(VecRestoreArray(newVec, &newValues));
1001: PetscCall(VecRestoreArray(originalVec, &originalValues));
1002: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1003: PetscFunctionReturn(PETSC_SUCCESS);
1004: }
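/*
  Editor's note: illustrative sketch, not part of PETSc. It shows the calling convention of
  DMPlexDistributeField(): the caller creates the destination section and vector, and the routine
  sets the vector's size and type and fills it, mirroring DMPlexDistributeCoordinates() below.
  The name ExampleDistributeField is hypothetical.
*/
static PetscErrorCode ExampleDistributeField(DM dm, PetscSF migrationSF, PetscSection origSection, Vec origVec, PetscSection *newSection, Vec *newVec)
{
  PetscFunctionBegin;
  PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), newSection));
  PetscCall(VecCreate(PETSC_COMM_SELF, newVec));
  PetscCall(DMPlexDistributeField(dm, migrationSF, origSection, origVec, *newSection, *newVec));
  /* The caller owns *newSection and *newVec and must destroy them when done */
  PetscFunctionReturn(PETSC_SUCCESS);
}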
1006: /*@
1007: DMPlexDistributeFieldIS - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1009: Collective
1011: Input Parameters:
1012: + dm - The `DMPLEX` object
1013: . pointSF - The `PetscSF` describing the communication pattern
1014: . originalSection - The `PetscSection` for existing data layout
1015: - originalIS - The existing data
1017: Output Parameters:
1018: + newSection - The `PetscSection` describing the new data layout
1019: - newIS - The new data
1021: Level: developer
1023: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`, `DMPlexDistributeData()`
1024: @*/
1025: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
1026: {
1027: PetscSF fieldSF;
1028: PetscInt *newValues, *remoteOffsets, fieldSize;
1029: const PetscInt *originalValues;
1031: PetscFunctionBegin;
1032: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
1033: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1035: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1036: PetscCall(PetscMalloc1(fieldSize, &newValues));
1038: PetscCall(ISGetIndices(originalIS, &originalValues));
1039: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1040: PetscCall(PetscFree(remoteOffsets));
1041: PetscCall(PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE));
1042: PetscCall(PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE));
1043: PetscCall(PetscSFDestroy(&fieldSF));
1044: PetscCall(ISRestoreIndices(originalIS, &originalValues));
1045: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS));
1046: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1047: PetscFunctionReturn(PETSC_SUCCESS);
1048: }
1050: /*@
1051: DMPlexDistributeData - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1053: Collective
1055: Input Parameters:
1056: + dm - The `DMPLEX` object
1057: . pointSF - The `PetscSF` describing the communication pattern
1058: . originalSection - The `PetscSection` for existing data layout
1059: . datatype - The type of data
1060: - originalData - The existing data
1062: Output Parameters:
1063: + newSection - The `PetscSection` describing the new data layout
1064: - newData - The new data
1066: Level: developer
1068: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`
1069: @*/
1070: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
1071: {
1072: PetscSF fieldSF;
1073: PetscInt *remoteOffsets, fieldSize;
1074: PetscMPIInt dataSize;
1076: PetscFunctionBegin;
1077: PetscCall(PetscLogEventBegin(DMPLEX_DistributeData, dm, 0, 0, 0));
1078: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1080: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1081: PetscCallMPI(MPI_Type_size(datatype, &dataSize));
1082: PetscCall(PetscMalloc(fieldSize * dataSize, newData));
1084: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1085: PetscCall(PetscFree(remoteOffsets));
1086: PetscCall(PetscSFBcastBegin(fieldSF, datatype, originalData, *newData, MPI_REPLACE));
1087: PetscCall(PetscSFBcastEnd(fieldSF, datatype, originalData, *newData, MPI_REPLACE));
1088: PetscCall(PetscSFDestroy(&fieldSF));
1089: PetscCall(PetscLogEventEnd(DMPLEX_DistributeData, dm, 0, 0, 0));
1090: PetscFunctionReturn(PETSC_SUCCESS);
1091: }
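/*
  Editor's note: illustrative sketch, not part of PETSc. It shows DMPlexDistributeData() for raw
  per-point integer data: the output array is allocated by the routine and must be freed by the
  caller with PetscFree(). The name ExampleDistributeIntData is hypothetical.
*/
static PetscErrorCode ExampleDistributeIntData(DM dm, PetscSF migrationSF, PetscSection origSection, PetscInt origData[], PetscSection *newSection, PetscInt **newData)
{
  PetscFunctionBegin;
  PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), newSection));
  PetscCall(DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origData, *newSection, (void **)newData));
  PetscFunctionReturn(PETSC_SUCCESS);
}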
1093: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1094: {
1095: DM_Plex *pmesh = (DM_Plex *)(dmParallel)->data;
1096: MPI_Comm comm;
1097: PetscSF coneSF;
1098: PetscSection originalConeSection, newConeSection;
1099: PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
1100: PetscBool flg;
1102: PetscFunctionBegin;
1105: PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones, dm, 0, 0, 0));
1106: /* Distribute cone section */
1107: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1108: PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
1109: PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
1110: PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
1111: PetscCall(DMSetUp(dmParallel));
1112: /* Communicate and renumber cones */
1113: PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
1114: PetscCall(PetscFree(remoteOffsets));
1115: PetscCall(DMPlexGetCones(dm, &cones));
1116: if (original) {
1117: PetscInt numCones;
1119: PetscCall(PetscSectionGetStorageSize(originalConeSection, &numCones));
1120: PetscCall(PetscMalloc1(numCones, &globCones));
1121: PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
1122: } else {
1123: globCones = cones;
1124: }
1125: PetscCall(DMPlexGetCones(dmParallel, &newCones));
1126: PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
1127: PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
1128: if (original) PetscCall(PetscFree(globCones));
1129: PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
1130: PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
1131: if (PetscDefined(USE_DEBUG)) {
1132: PetscInt p;
1133: PetscBool valid = PETSC_TRUE;
1134: for (p = 0; p < newConesSize; ++p) {
1135: if (newCones[p] < 0) {
1136: valid = PETSC_FALSE;
1137: PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %" PetscInt_FMT " not in overlap SF\n", PetscGlobalRank, p));
1138: }
1139: }
1140: PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1141: }
1142: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-cones_view", &flg));
1143: if (flg) {
1144: PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
1145: PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
1146: PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
1147: PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
1148: PetscCall(PetscSFView(coneSF, NULL));
1149: }
1150: PetscCall(DMPlexGetConeOrientations(dm, &cones));
1151: PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
1152: PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
1153: PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
1154: PetscCall(PetscSFDestroy(&coneSF));
1155: PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones, dm, 0, 0, 0));
1156: /* Create supports and stratify DMPlex */
1157: {
1158: PetscInt pStart, pEnd;
1160: PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
1161: PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
1162: }
1163: PetscCall(DMPlexSymmetrize(dmParallel));
1164: PetscCall(DMPlexStratify(dmParallel));
1165: {
1166: PetscBool useCone, useClosure, useAnchors;
1168: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
1169: PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
1170: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
1171: PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
1172: }
1173: PetscFunctionReturn(PETSC_SUCCESS);
1174: }
1176: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1177: {
1178: MPI_Comm comm;
1179: DM cdm, cdmParallel;
1180: PetscSection originalCoordSection, newCoordSection;
1181: Vec originalCoordinates, newCoordinates;
1182: PetscInt bs;
1183: const char *name;
1184: const PetscReal *maxCell, *Lstart, *L;
1186: PetscFunctionBegin;
1190: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1191: PetscCall(DMGetCoordinateDM(dm, &cdm));
1192: PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
1193: PetscCall(DMCopyDisc(cdm, cdmParallel));
1194: PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
1195: PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
1196: PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
1197: if (originalCoordinates) {
1198: PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1199: PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1200: PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1202: PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1203: PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
1204: PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1205: PetscCall(VecSetBlockSize(newCoordinates, bs));
1206: PetscCall(VecDestroy(&newCoordinates));
1207: }
1209: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1210: PetscCall(DMGetPeriodicity(dm, &maxCell, &Lstart, &L));
1211: PetscCall(DMSetPeriodicity(dmParallel, maxCell, Lstart, L));
1212: PetscCall(DMGetCellCoordinateDM(dm, &cdm));
1213: if (cdm) {
1214: PetscCall(DMClone(dmParallel, &cdmParallel));
1215: PetscCall(DMSetCellCoordinateDM(dmParallel, cdmParallel));
1216: PetscCall(DMCopyDisc(cdm, cdmParallel));
1217: PetscCall(DMDestroy(&cdmParallel));
1218: PetscCall(DMGetCellCoordinateSection(dm, &originalCoordSection));
1219: PetscCall(DMGetCellCoordinatesLocal(dm, &originalCoordinates));
1220: PetscCall(PetscSectionCreate(comm, &newCoordSection));
1221: if (originalCoordinates) {
1222: PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1223: PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1224: PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1226: PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1227: PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1228: PetscCall(VecSetBlockSize(newCoordinates, bs));
1229: PetscCall(DMSetCellCoordinateSection(dmParallel, bs, newCoordSection));
1230: PetscCall(DMSetCellCoordinatesLocal(dmParallel, newCoordinates));
1231: PetscCall(VecDestroy(&newCoordinates));
1232: }
1233: PetscCall(PetscSectionDestroy(&newCoordSection));
1234: }
1235: PetscFunctionReturn(PETSC_SUCCESS);
1236: }
1238: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1239: {
1240: DM_Plex *mesh = (DM_Plex *)dm->data;
1241: MPI_Comm comm;
1242: DMLabel depthLabel;
1243: PetscMPIInt rank;
1244: PetscInt depth, d, numLabels, numLocalLabels, l;
1245: PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1246: PetscObjectState depthState = -1;
1248: PetscFunctionBegin;
1252: PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels, dm, 0, 0, 0));
1253: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1254: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1256: /* If the user has changed the depth label, communicate it instead */
1257: PetscCall(DMPlexGetDepth(dm, &depth));
1258: PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
1259: if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject)depthLabel, &depthState));
1260: lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1261: PetscCall(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm));
1262: if (sendDepth) {
1263: PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
1264: PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
1265: }
1266: /* Everyone must have either the same number of labels, or none */
1267: PetscCall(DMGetNumLabels(dm, &numLocalLabels));
1268: numLabels = numLocalLabels;
1269: PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
1270: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1271: for (l = 0; l < numLabels; ++l) {
1272: DMLabel label = NULL, labelNew = NULL;
1273: PetscBool isDepth, lisOutput = PETSC_TRUE, isOutput;
1274: const char *name = NULL;
1276: if (hasLabels) {
1277: PetscCall(DMGetLabelByNum(dm, l, &label));
1278: /* Skip "depth" because it is recreated */
1279: PetscCall(PetscObjectGetName((PetscObject)label, &name));
1280: PetscCall(PetscStrcmp(name, "depth", &isDepth));
1281: } else {
1282: isDepth = PETSC_FALSE;
1283: }
1284: PetscCallMPI(MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm));
1285: if (isDepth && !sendDepth) continue;
1286: PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
1287: if (isDepth) {
1288: /* Put in any missing strata which can occur if users are managing the depth label themselves */
1289: PetscInt gdepth;
1291: PetscCall(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
1292: PetscCheck(!(depth >= 0) || !(gdepth != depth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, depth, gdepth);
1293: for (d = 0; d <= gdepth; ++d) {
1294: PetscBool has;
1296: PetscCall(DMLabelHasStratum(labelNew, d, &has));
1297: if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
1298: }
1299: }
1300: PetscCall(DMAddLabel(dmParallel, labelNew));
1301: /* Put the output flag in the new label */
1302: if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
1303: PetscCall(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm));
1304: PetscCall(PetscObjectGetName((PetscObject)labelNew, &name));
1305: PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
1306: PetscCall(DMLabelDestroy(&labelNew));
1307: }
1308: {
1309: DMLabel ctLabel;
1311: // Reset label for fast lookup
1312: PetscCall(DMPlexGetCellTypeLabel(dmParallel, &ctLabel));
1313: PetscCall(DMLabelMakeAllInvalid_Internal(ctLabel));
1314: }
1315: PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels, dm, 0, 0, 0));
1316: PetscFunctionReturn(PETSC_SUCCESS);
1317: }
1319: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1320: {
1321: DM_Plex *mesh = (DM_Plex *)dm->data;
1322: DM_Plex *pmesh = (DM_Plex *)(dmParallel)->data;
1323: MPI_Comm comm;
1324: DM refTree;
1325: PetscSection origParentSection, newParentSection;
1326: PetscInt *origParents, *origChildIDs;
1327: PetscBool flg;
1329: PetscFunctionBegin;
1332: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1334: /* Set up tree */
1335: PetscCall(DMPlexGetReferenceTree(dm, &refTree));
1336: PetscCall(DMPlexSetReferenceTree(dmParallel, refTree));
1337: PetscCall(DMPlexGetTree(dm, &origParentSection, &origParents, &origChildIDs, NULL, NULL));
1338: if (origParentSection) {
1339: PetscInt pStart, pEnd;
1340: PetscInt *newParents, *newChildIDs, *globParents;
1341: PetscInt *remoteOffsetsParents, newParentSize;
1342: PetscSF parentSF;
1344: PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
1345: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel), &newParentSection));
1346: PetscCall(PetscSectionSetChart(newParentSection, pStart, pEnd));
1347: PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection));
1348: PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
1349: PetscCall(PetscFree(remoteOffsetsParents));
1350: PetscCall(PetscSectionGetStorageSize(newParentSection, &newParentSize));
1351: PetscCall(PetscMalloc2(newParentSize, &newParents, newParentSize, &newChildIDs));
1352: if (original) {
1353: PetscInt numParents;
1355: PetscCall(PetscSectionGetStorageSize(origParentSection, &numParents));
1356: PetscCall(PetscMalloc1(numParents, &globParents));
1357: PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
1358: } else {
1359: globParents = origParents;
1360: }
1361: PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
1362: PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
1363: if (original) PetscCall(PetscFree(globParents));
1364: PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
1365: PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
1366: PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
1367: if (PetscDefined(USE_DEBUG)) {
1368: PetscInt p;
1369: PetscBool valid = PETSC_TRUE;
1370: for (p = 0; p < newParentSize; ++p) {
1371: if (newParents[p] < 0) {
1372: valid = PETSC_FALSE;
1373: PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %" PetscInt_FMT " not in overlap SF\n", p));
1374: }
1375: }
1376: PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1377: }
1378: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-parents_view", &flg));
1379: if (flg) {
1380: PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
1381: PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
1382: PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
1383: PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
1384: PetscCall(PetscSFView(parentSF, NULL));
1385: }
1386: PetscCall(DMPlexSetTree(dmParallel, newParentSection, newParents, newChildIDs));
1387: PetscCall(PetscSectionDestroy(&newParentSection));
1388: PetscCall(PetscFree2(newParents, newChildIDs));
1389: PetscCall(PetscSFDestroy(&parentSF));
1390: }
1391: pmesh->useAnchors = mesh->useAnchors;
1392: PetscFunctionReturn(PETSC_SUCCESS);
1393: }
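/*
  The parent-section migration above follows the generic PetscSF section-data pattern.
  A minimal sketch under assumed names (migrationSF, origSec, origData, and dmTarget are
  placeholders for a migration SF, a source section, its integer data, and the target DM):

    PetscSection newSec;
    PetscSF      fieldSF;
    PetscInt    *remoteOffsets, *newData, pStart, pEnd, size;

    PetscCall(DMPlexGetChart(dmTarget, &pStart, &pEnd));
    PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmTarget), &newSec));
    PetscCall(PetscSectionSetChart(newSec, pStart, pEnd));
    PetscCall(PetscSFDistributeSection(migrationSF, origSec, &remoteOffsets, newSec));        // move the layout
    PetscCall(PetscSFCreateSectionSF(migrationSF, origSec, remoteOffsets, newSec, &fieldSF)); // SF over the dofs
    PetscCall(PetscFree(remoteOffsets));
    PetscCall(PetscSectionGetStorageSize(newSec, &size));
    PetscCall(PetscMalloc1(size, &newData));
    PetscCall(PetscSFBcastBegin(fieldSF, MPIU_INT, origData, newData, MPI_REPLACE));          // move the values
    PetscCall(PetscSFBcastEnd(fieldSF, MPIU_INT, origData, newData, MPI_REPLACE));
    PetscCall(PetscSFDestroy(&fieldSF));
*/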
1395: /*@
1396: DMPlexSetPartitionBalance - Should distribution of the `DM` attempt to balance the shared point partition?
1398: Input Parameters:
1399: + dm - The `DMPLEX` object
1400: - flg - Balance the partition?
1402: Level: intermediate
1404: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetPartitionBalance()`
1405: @*/
1406: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1407: {
1408: DM_Plex *mesh = (DM_Plex *)dm->data;
1410: PetscFunctionBegin;
1411: mesh->partitionBalance = flg;
1412: PetscFunctionReturn(PETSC_SUCCESS);
1413: }
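/*
  A small usage sketch: enable ownership balancing before distributing. The variable dm is a
  placeholder for a serial DMPLEX; on a single rank DMPlexDistribute() returns a NULL dmDist.

    DM dmDist = NULL;

    PetscCall(DMPlexSetPartitionBalance(dm, PETSC_TRUE));
    PetscCall(DMPlexDistribute(dm, 0, NULL, &dmDist));
    if (dmDist) {
      PetscCall(DMDestroy(&dm));
      dm = dmDist;
    }
*/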
1415: /*@
1416: DMPlexGetPartitionBalance - Does distribution of the `DM` attempt to balance the shared point partition?
1418: Input Parameter:
1419: . dm - The `DMPLEX` object
1421: Output Parameter:
1422: . flg - Balance the partition?
1424: Level: intermediate
1426: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexSetPartitionBalance()`
1427: @*/
1428: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1429: {
1430: DM_Plex *mesh = (DM_Plex *)dm->data;
1432: PetscFunctionBegin;
1434: PetscAssertPointer(flg, 2);
1435: *flg = mesh->partitionBalance;
1436: PetscFunctionReturn(PETSC_SUCCESS);
1437: }
1439: typedef struct {
1440: PetscInt vote, rank, index;
1441: } Petsc3Int;
1443: /* MaxLoc, but carry a third piece of information around */
1444: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1445: {
1446: Petsc3Int *a = (Petsc3Int *)inout_;
1447: Petsc3Int *b = (Petsc3Int *)in_;
1448: PetscInt i, len = *len_;
1449: for (i = 0; i < len; i++) {
1450: if (a[i].vote < b[i].vote) {
1451: a[i].vote = b[i].vote;
1452: a[i].rank = b[i].rank;
1453: a[i].index = b[i].index;
1454:     } else if (a[i].vote <= b[i].vote) { /* votes tie: prefer the lower rank (and its index) */
1455: if (a[i].rank >= b[i].rank) {
1456: a[i].rank = b[i].rank;
1457: a[i].index = b[i].index;
1458: }
1459: }
1460: }
1461: }
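/*
  The datatype/op pair for this reduction is built on the fly in DMPlexCreatePointSF() below.
  A standalone sketch of the same construction with a plain MPI_Allreduce (comm and the vote
  value are placeholders):

    MPI_Datatype voteType;
    MPI_Op       voteOp;
    Petsc3Int    mine, best;
    PetscMPIInt  rank;

    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    mine.vote  = rank; // the bid; DMPlexCreatePointSF() uses a (possibly shifted) rank here
    mine.rank  = rank;
    mine.index = 0;
    PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &voteType));
    PetscCallMPI(MPI_Type_commit(&voteType));
    PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &voteOp));
    PetscCallMPI(MPI_Allreduce(&mine, &best, 1, voteType, voteOp, comm));
    PetscCallMPI(MPI_Op_free(&voteOp));
    PetscCallMPI(MPI_Type_free(&voteType));
    // best.rank is the winning rank, best.index the winner's local index
*/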
1463: /*@C
1464:   DMPlexCreatePointSF - Build a point `PetscSF` from a `PetscSF` describing a point migration
1466: Input Parameters:
1467: + dm - The source `DMPLEX` object
1468: . migrationSF - The star forest that describes the parallel point remapping
1469: - ownership - Flag causing a vote to determine point ownership
1471: Output Parameter:
1472: . pointSF - The star forest describing the point overlap in the remapped `DM`
1474: Level: developer
1476: Note:
1477: Output `pointSF` is guaranteed to have the array of local indices (leaves) sorted.
1479: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1480: @*/
1481: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1482: {
1483: PetscMPIInt rank, size;
1484: PetscInt p, nroots, nleaves, idx, npointLeaves;
1485: PetscInt *pointLocal;
1486: const PetscInt *leaves;
1487: const PetscSFNode *roots;
1488: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1489: Vec shifts;
1490: const PetscInt numShifts = 13759;
1491: const PetscScalar *shift = NULL;
1492: const PetscBool shiftDebug = PETSC_FALSE;
1493: PetscBool balance;
1495: PetscFunctionBegin;
1497: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
1498: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
1499: PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1501: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1502: PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
1503: PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
1504: if (ownership) {
1505: MPI_Op op;
1506: MPI_Datatype datatype;
1507: Petsc3Int *rootVote = NULL, *leafVote = NULL;
1508: /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1509: if (balance) {
1510: PetscRandom r;
1512: PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
1513: PetscCall(PetscRandomSetInterval(r, 0, 2467 * size));
1514: PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
1515: PetscCall(VecSetSizes(shifts, numShifts, numShifts));
1516: PetscCall(VecSetType(shifts, VECSTANDARD));
1517: PetscCall(VecSetRandom(shifts, r));
1518: PetscCall(PetscRandomDestroy(&r));
1519: PetscCall(VecGetArrayRead(shifts, &shift));
1520: }
1522: PetscCall(PetscMalloc1(nroots, &rootVote));
1523: PetscCall(PetscMalloc1(nleaves, &leafVote));
1524:     /* Point ownership vote: the process with the highest (possibly shifted) rank owns shared points */
1525: for (p = 0; p < nleaves; ++p) {
1526: if (shiftDebug) {
1527: PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject)dm), "[%d] Point %" PetscInt_FMT " RemotePoint %" PetscInt_FMT " Shift %" PetscInt_FMT " MyRank %" PetscInt_FMT "\n", rank, leaves ? leaves[p] : p, roots[p].index,
1528: (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]), (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size));
1529: }
1530: /* Either put in a bid or we know we own it */
1531: leafVote[p].vote = (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size;
1532: leafVote[p].rank = rank;
1533: leafVote[p].index = p;
1534: }
1535: for (p = 0; p < nroots; p++) {
1536:       /* Roots must not participate in the reduction, so flag them with values that MAXLOC will never pick */
1537: rootVote[p].vote = -3;
1538: rootVote[p].rank = -3;
1539: rootVote[p].index = -3;
1540: }
1541: PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
1542: PetscCallMPI(MPI_Type_commit(&datatype));
1543: PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
1544: PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
1545: PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
1546: PetscCallMPI(MPI_Op_free(&op));
1547: PetscCallMPI(MPI_Type_free(&datatype));
1548: for (p = 0; p < nroots; p++) {
1549: rootNodes[p].rank = rootVote[p].rank;
1550: rootNodes[p].index = rootVote[p].index;
1551: }
1552: PetscCall(PetscFree(leafVote));
1553: PetscCall(PetscFree(rootVote));
1554: } else {
1555: for (p = 0; p < nroots; p++) {
1556: rootNodes[p].index = -1;
1557: rootNodes[p].rank = rank;
1558: }
1559: for (p = 0; p < nleaves; p++) {
1560: /* Write new local id into old location */
1561: if (roots[p].rank == rank) rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1562: }
1563: }
1564: PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes, MPI_REPLACE));
1565: PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes, MPI_REPLACE));
1567: for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1568: if (leafNodes[p].rank != rank) npointLeaves++;
1569: }
1570: PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
1571: PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
1572: for (idx = 0, p = 0; p < nleaves; p++) {
1573: if (leafNodes[p].rank != rank) {
1574:       /* Note that pointLocal is automatically sorted as it is a sublist of 0, ..., nleaves-1 */
1575: pointLocal[idx] = p;
1576: pointRemote[idx] = leafNodes[p];
1577: idx++;
1578: }
1579: }
1580: if (shift) {
1581: PetscCall(VecRestoreArrayRead(shifts, &shift));
1582: PetscCall(VecDestroy(&shifts));
1583: }
1584: if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject)dm), PETSC_STDOUT));
1585: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), pointSF));
1586: PetscCall(PetscSFSetFromOptions(*pointSF));
1587: PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
1588: PetscCall(PetscFree2(rootNodes, leafNodes));
1589: PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1590: if (PetscDefined(USE_DEBUG)) PetscCall(DMPlexCheckPointSF(dm, *pointSF, PETSC_FALSE));
1591: PetscFunctionReturn(PETSC_SUCCESS);
1592: }
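/*
  Typical usage sketch: after migrating a mesh over migrationSF, build and attach its point SF.
  dmNew and migrationSF are placeholders for the migrated DM and the SF used to migrate it.

    PetscSF pointSF;

    PetscCall(DMPlexCreatePointSF(dmNew, migrationSF, PETSC_TRUE, &pointSF)); // vote on shared-point ownership
    PetscCall(DMSetPointSF(dmNew, pointSF));
    PetscCall(PetscSFDestroy(&pointSF));
*/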
1594: /*@C
1595: DMPlexMigrate - Migrates internal `DM` data over the supplied star forest
1597: Collective
1599: Input Parameters:
1600: + dm - The source `DMPLEX` object
1601: - sf - The star forest communication context describing the migration pattern
1603: Output Parameter:
1604: . targetDM - The target `DMPLEX` object
1606: Level: intermediate
1608: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1609: @*/
1610: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1611: {
1612: MPI_Comm comm;
1613: PetscInt dim, cdim, nroots;
1614: PetscSF sfPoint;
1615: ISLocalToGlobalMapping ltogMigration;
1616: ISLocalToGlobalMapping ltogOriginal = NULL;
1618: PetscFunctionBegin;
1620: PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0));
1621: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1622: PetscCall(DMGetDimension(dm, &dim));
1623: PetscCall(DMSetDimension(targetDM, dim));
1624: PetscCall(DMGetCoordinateDim(dm, &cdim));
1625: PetscCall(DMSetCoordinateDim(targetDM, cdim));
1627: /* Check for a one-to-all distribution pattern */
1628: PetscCall(DMGetPointSF(dm, &sfPoint));
1629: PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL));
1630: if (nroots >= 0) {
1631: IS isOriginal;
1632: PetscInt n, size, nleaves;
1633: PetscInt *numbering_orig, *numbering_new;
1635: /* Get the original point numbering */
1636: PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal));
1637:     PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal));
1638: PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size));
1639: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1640: /* Convert to positive global numbers */
1641: for (n = 0; n < size; n++) {
1642: if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n] + 1);
1643: }
1644: /* Derive the new local-to-global mapping from the old one */
1645: PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL));
1646: PetscCall(PetscMalloc1(nleaves, &numbering_new));
1647: PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1648: PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1649:     PetscCall(ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration));
1650:     PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1651: PetscCall(ISDestroy(&isOriginal));
1652: } else {
1653: /* One-to-all distribution pattern: We can derive LToG from SF */
1654:     PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration));
1655: }
1656: PetscCall(PetscObjectSetName((PetscObject)ltogMigration, "Point renumbering for DM migration"));
1657: PetscCall(ISLocalToGlobalMappingViewFromOptions(ltogMigration, (PetscObject)dm, "-partition_view"));
1658: /* Migrate DM data to target DM */
1659: PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM));
1660: PetscCall(DMPlexDistributeLabels(dm, sf, targetDM));
1661: PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM));
1662: PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM));
1663:   PetscCall(ISLocalToGlobalMappingDestroy(&ltogOriginal));
1664:   PetscCall(ISLocalToGlobalMappingDestroy(&ltogMigration));
1665: PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0));
1666: PetscFunctionReturn(PETSC_SUCCESS);
1667: }
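/*
  Usage sketch: migrate a mesh over a given migration SF into a freshly created DMPLEX. The point
  SF of the target is not built here; a caller normally follows up with DMPlexCreatePointSF() and
  DMSetPointSF(), as DMPlexDistribute() does below. migrationSF is a placeholder.

    DM dmNew;

    PetscCall(DMPlexCreate(PetscObjectComm((PetscObject)dm), &dmNew));
    PetscCall(DMPlexMigrate(dm, migrationSF, dmNew));
*/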
1669: /*@C
1670: DMPlexDistribute - Distributes the mesh and any associated sections.
1672: Collective
1674: Input Parameters:
1675: + dm - The original `DMPLEX` object
1676: - overlap - The overlap of partitions, 0 is the default
1678: Output Parameters:
1679: + sf - The `PetscSF` used for point distribution, or `NULL` if not needed
1680: - dmParallel - The distributed `DMPLEX` object
1682: Level: intermediate
1684: Note:
1685: If the mesh was not distributed, the output `dmParallel` will be `NULL`.
1687: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
1688: representation on the mesh.
1690: .seealso: `DMPLEX`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexGetOverlap()`
1691: @*/
1692: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1693: {
1694: MPI_Comm comm;
1695: PetscPartitioner partitioner;
1696: IS cellPart;
1697: PetscSection cellPartSection;
1698: DM dmCoord;
1699: DMLabel lblPartition, lblMigration;
1700: PetscSF sfMigration, sfStratified, sfPoint;
1701: PetscBool flg, balance;
1702: PetscMPIInt rank, size;
1704: PetscFunctionBegin;
1707: if (sf) PetscAssertPointer(sf, 3);
1708: PetscAssertPointer(dmParallel, 4);
1710: if (sf) *sf = NULL;
1711: *dmParallel = NULL;
1712: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1713: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1714: PetscCallMPI(MPI_Comm_size(comm, &size));
1715: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1717: PetscCall(PetscLogEventBegin(DMPLEX_Distribute, dm, 0, 0, 0));
1718: /* Create cell partition */
1719: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
1720: PetscCall(PetscSectionCreate(comm, &cellPartSection));
1721: PetscCall(DMPlexGetPartitioner(dm, &partitioner));
1722: PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
1723: PetscCall(PetscLogEventBegin(DMPLEX_PartSelf, dm, 0, 0, 0));
1724: {
1725: /* Convert partition to DMLabel */
1726: IS is;
1727: PetscHSetI ht;
1728: const PetscInt *points;
1729: PetscInt *iranks;
1730: PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks;
1732: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
1733: /* Preallocate strata */
1734: PetscCall(PetscHSetICreate(&ht));
1735: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1736: for (proc = pStart; proc < pEnd; proc++) {
1737: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1738: if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
1739: }
1740: PetscCall(PetscHSetIGetSize(ht, &nranks));
1741: PetscCall(PetscMalloc1(nranks, &iranks));
1742: PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
1743: PetscCall(PetscHSetIDestroy(&ht));
1744: PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
1745: PetscCall(PetscFree(iranks));
1746: /* Inline DMPlexPartitionLabelClosure() */
1747: PetscCall(ISGetIndices(cellPart, &points));
1748: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1749: for (proc = pStart; proc < pEnd; proc++) {
1750: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1751: if (!npoints) continue;
1752: PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
1753: PetscCall(DMPlexClosurePoints_Private(dm, npoints, points + poff, &is));
1754: PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
1755: PetscCall(ISDestroy(&is));
1756: }
1757: PetscCall(ISRestoreIndices(cellPart, &points));
1758: }
1759: PetscCall(PetscLogEventEnd(DMPLEX_PartSelf, dm, 0, 0, 0));
1761: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
1762: PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
1763: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration));
1764: PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
1765: PetscCall(PetscSFDestroy(&sfMigration));
1766: sfMigration = sfStratified;
1767: PetscCall(PetscSFSetUp(sfMigration));
1768: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
1769: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-partition_view", &flg));
1770: if (flg) {
1771: PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
1772: PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
1773: }
1775: /* Create non-overlapping parallel DM and migrate internal data */
1776: PetscCall(DMPlexCreate(comm, dmParallel));
1777: PetscCall(PetscObjectSetName((PetscObject)*dmParallel, "Parallel Mesh"));
1778: PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));
1780: /* Build the point SF without overlap */
1781: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1782: PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
1783: PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
1784: PetscCall(DMSetPointSF(*dmParallel, sfPoint));
1785: PetscCall(DMPlexMigrateIsoperiodicFaceSF_Internal(dm, *dmParallel, sfMigration));
1786: PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
1787: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1788: if (flg) PetscCall(PetscSFView(sfPoint, NULL));
1790: if (overlap > 0) {
1791: DM dmOverlap;
1792: PetscInt nroots, nleaves, noldleaves, l;
1793: const PetscInt *oldLeaves;
1794: PetscSFNode *newRemote, *permRemote;
1795: const PetscSFNode *oldRemote;
1796: PetscSF sfOverlap, sfOverlapPoint;
1798: /* Add the partition overlap to the distributed DM */
1799: PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
1800: PetscCall(DMDestroy(dmParallel));
1801: *dmParallel = dmOverlap;
1802: if (flg) {
1803: PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
1804: PetscCall(PetscSFView(sfOverlap, NULL));
1805: }
1807: /* Re-map the migration SF to establish the full migration pattern */
1808: PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
1809: PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
1810: PetscCall(PetscMalloc1(nleaves, &newRemote));
1811: /* oldRemote: original root point mapping to original leaf point
1812: newRemote: original leaf point mapping to overlapped leaf point */
1813: if (oldLeaves) {
1814: /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1815: PetscCall(PetscMalloc1(noldleaves, &permRemote));
1816: for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1817: oldRemote = permRemote;
1818: }
1819: PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote, MPI_REPLACE));
1820: PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote, MPI_REPLACE));
1821: if (oldLeaves) PetscCall(PetscFree(oldRemote));
1822: PetscCall(PetscSFCreate(comm, &sfOverlapPoint));
1823: PetscCall(PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
1824: PetscCall(PetscSFDestroy(&sfOverlap));
1825: PetscCall(PetscSFDestroy(&sfMigration));
1826: sfMigration = sfOverlapPoint;
1827: }
1828: /* Cleanup Partition */
1829: PetscCall(DMLabelDestroy(&lblPartition));
1830: PetscCall(DMLabelDestroy(&lblMigration));
1831: PetscCall(PetscSectionDestroy(&cellPartSection));
1832: PetscCall(ISDestroy(&cellPart));
1833: /* Copy discretization */
1834: PetscCall(DMCopyDisc(dm, *dmParallel));
1835: /* Create sfNatural */
1836: if (dm->useNatural) {
1837: PetscSection section;
1839: PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
1840:     PetscCall(DMGetLocalSection(dm, &section));
1842: /* First DM with useNatural = PETSC_TRUE is considered natural */
1843:     /* sfMigration and sfNatural are, respectively, the point SF and dof SF mapping to this natural DM */
1844: /* Compose with a previous sfNatural if present */
1845: if (dm->sfNatural) {
1846: PetscSF natSF;
1848: PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &natSF));
1849: PetscCall(PetscSFCompose(dm->sfNatural, natSF, &(*dmParallel)->sfNatural));
1850: PetscCall(PetscSFDestroy(&natSF));
1851: } else {
1852: PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
1853: }
1854: /* Compose with a previous sfMigration if present */
1855: if (dm->sfMigration) {
1856: PetscSF naturalPointSF;
1858: PetscCall(PetscSFCompose(dm->sfMigration, sfMigration, &naturalPointSF));
1859: PetscCall(PetscSFDestroy(&sfMigration));
1860: sfMigration = naturalPointSF;
1861: }
1862: }
1863: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmParallel));
1864: /* Cleanup */
1865: if (sf) {
1866: *sf = sfMigration;
1867: } else PetscCall(PetscSFDestroy(&sfMigration));
1868: PetscCall(PetscSFDestroy(&sfPoint));
1869: PetscCall(PetscLogEventEnd(DMPLEX_Distribute, dm, 0, 0, 0));
1870: PetscFunctionReturn(PETSC_SUCCESS);
1871: }
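/*
  The customary calling sequence, replacing the serial mesh with its distributed counterpart
  (dm is a placeholder; dmDist is NULL when running on a single rank):

    DM      dmDist = NULL;
    PetscSF sfDist = NULL;

    PetscCall(DMPlexDistribute(dm, 0, &sfDist, &dmDist));
    if (dmDist) {
      PetscCall(DMDestroy(&dm));
      dm = dmDist;
    }
    PetscCall(PetscSFDestroy(&sfDist));
*/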
1873: /*@C
1874: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping `DM`.
1876: Collective
1878: Input Parameters:
1879: + dm - The non-overlapping distributed `DMPLEX` object
1880: - overlap - The overlap of partitions (the same on all ranks)
1882: Output Parameters:
1883: + sf - The `PetscSF` used for point distribution
1884: - dmOverlap - The overlapping distributed `DMPLEX` object, or `NULL`
1886: Options Database Keys:
1887: + -dm_plex_overlap_labels <name1,name2,...> - List of overlap label names
1888: . -dm_plex_overlap_values <int1,int2,...> - List of overlap label values
1889: . -dm_plex_overlap_exclude_label <name> - Label used to exclude points from overlap
1890: - -dm_plex_overlap_exclude_value <int> - Label value used to exclude points from overlap
1892: Level: advanced
1894: Notes:
1895:   If the mesh was not distributed, the output `dmOverlap` will be `NULL`.
1897: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
1898: representation on the mesh.
1900: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexCreateOverlapLabel()`, `DMPlexGetOverlap()`
1901: @*/
1902: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1903: {
1904: DM_Plex *mesh = (DM_Plex *)dm->data;
1905: MPI_Comm comm;
1906: PetscMPIInt size, rank;
1907: PetscSection rootSection, leafSection;
1908: IS rootrank, leafrank;
1909: DM dmCoord;
1910: DMLabel lblOverlap;
1911: PetscSF sfOverlap, sfStratified, sfPoint;
1913: PetscFunctionBegin;
1916: if (sf) PetscAssertPointer(sf, 3);
1917: PetscAssertPointer(dmOverlap, 4);
1919: if (sf) *sf = NULL;
1920: *dmOverlap = NULL;
1921: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1922: PetscCallMPI(MPI_Comm_size(comm, &size));
1923: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1924: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1925: {
1926:     // We need to get options for the _already distributed_ mesh, so it must be done here
1927: PetscInt overlap;
1928: const char *prefix;
1929: char oldPrefix[PETSC_MAX_PATH_LEN];
1931: oldPrefix[0] = '\0';
1932: PetscCall(PetscObjectGetOptionsPrefix((PetscObject)dm, &prefix));
1933: PetscCall(PetscStrncpy(oldPrefix, prefix, sizeof(oldPrefix)));
1934: PetscCall(PetscObjectAppendOptionsPrefix((PetscObject)dm, "dist_"));
1935: PetscCall(DMPlexGetOverlap(dm, &overlap));
1936: PetscObjectOptionsBegin((PetscObject)dm);
1937: PetscCall(DMSetFromOptions_Overlap_Plex(dm, PetscOptionsObject, &overlap));
1938: PetscOptionsEnd();
1939: PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dm, oldPrefix[0] == '\0' ? NULL : oldPrefix));
1940: }
1941: PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
1942: /* Compute point overlap with neighbouring processes on the distributed DM */
1943: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
1944: PetscCall(PetscSectionCreate(comm, &rootSection));
1945: PetscCall(PetscSectionCreate(comm, &leafSection));
1946: PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
1947: if (mesh->numOvLabels) PetscCall(DMPlexCreateOverlapLabelFromLabels(dm, mesh->numOvLabels, mesh->ovLabels, mesh->ovValues, mesh->numOvExLabels, mesh->ovExLabels, mesh->ovExValues, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
1948: else PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
1949: /* Convert overlap label to stratified migration SF */
1950: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap));
1951: PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
1952: PetscCall(PetscSFDestroy(&sfOverlap));
1953: sfOverlap = sfStratified;
1954: PetscCall(PetscObjectSetName((PetscObject)sfOverlap, "Overlap SF"));
1955: PetscCall(PetscSFSetFromOptions(sfOverlap));
1957: PetscCall(PetscSectionDestroy(&rootSection));
1958: PetscCall(PetscSectionDestroy(&leafSection));
1959: PetscCall(ISDestroy(&rootrank));
1960: PetscCall(ISDestroy(&leafrank));
1961: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
1963: /* Build the overlapping DM */
1964: PetscCall(DMPlexCreate(comm, dmOverlap));
1965: PetscCall(PetscObjectSetName((PetscObject)*dmOverlap, "Parallel Mesh"));
1966: PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
1967: /* Store the overlap in the new DM */
1968: PetscCall(DMPlexSetOverlap_Plex(*dmOverlap, dm, overlap));
1969: /* Build the new point SF */
1970: PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
1971: PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
1972: PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
1973: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1974: PetscCall(DMGetCellCoordinateDM(*dmOverlap, &dmCoord));
1975: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1976: PetscCall(PetscSFDestroy(&sfPoint));
1977: /* Cleanup overlap partition */
1978: PetscCall(DMLabelDestroy(&lblOverlap));
1979: if (sf) *sf = sfOverlap;
1980: else PetscCall(PetscSFDestroy(&sfOverlap));
1981: PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
1982: PetscFunctionReturn(PETSC_SUCCESS);
1983: }
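/*
  Usage sketch: add one layer of overlap cells to an already distributed mesh and swap it in
  (dm is a placeholder; dmOverlap is NULL when running on a single rank):

    DM dmOverlap = NULL;

    PetscCall(DMPlexDistributeOverlap(dm, 1, NULL, &dmOverlap));
    if (dmOverlap) {
      PetscCall(DMDestroy(&dm));
      dm = dmOverlap;
    }
*/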
1985: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
1986: {
1987: DM_Plex *mesh = (DM_Plex *)dm->data;
1989: PetscFunctionBegin;
1990: *overlap = mesh->overlap;
1991: PetscFunctionReturn(PETSC_SUCCESS);
1992: }
1994: PetscErrorCode DMPlexSetOverlap_Plex(DM dm, DM dmSrc, PetscInt overlap)
1995: {
1996: DM_Plex *mesh = NULL;
1997: DM_Plex *meshSrc = NULL;
1999: PetscFunctionBegin;
2001: mesh = (DM_Plex *)dm->data;
2002: mesh->overlap = overlap;
2003: if (dmSrc) {
2005: meshSrc = (DM_Plex *)dmSrc->data;
2006: mesh->overlap += meshSrc->overlap;
2007: }
2008: PetscFunctionReturn(PETSC_SUCCESS);
2009: }
2011: /*@
2012: DMPlexGetOverlap - Get the width of the cell overlap
2014: Not Collective
2016: Input Parameter:
2017: . dm - The `DM`
2019: Output Parameter:
2020: . overlap - the width of the cell overlap
2022: Level: intermediate
2024: .seealso: `DMPLEX`, `DMPlexSetOverlap()`, `DMPlexDistribute()`
2025: @*/
2026: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
2027: {
2028: PetscFunctionBegin;
2030: PetscAssertPointer(overlap, 2);
2031: PetscUseMethod(dm, "DMPlexGetOverlap_C", (DM, PetscInt *), (dm, overlap));
2032: PetscFunctionReturn(PETSC_SUCCESS);
2033: }
2035: /*@
2036: DMPlexSetOverlap - Set the width of the cell overlap
2038: Logically Collective
2040: Input Parameters:
2041: + dm - The `DM`
2042: . dmSrc - The `DM` that produced this one, or `NULL`
2043: - overlap - the width of the cell overlap
2045: Level: intermediate
2047: Note:
2048: The overlap from `dmSrc` is added to `dm`
2050: .seealso: `DMPLEX`, `DMPlexGetOverlap()`, `DMPlexDistribute()`
2051: @*/
2052: PetscErrorCode DMPlexSetOverlap(DM dm, DM dmSrc, PetscInt overlap)
2053: {
2054: PetscFunctionBegin;
2057: PetscTryMethod(dm, "DMPlexSetOverlap_C", (DM, DM, PetscInt), (dm, dmSrc, overlap));
2058: PetscFunctionReturn(PETSC_SUCCESS);
2059: }
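/*
  Usage sketch: record that a derived mesh carries one more level of overlap than its source,
  then query the accumulated width (dmOverlap and dm are placeholders):

    PetscInt ov;

    PetscCall(DMPlexSetOverlap(dmOverlap, dm, 1)); // dmOverlap ends up with dm's overlap + 1
    PetscCall(DMPlexGetOverlap(dmOverlap, &ov));
*/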
2061: PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
2062: {
2063: DM_Plex *mesh = (DM_Plex *)dm->data;
2065: PetscFunctionBegin;
2066: mesh->distDefault = dist;
2067: PetscFunctionReturn(PETSC_SUCCESS);
2068: }
2070: /*@
2071: DMPlexDistributeSetDefault - Set flag indicating whether the `DM` should be distributed by default
2073: Logically Collective
2075: Input Parameters:
2076: + dm - The `DM`
2077: - dist - Flag for distribution
2079: Level: intermediate
2081: .seealso: `DMPLEX`, `DMPlexDistributeGetDefault()`, `DMPlexDistribute()`
2082: @*/
2083: PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
2084: {
2085: PetscFunctionBegin;
2088: PetscTryMethod(dm, "DMPlexDistributeSetDefault_C", (DM, PetscBool), (dm, dist));
2089: PetscFunctionReturn(PETSC_SUCCESS);
2090: }
2092: PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
2093: {
2094: DM_Plex *mesh = (DM_Plex *)dm->data;
2096: PetscFunctionBegin;
2097: *dist = mesh->distDefault;
2098: PetscFunctionReturn(PETSC_SUCCESS);
2099: }
2101: /*@
2102: DMPlexDistributeGetDefault - Get flag indicating whether the `DM` should be distributed by default
2104: Not Collective
2106: Input Parameter:
2107: . dm - The `DM`
2109: Output Parameter:
2110: . dist - Flag for distribution
2112: Level: intermediate
2114: .seealso: `DMPLEX`, `DM`, `DMPlexDistributeSetDefault()`, `DMPlexDistribute()`
2115: @*/
2116: PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
2117: {
2118: PetscFunctionBegin;
2120: PetscAssertPointer(dist, 2);
2121: PetscUseMethod(dm, "DMPlexDistributeGetDefault_C", (DM, PetscBool *), (dm, dist));
2122: PetscFunctionReturn(PETSC_SUCCESS);
2123: }
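/*
  Usage sketch (assumption: this default flag is what option processing consults when
  -dm_distribute is not given explicitly): keep the mesh serial through DMSetFromOptions().

    PetscCall(DMPlexDistributeSetDefault(dm, PETSC_FALSE));
    PetscCall(DMSetFromOptions(dm));
*/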
2125: /*@C
2126: DMPlexGetGatherDM - Get a copy of the `DMPLEX` that gathers all points on the
2127: root process of the original's communicator.
2129: Collective
2131: Input Parameter:
2132: . dm - the original `DMPLEX` object
2134: Output Parameters:
2135: + sf - the `PetscSF` used for point distribution (optional)
2136: - gatherMesh - the gathered `DM` object, or `NULL`
2138: Level: intermediate
2140: .seealso: `DMPLEX`, `DM`, `PetscSF`, `DMPlexDistribute()`, `DMPlexGetRedundantDM()`
2141: @*/
2142: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
2143: {
2144: MPI_Comm comm;
2145: PetscMPIInt size;
2146: PetscPartitioner oldPart, gatherPart;
2148: PetscFunctionBegin;
2150: PetscAssertPointer(gatherMesh, 3);
2151: *gatherMesh = NULL;
2152: if (sf) *sf = NULL;
2153: comm = PetscObjectComm((PetscObject)dm);
2154: PetscCallMPI(MPI_Comm_size(comm, &size));
2155: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
2156: PetscCall(DMPlexGetPartitioner(dm, &oldPart));
2157: PetscCall(PetscObjectReference((PetscObject)oldPart));
2158: PetscCall(PetscPartitionerCreate(comm, &gatherPart));
2159: PetscCall(PetscPartitionerSetType(gatherPart, PETSCPARTITIONERGATHER));
2160: PetscCall(DMPlexSetPartitioner(dm, gatherPart));
2161: PetscCall(DMPlexDistribute(dm, 0, sf, gatherMesh));
2163: PetscCall(DMPlexSetPartitioner(dm, oldPart));
2164: PetscCall(PetscPartitionerDestroy(&gatherPart));
2165: PetscCall(PetscPartitionerDestroy(&oldPart));
2166: PetscFunctionReturn(PETSC_SUCCESS);
2167: }
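/*
  Usage sketch: gather the mesh onto rank 0, e.g. for serial viewing, and discard the copy
  (the -gather_dm_view option name is a placeholder):

    DM gatherMesh = NULL;

    PetscCall(DMPlexGetGatherDM(dm, NULL, &gatherMesh));
    if (gatherMesh) {
      PetscCall(DMViewFromOptions(gatherMesh, NULL, "-gather_dm_view"));
      PetscCall(DMDestroy(&gatherMesh));
    }
*/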
2169: /*@C
2170: DMPlexGetRedundantDM - Get a copy of the `DMPLEX` that is completely copied on each process.
2172: Collective
2174: Input Parameter:
2175: . dm - the original `DMPLEX` object
2177: Output Parameters:
2178: + sf - the `PetscSF` used for point distribution (optional)
2179: - redundantMesh - the redundant `DM` object, or `NULL`
2181: Level: intermediate
2183: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetGatherDM()`
2184: @*/
2185: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
2186: {
2187: MPI_Comm comm;
2188: PetscMPIInt size, rank;
2189: PetscInt pStart, pEnd, p;
2190: PetscInt numPoints = -1;
2191: PetscSF migrationSF, sfPoint, gatherSF;
2192: DM gatherDM, dmCoord;
2193: PetscSFNode *points;
2195: PetscFunctionBegin;
2197: PetscAssertPointer(redundantMesh, 3);
2198: *redundantMesh = NULL;
2199: comm = PetscObjectComm((PetscObject)dm);
2200: PetscCallMPI(MPI_Comm_size(comm, &size));
2201: if (size == 1) {
2202: PetscCall(PetscObjectReference((PetscObject)dm));
2203: *redundantMesh = dm;
2204: if (sf) *sf = NULL;
2205: PetscFunctionReturn(PETSC_SUCCESS);
2206: }
2207: PetscCall(DMPlexGetGatherDM(dm, &gatherSF, &gatherDM));
2208: if (!gatherDM) PetscFunctionReturn(PETSC_SUCCESS);
2209: PetscCallMPI(MPI_Comm_rank(comm, &rank));
2210: PetscCall(DMPlexGetChart(gatherDM, &pStart, &pEnd));
2211: numPoints = pEnd - pStart;
2212: PetscCallMPI(MPI_Bcast(&numPoints, 1, MPIU_INT, 0, comm));
2213: PetscCall(PetscMalloc1(numPoints, &points));
2214: PetscCall(PetscSFCreate(comm, &migrationSF));
2215: for (p = 0; p < numPoints; p++) {
2216: points[p].index = p;
2217: points[p].rank = 0;
2218: }
2219: PetscCall(PetscSFSetGraph(migrationSF, pEnd - pStart, numPoints, NULL, PETSC_OWN_POINTER, points, PETSC_OWN_POINTER));
2220: PetscCall(DMPlexCreate(comm, redundantMesh));
2221: PetscCall(PetscObjectSetName((PetscObject)*redundantMesh, "Redundant Mesh"));
2222: PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
2223:   /* This is to express that all points are in the overlap */
2224: PetscCall(DMPlexSetOverlap_Plex(*redundantMesh, NULL, PETSC_MAX_INT));
2225: PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
2226: PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
2227: PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
2228: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2229: PetscCall(PetscSFDestroy(&sfPoint));
2230: if (sf) {
2231: PetscSF tsf;
2233: PetscCall(PetscSFCompose(gatherSF, migrationSF, &tsf));
2234: PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
2235: PetscCall(PetscSFDestroy(&tsf));
2236: }
2237: PetscCall(PetscSFDestroy(&migrationSF));
2238: PetscCall(PetscSFDestroy(&gatherSF));
2239: PetscCall(DMDestroy(&gatherDM));
2240: PetscCall(DMCopyDisc(dm, *redundantMesh));
2241: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *redundantMesh));
2242: PetscFunctionReturn(PETSC_SUCCESS);
2243: }
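/*
  Usage sketch: obtain a fully redundant copy of the mesh on every rank and release it when done
  (dm is a placeholder; on a single rank the same DM is returned with an extra reference):

    DM redundantMesh = NULL;

    PetscCall(DMPlexGetRedundantDM(dm, NULL, &redundantMesh));
    if (redundantMesh) PetscCall(DMDestroy(&redundantMesh));
*/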
2245: /*@
2246: DMPlexIsDistributed - Find out whether this `DM` is distributed, i.e. more than one rank owns some points.
2248: Collective
2250: Input Parameter:
2251: . dm - The `DM` object
2253: Output Parameter:
2254: . distributed - Flag whether the `DM` is distributed
2256: Level: intermediate
2258: Notes:
2259: This currently finds out whether at least two ranks have any DAG points.
2260: This involves `MPI_Allreduce()` with one integer.
2261: The result is currently not stashed so every call to this routine involves this global communication.
2263: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetOverlap()`, `DMPlexIsInterpolated()`
2264: @*/
2265: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2266: {
2267: PetscInt pStart, pEnd, count;
2268: MPI_Comm comm;
2269: PetscMPIInt size;
2271: PetscFunctionBegin;
2273: PetscAssertPointer(distributed, 2);
2274: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
2275: PetscCallMPI(MPI_Comm_size(comm, &size));
2276: if (size == 1) {
2277: *distributed = PETSC_FALSE;
2278: PetscFunctionReturn(PETSC_SUCCESS);
2279: }
2280: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
2281: count = (pEnd - pStart) > 0 ? 1 : 0;
2282: PetscCall(MPIU_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
2283: *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2284: PetscFunctionReturn(PETSC_SUCCESS);
2285: }
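/*
  Usage sketch: distribute only when the mesh is not already spread over several ranks
  (dm is a placeholder):

    PetscBool distributed;

    PetscCall(DMPlexIsDistributed(dm, &distributed));
    if (!distributed) {
      DM dmDist = NULL;

      PetscCall(DMPlexDistribute(dm, 0, NULL, &dmDist));
      if (dmDist) {
        PetscCall(DMDestroy(&dm));
        dm = dmDist;
      }
    }
*/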
2287: /*@C
2288: DMPlexDistributionSetName - Set the name of the specific parallel distribution
2290: Input Parameters:
2291: + dm - The `DM`
2292: - name - The name of the specific parallel distribution
2294: Level: developer
2296: Note:
2297: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2298: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2299: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2300: loads the parallel distribution stored in file under this name.
2302: .seealso: `DMPLEX`, `DMPlexDistributionGetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2303: @*/
2304: PetscErrorCode DMPlexDistributionSetName(DM dm, const char name[])
2305: {
2306: DM_Plex *mesh = (DM_Plex *)dm->data;
2308: PetscFunctionBegin;
2310: if (name) PetscAssertPointer(name, 2);
2311: PetscCall(PetscFree(mesh->distributionName));
2312: PetscCall(PetscStrallocpy(name, &mesh->distributionName));
2313: PetscFunctionReturn(PETSC_SUCCESS);
2314: }
2316: /*@C
2317: DMPlexDistributionGetName - Retrieve the name of the specific parallel distribution
2319: Input Parameter:
2320: . dm - The `DM`
2322: Output Parameter:
2323: . name - The name of the specific parallel distribution
2325: Level: developer
2327: Note:
2328: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2329: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2330: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2331: loads the parallel distribution stored in file under this name.
2333: .seealso: `DMPLEX`, `DMPlexDistributionSetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2334: @*/
2335: PetscErrorCode DMPlexDistributionGetName(DM dm, const char *name[])
2336: {
2337: DM_Plex *mesh = (DM_Plex *)dm->data;
2339: PetscFunctionBegin;
2341: PetscAssertPointer(name, 2);
2342: *name = mesh->distributionName;
2343: PetscFunctionReturn(PETSC_SUCCESS);
2344: }
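/*
  Usage sketch: name the current parallel distribution before saving the topology so that a later
  DMPlexTopologyLoad() with the same name can recover the partition, ownership, and local ordering
  (viewer is a placeholder for an already opened viewer suitable for DMPlexTopologyView()):

    PetscCall(DMPlexDistributionSetName(dm, "my-distribution"));
    PetscCall(DMPlexTopologyView(dm, viewer));
*/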