Actual source code: plexdistribute.c
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsc/private/dmlabelimpl.h>
4: /*@C
5: DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
7: Input Parameters:
8: + dm - The DM object
9: . user - The user callback, may be `NULL` (to clear the callback)
10: - ctx - context for callback evaluation, may be `NULL`
12: Level: advanced
14: Notes:
15: The caller of `DMPlexGetAdjacency()` may need to arrange that a large enough array is available for the adjacency.
17: Any setting here overrides other configuration of `DMPLEX` adjacency determination.
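
  Example Usage:
  A minimal sketch (not taken from the PETSc sources; the callback name MyConeAdjacency and its cone-only policy are illustrative assumptions) of a callback with the required signature and its registration

.vb
  static PetscErrorCode MyConeAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
  {
    const PetscInt *cone;
    PetscInt        coneSize, c;

    PetscFunctionBegin;
    /* Illustrative policy: report only the cone of p as adjacent; *adjSize holds the array capacity on entry */
    PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
    PetscCall(DMPlexGetCone(dm, p, &cone));
    for (c = 0; c < coneSize && c < *adjSize; ++c) adj[c] = cone[c];
    *adjSize = c;
    PetscFunctionReturn(PETSC_SUCCESS);
  }

  PetscCall(DMPlexSetAdjacencyUser(dm, MyConeAdjacency, NULL));
.ve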
19: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexGetAdjacencyUser()`
20: @*/
21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm, PetscErrorCode (*user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void *ctx)
22: {
23: DM_Plex *mesh = (DM_Plex *)dm->data;
25: PetscFunctionBegin;
27: mesh->useradjacency = user;
28: mesh->useradjacencyctx = ctx;
29: PetscFunctionReturn(PETSC_SUCCESS);
30: }
32: /*@C
33: DMPlexGetAdjacencyUser - Get the user-defined adjacency callback
35: Input Parameter:
36: . dm - The `DM` object
38: Output Parameters:
39: + user - The callback
40: - ctx - context for callback evaluation
42: Level: advanced
44: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexSetAdjacencyUser()`
45: @*/
46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void **ctx)
47: {
48: DM_Plex *mesh = (DM_Plex *)dm->data;
50: PetscFunctionBegin;
52: if (user) *user = mesh->useradjacency;
53: if (ctx) *ctx = mesh->useradjacencyctx;
54: PetscFunctionReturn(PETSC_SUCCESS);
55: }
57: /*@
58: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
60: Input Parameters:
61: + dm - The `DM` object
62: - useAnchors - Flag to use the constraints. If `PETSC_TRUE`, then constrained points are omitted from `DMPlexGetAdjacency()`, and their anchor points appear in their place.
64: Level: intermediate
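
  Example Usage:
  A minimal sketch (dm and dmDist are assumed to be managed by the caller) of enabling anchor substitution before distribution

.vb
  PetscCall(DMPlexSetAdjacencyUseAnchors(dm, PETSC_TRUE)); /* report anchors instead of constrained points */
  PetscCall(DMPlexDistribute(dm, 0, NULL, &dmDist));
.ve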
66: .seealso: `DMPLEX`, `DMGetAdjacency()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
67: @*/
68: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
69: {
70: DM_Plex *mesh = (DM_Plex *)dm->data;
72: PetscFunctionBegin;
74: mesh->useAnchors = useAnchors;
75: PetscFunctionReturn(PETSC_SUCCESS);
76: }
78: /*@
79: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
81: Input Parameter:
82: . dm - The `DM` object
84: Output Parameter:
85: . useAnchors - Flag to use the constraints. If `PETSC_TRUE`, then constrained points are omitted from `DMPlexGetAdjacency()`, and their anchor points appear in their place.
87: Level: intermediate
89: .seealso: `DMPLEX`, `DMPlexSetAdjacencyUseAnchors()`, `DMSetAdjacency()`, `DMGetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
90: @*/
91: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
92: {
93: DM_Plex *mesh = (DM_Plex *)dm->data;
95: PetscFunctionBegin;
97: PetscAssertPointer(useAnchors, 2);
98: *useAnchors = mesh->useAnchors;
99: PetscFunctionReturn(PETSC_SUCCESS);
100: }
102: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103: {
104: const PetscInt *cone = NULL;
105: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
107: PetscFunctionBeginHot;
108: PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
109: PetscCall(DMPlexGetCone(dm, p, &cone));
110: for (c = 0; c <= coneSize; ++c) {
111: const PetscInt point = !c ? p : cone[c - 1];
112: const PetscInt *support = NULL;
113: PetscInt supportSize, s, q;
115: PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
116: PetscCall(DMPlexGetSupport(dm, point, &support));
117: for (s = 0; s < supportSize; ++s) {
118: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]), 0); ++q) {
119: if (support[s] == adj[q]) break;
120: }
121: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
122: }
123: }
124: *adjSize = numAdj;
125: PetscFunctionReturn(PETSC_SUCCESS);
126: }
128: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
129: {
130: const PetscInt *support = NULL;
131: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
133: PetscFunctionBeginHot;
134: PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
135: PetscCall(DMPlexGetSupport(dm, p, &support));
136: for (s = 0; s <= supportSize; ++s) {
137: const PetscInt point = !s ? p : support[s - 1];
138: const PetscInt *cone = NULL;
139: PetscInt coneSize, c, q;
141: PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
142: PetscCall(DMPlexGetCone(dm, point, &cone));
143: for (c = 0; c < coneSize; ++c) {
144: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]), 0); ++q) {
145: if (cone[c] == adj[q]) break;
146: }
147: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
148: }
149: }
150: *adjSize = numAdj;
151: PetscFunctionReturn(PETSC_SUCCESS);
152: }
154: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
155: {
156: PetscInt *star = NULL;
157: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
159: PetscFunctionBeginHot;
160: PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
161: for (s = 0; s < starSize * 2; s += 2) {
162: const PetscInt *closure = NULL;
163: PetscInt closureSize, c, q;
165: PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
166: for (c = 0; c < closureSize * 2; c += 2) {
167: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]), 0); ++q) {
168: if (closure[c] == adj[q]) break;
169: }
170: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
171: }
172: PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
173: }
174: PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
175: *adjSize = numAdj;
176: PetscFunctionReturn(PETSC_SUCCESS);
177: }
179: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
180: {
181: static PetscInt asiz = 0;
182: PetscInt maxAnchors = 1;
183: PetscInt aStart = -1, aEnd = -1;
184: PetscInt maxAdjSize;
185: PetscSection aSec = NULL;
186: IS aIS = NULL;
187: const PetscInt *anchors;
188: DM_Plex *mesh = (DM_Plex *)dm->data;
190: PetscFunctionBeginHot;
191: if (useAnchors) {
192: PetscCall(DMPlexGetAnchors(dm, &aSec, &aIS));
193: if (aSec) {
194: PetscCall(PetscSectionGetMaxDof(aSec, &maxAnchors));
195: maxAnchors = PetscMax(1, maxAnchors);
196: PetscCall(PetscSectionGetChart(aSec, &aStart, &aEnd));
197: PetscCall(ISGetIndices(aIS, &anchors));
198: }
199: }
200: if (!*adj) {
201: PetscInt depth, maxC, maxS, maxP, pStart, pEnd;
203: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
204: PetscCall(DMPlexGetDepth(dm, &depth));
205: depth = PetscMax(depth, -depth);
206: PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
207: maxP = maxS * maxC;
208: /* Adjacency can be as large as supp(cl(cell)) or cl(supp(vertex)),
209: supp(cell) + supp(maxC faces) + supp(maxC^2 edges) + supp(maxC^3 vertices)
210: = 0 + maxS*maxC + maxS^2*maxC^2 + maxS^3*maxC^3
211: = (\sum^d_{i=0} (maxS*maxC)^i) - 1
212: = ((maxS*maxC)^{d+1} - 1) / (maxS*maxC - 1) - 1
213: We could improve this by getting the max by strata:
214: supp[d](cell) + supp[d-1](maxC[d] faces) + supp[1](maxC[d]*maxC[d-1] edges) + supp[0](maxC[d]*maxC[d-1]*maxC[d-2] vertices)
215: = 0 + maxS[d-1]*maxC[d] + maxS[1]*maxC[d]*maxC[d-1] + maxS[0]*maxC[d]*maxC[d-1]*maxC[d-2]
216: and the same with S and C reversed
217: */
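  /* Worked arithmetic (illustrative numbers only): with maxC = 6 and maxS = 2 we get maxP = 12, so for depth = 3
     the geometric series computed below is 1 + 12 + 144 + 1728 = 1885 entries, before the maxAnchors scaling and
     the pEnd - pStart cap are applied. */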
218: if ((depth == 3 && maxP > 200) || (depth == 2 && maxP > 580)) asiz = pEnd - pStart;
219: else asiz = (maxP > 1) ? ((PetscPowInt(maxP, depth + 1) - 1) / (maxP - 1)) : depth + 1;
220: asiz *= maxAnchors;
221: asiz = PetscMin(asiz, pEnd - pStart);
222: PetscCall(PetscMalloc1(asiz, adj));
223: }
224: if (*adjSize < 0) *adjSize = asiz;
225: maxAdjSize = *adjSize;
226: if (mesh->useradjacency) {
227: PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
228: } else if (useTransitiveClosure) {
229: PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
230: } else if (useCone) {
231: PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
232: } else {
233: PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
234: }
235: if (useAnchors && aSec) {
236: PetscInt origSize = *adjSize;
237: PetscInt numAdj = origSize;
238: PetscInt i = 0, j;
239: PetscInt *orig = *adj;
241: while (i < origSize) {
242: PetscInt p = orig[i];
243: PetscInt aDof = 0;
245: if (p >= aStart && p < aEnd) PetscCall(PetscSectionGetDof(aSec, p, &aDof));
246: if (aDof) {
247: PetscInt aOff;
248: PetscInt s, q;
250: for (j = i + 1; j < numAdj; j++) orig[j - 1] = orig[j];
251: origSize--;
252: numAdj--;
253: PetscCall(PetscSectionGetOffset(aSec, p, &aOff));
254: for (s = 0; s < aDof; ++s) {
255: for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff + s]), 0); ++q) {
256: if (anchors[aOff + s] == orig[q]) break;
257: }
258: PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
259: }
260: } else {
261: i++;
262: }
263: }
264: *adjSize = numAdj;
265: PetscCall(ISRestoreIndices(aIS, &anchors));
266: }
267: PetscFunctionReturn(PETSC_SUCCESS);
268: }
270: /*@
271: DMPlexGetAdjacency - Return all points adjacent to the given point
273: Input Parameters:
274: + dm - The `DM` object
275: - p - The point
277: Input/Output Parameters:
278: + adjSize - The maximum size of `adj` if it is non-`NULL`, or `PETSC_DETERMINE`;
279: on output the number of adjacent points
280: - adj - Either `NULL` so that the array is allocated, or an existing array with size `adjSize`;
281: on output contains the adjacent points
283: Level: advanced
285: Notes:
286: The user must `PetscFree()` the `adj` array if it was not passed in.
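
  Example Usage:
  A minimal sketch of the allocation contract described above; dm and p are assumed to be defined by the caller

.vb
  PetscInt *adj = NULL, adjSize = PETSC_DETERMINE;

  PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj)); /* adj is allocated because it was passed in as NULL */
  for (PetscInt a = 0; a < adjSize; ++a) {
    /* use adj[a] */
  }
  PetscCall(PetscFree(adj)); /* required since adj was allocated by the call */
.ve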
288: .seealso: `DMPLEX`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMCreateMatrix()`, `DMPlexPreallocateOperator()`
289: @*/
290: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
291: {
292: PetscBool useCone, useClosure, useAnchors;
294: PetscFunctionBeginHot;
296: PetscAssertPointer(adjSize, 3);
297: PetscAssertPointer(adj, 4);
298: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
299: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
300: PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
301: PetscFunctionReturn(PETSC_SUCCESS);
302: }
304: /*@
305: DMPlexCreateTwoSidedProcessSF - Create a `PetscSF` that has only process connectivity
307: Collective
309: Input Parameters:
310: + dm - The `DM`
311: . sfPoint - The `PetscSF` which encodes point connectivity
312: . rootRankSection - to be documented
313: . rootRanks - to be documented
314: . leafRankSection - to be documented
315: - leafRanks - to be documented
317: Output Parameters:
318: + processRanks - A list of process neighbors, or `NULL`
319: - sfProcess - A `PetscSF` encoding the two-sided process connectivity, or `NULL`
321: Level: developer
323: .seealso: `DMPLEX`, `PetscSFCreate()`, `DMPlexCreateProcessSF()`
324: @*/
325: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
326: {
327: const PetscSFNode *remotePoints;
328: PetscInt *localPointsNew;
329: PetscSFNode *remotePointsNew;
330: const PetscInt *nranks;
331: PetscInt *ranksNew;
332: PetscBT neighbors;
333: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
334: PetscMPIInt size, proc, rank;
336: PetscFunctionBegin;
339: if (processRanks) PetscAssertPointer(processRanks, 7);
340: if (sfProcess) PetscAssertPointer(sfProcess, 8);
341: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
342: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
343: PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
344: PetscCall(PetscBTCreate(size, &neighbors));
345: PetscCall(PetscBTMemzero(size, neighbors));
346: /* Compute root-to-leaf process connectivity */
347: PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
348: PetscCall(ISGetIndices(rootRanks, &nranks));
349: for (p = pStart; p < pEnd; ++p) {
350: PetscInt ndof, noff, n;
352: PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
353: PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
354: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
355: }
356: PetscCall(ISRestoreIndices(rootRanks, &nranks));
357: /* Compute leaf-to-neighbor process connectivity */
358: PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
359: PetscCall(ISGetIndices(leafRanks, &nranks));
360: for (p = pStart; p < pEnd; ++p) {
361: PetscInt ndof, noff, n;
363: PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
364: PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
365: for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
366: }
367: PetscCall(ISRestoreIndices(leafRanks, &nranks));
368: /* Compute leaf-to-root process connectivity */
369: for (l = 0; l < numLeaves; ++l) PetscCall(PetscBTSet(neighbors, remotePoints[l].rank));
370: /* Calculate edges */
371: PetscCall(PetscBTClear(neighbors, rank));
372: for (proc = 0, numNeighbors = 0; proc < size; ++proc) {
373: if (PetscBTLookup(neighbors, proc)) ++numNeighbors;
374: }
375: PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
376: PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
377: PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
378: for (proc = 0, n = 0; proc < size; ++proc) {
379: if (PetscBTLookup(neighbors, proc)) {
380: ranksNew[n] = proc;
381: localPointsNew[n] = proc;
382: remotePointsNew[n].index = rank;
383: remotePointsNew[n].rank = proc;
384: ++n;
385: }
386: }
387: PetscCall(PetscBTDestroy(&neighbors));
388: if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
389: else PetscCall(PetscFree(ranksNew));
390: if (sfProcess) {
391: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
392: PetscCall(PetscObjectSetName((PetscObject)*sfProcess, "Two-Sided Process SF"));
393: PetscCall(PetscSFSetFromOptions(*sfProcess));
394: PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
395: }
396: PetscFunctionReturn(PETSC_SUCCESS);
397: }
399: /*@
400: DMPlexDistributeOwnership - Compute owner information for shared points, turning the one-sided point `PetscSF` into a two-sided description of the sharing.
402: Collective
404: Input Parameter:
405: . dm - The `DM`
407: Output Parameters:
408: + rootSection - The number of leaves for a given root point
409: . rootrank - The rank of each edge into the root point
410: . leafSection - The number of processes sharing a given leaf point
411: - leafrank - The rank of each process sharing a leaf point
413: Level: developer
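
  Example Usage:
  A sketch of the typical pairing with DMPlexCreateOverlapLabel(); comm, dm, and the DMLabel ovLabel are assumed to be set up by the caller

.vb
  PetscSection rootSection, leafSection;
  IS           rootrank, leafrank;

  PetscCall(PetscSectionCreate(comm, &rootSection));
  PetscCall(PetscSectionCreate(comm, &leafSection));
  PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
  PetscCall(DMPlexCreateOverlapLabel(dm, 1, rootSection, rootrank, leafSection, leafrank, &ovLabel));
  PetscCall(ISDestroy(&rootrank));
  PetscCall(ISDestroy(&leafrank));
  PetscCall(PetscSectionDestroy(&rootSection));
  PetscCall(PetscSectionDestroy(&leafSection));
.ve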
415: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
416: @*/
417: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
418: {
419: MPI_Comm comm;
420: PetscSF sfPoint;
421: const PetscInt *rootdegree;
422: PetscInt *myrank, *remoterank;
423: PetscInt pStart, pEnd, p, nedges;
424: PetscMPIInt rank;
426: PetscFunctionBegin;
427: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
428: PetscCallMPI(MPI_Comm_rank(comm, &rank));
429: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
430: PetscCall(DMGetPointSF(dm, &sfPoint));
431: /* Compute number of leaves for each root */
432: PetscCall(PetscObjectSetName((PetscObject)rootSection, "Root Section"));
433: PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
434: PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
435: PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
436: for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p - pStart]));
437: PetscCall(PetscSectionSetUp(rootSection));
438: /* Gather rank of each leaf to root */
439: PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
440: PetscCall(PetscMalloc1(pEnd - pStart, &myrank));
441: PetscCall(PetscMalloc1(nedges, &remoterank));
442: for (p = 0; p < pEnd - pStart; ++p) myrank[p] = rank;
443: PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
444: PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
445: PetscCall(PetscFree(myrank));
446: PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
447: /* Distribute remote ranks to leaves */
448: PetscCall(PetscObjectSetName((PetscObject)leafSection, "Leaf Section"));
449: PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
450: PetscFunctionReturn(PETSC_SUCCESS);
451: }
453: /*@C
454: DMPlexCreateOverlapLabel - Compute a label indicating what overlap points should be sent to new processes
456: Collective
458: Input Parameters:
459: + dm - The `DM`
460: . levels - Number of overlap levels
461: . rootSection - The number of leaves for a given root point
462: . rootrank - The rank of each edge into the root point
463: . leafSection - The number of processes sharing a given leaf point
464: - leafrank - The rank of each process sharing a leaf point
466: Output Parameter:
467: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
469: Level: developer
471: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabelFromLabels()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
472: @*/
473: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
474: {
475: MPI_Comm comm;
476: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
477: PetscSF sfPoint;
478: const PetscSFNode *remote;
479: const PetscInt *local;
480: const PetscInt *nrank, *rrank;
481: PetscInt *adj = NULL;
482: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
483: PetscMPIInt rank, size;
484: PetscBool flg;
486: PetscFunctionBegin;
487: *ovLabel = NULL;
488: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
489: PetscCallMPI(MPI_Comm_size(comm, &size));
490: PetscCallMPI(MPI_Comm_rank(comm, &rank));
491: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
492: PetscCall(DMGetPointSF(dm, &sfPoint));
493: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
494: if (!levels) {
495: /* Add owned points */
496: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
497: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
498: PetscFunctionReturn(PETSC_SUCCESS);
499: }
500: PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
501: PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
502: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
503: /* Handle leaves: shared with the root point */
504: for (l = 0; l < nleaves; ++l) {
505: PetscInt adjSize = PETSC_DETERMINE, a;
507: PetscCall(DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj));
508: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank));
509: }
510: PetscCall(ISGetIndices(rootrank, &rrank));
511: PetscCall(ISGetIndices(leafrank, &nrank));
512: /* Handle roots */
513: for (p = pStart; p < pEnd; ++p) {
514: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
516: if ((p >= sStart) && (p < sEnd)) {
517: /* Some leaves share a root with other leaves on different processes */
518: PetscCall(PetscSectionGetDof(leafSection, p, &neighbors));
519: if (neighbors) {
520: PetscCall(PetscSectionGetOffset(leafSection, p, &noff));
521: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
522: for (n = 0; n < neighbors; ++n) {
523: const PetscInt remoteRank = nrank[noff + n];
525: if (remoteRank == rank) continue;
526: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
527: }
528: }
529: }
530: /* Roots are shared with leaves */
531: PetscCall(PetscSectionGetDof(rootSection, p, &neighbors));
532: if (!neighbors) continue;
533: PetscCall(PetscSectionGetOffset(rootSection, p, &noff));
534: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
535: for (n = 0; n < neighbors; ++n) {
536: const PetscInt remoteRank = rrank[noff + n];
538: if (remoteRank == rank) continue;
539: for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
540: }
541: }
542: PetscCall(PetscFree(adj));
543: PetscCall(ISRestoreIndices(rootrank, &rrank));
544: PetscCall(ISRestoreIndices(leafrank, &nrank));
545: /* Add additional overlap levels */
546: for (l = 1; l < levels; l++) {
547: /* Propagate point donations over SF to capture remote connections */
548: PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank));
549: /* Add next level of point donations to the label */
550: PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank));
551: }
552: /* We require the closure in the overlap */
553: PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
554: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
555: if (flg) {
556: PetscViewer viewer;
557: PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
558: PetscCall(DMLabelView(ovAdjByRank, viewer));
559: }
560: /* Invert sender to receiver label */
561: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
562: PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
563: /* Add owned points, except for shared local points */
564: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
565: for (l = 0; l < nleaves; ++l) {
566: PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
567: PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
568: }
569: /* Clean up */
570: PetscCall(DMLabelDestroy(&ovAdjByRank));
571: PetscFunctionReturn(PETSC_SUCCESS);
572: }
574: static PetscErrorCode HandlePoint_Private(DM dm, PetscInt p, PetscSection section, const PetscInt ranks[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], DMLabel ovAdjByRank)
575: {
576: PetscInt neighbors, el;
578: PetscFunctionBegin;
579: PetscCall(PetscSectionGetDof(section, p, &neighbors));
580: if (neighbors) {
581: PetscInt *adj = NULL;
582: PetscInt adjSize = PETSC_DETERMINE, noff, n, a;
583: PetscMPIInt rank;
585: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
586: PetscCall(PetscSectionGetOffset(section, p, &noff));
587: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
588: for (n = 0; n < neighbors; ++n) {
589: const PetscInt remoteRank = ranks[noff + n];
591: if (remoteRank == rank) continue;
592: for (a = 0; a < adjSize; ++a) {
593: PetscBool insert = PETSC_TRUE;
595: for (el = 0; el < numExLabels; ++el) {
596: PetscInt exVal;
597: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
598: if (exVal == exValue[el]) {
599: insert = PETSC_FALSE;
600: break;
601: }
602: }
603: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
604: }
605: }
606: PetscCall(PetscFree(adj));
607: }
608: PetscFunctionReturn(PETSC_SUCCESS);
609: }
611: /*@C
612: DMPlexCreateOverlapLabelFromLabels - Compute a label indicating what overlap points should be sent to new processes
614: Collective
616: Input Parameters:
617: + dm - The `DM`
618: . numLabels - The number of labels to draw candidate points from
619: . label - An array of labels containing candidate points
620: . value - An array of label values marking the candidate points
621: . numExLabels - The number of labels to use for exclusion
622: . exLabel - An array of labels indicating points to be excluded, or `NULL`
623: . exValue - An array of label values to be excluded, or `NULL`
624: . rootSection - The number of leaves for a given root point
625: . rootrank - The rank of each edge into the root point
626: . leafSection - The number of processes sharing a given leaf point
627: - leafrank - The rank of each process sharing a leaf point
629: Output Parameter:
630: . ovLabel - `DMLabel` containing remote overlap contributions as point/rank pairings
632: Level: developer
634: Note:
635: The candidate points are only added to the overlap if they are adjacent to a shared point
637: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
638: @*/
639: PetscErrorCode DMPlexCreateOverlapLabelFromLabels(DM dm, PetscInt numLabels, const DMLabel label[], const PetscInt value[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
640: {
641: MPI_Comm comm;
642: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
643: PetscSF sfPoint;
644: const PetscSFNode *remote;
645: const PetscInt *local;
646: const PetscInt *nrank, *rrank;
647: PetscInt *adj = NULL;
648: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l, el;
649: PetscMPIInt rank, size;
650: PetscBool flg;
652: PetscFunctionBegin;
653: *ovLabel = NULL;
654: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
655: PetscCallMPI(MPI_Comm_size(comm, &size));
656: PetscCallMPI(MPI_Comm_rank(comm, &rank));
657: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
658: PetscCall(DMGetPointSF(dm, &sfPoint));
659: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
660: PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
661: PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
662: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
663: PetscCall(ISGetIndices(rootrank, &rrank));
664: PetscCall(ISGetIndices(leafrank, &nrank));
665: for (l = 0; l < numLabels; ++l) {
666: IS valIS;
667: const PetscInt *points;
668: PetscInt n;
670: PetscCall(DMLabelGetStratumIS(label[l], value[l], &valIS));
671: if (!valIS) continue;
672: PetscCall(ISGetIndices(valIS, &points));
673: PetscCall(ISGetLocalSize(valIS, &n));
674: for (PetscInt i = 0; i < n; ++i) {
675: const PetscInt p = points[i];
677: if ((p >= sStart) && (p < sEnd)) {
678: PetscInt loc, adjSize = PETSC_DETERMINE;
680: /* Handle leaves: shared with the root point */
681: if (local) PetscCall(PetscFindInt(p, nleaves, local, &loc));
682: else loc = (p >= 0 && p < nleaves) ? p : -1;
683: if (loc >= 0) {
684: const PetscInt remoteRank = remote[loc].rank;
686: PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
687: for (PetscInt a = 0; a < adjSize; ++a) {
688: PetscBool insert = PETSC_TRUE;
690: for (el = 0; el < numExLabels; ++el) {
691: PetscInt exVal;
692: PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
693: if (exVal == exValue[el]) {
694: insert = PETSC_FALSE;
695: break;
696: }
697: }
698: if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
699: }
700: }
701: /* Some leaves share a root with other leaves on different processes */
702: PetscCall(HandlePoint_Private(dm, p, leafSection, nrank, numExLabels, exLabel, exValue, ovAdjByRank));
703: }
704: /* Roots are shared with leaves */
705: PetscCall(HandlePoint_Private(dm, p, rootSection, rrank, numExLabels, exLabel, exValue, ovAdjByRank));
706: }
707: PetscCall(ISRestoreIndices(valIS, &points));
708: PetscCall(ISDestroy(&valIS));
709: }
710: PetscCall(PetscFree(adj));
711: PetscCall(ISRestoreIndices(rootrank, &rrank));
712: PetscCall(ISRestoreIndices(leafrank, &nrank));
713: /* We require the closure in the overlap */
714: PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
715: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
716: if (flg) {
717: PetscViewer viewer;
718: PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
719: PetscCall(DMLabelView(ovAdjByRank, viewer));
720: }
721: /* Invert sender to receiver label */
722: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
723: PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
724: /* Add owned points, except for shared local points */
725: for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
726: for (l = 0; l < nleaves; ++l) {
727: PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
728: PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
729: }
730: /* Clean up */
731: PetscCall(DMLabelDestroy(&ovAdjByRank));
732: PetscFunctionReturn(PETSC_SUCCESS);
733: }
735: /*@C
736: DMPlexCreateOverlapMigrationSF - Create a `PetscSF` describing the new mesh distribution that realizes the overlap described by the input `PetscSF`
738: Collective
740: Input Parameters:
741: + dm - The `DM`
742: - overlapSF - The `PetscSF` mapping ghost points in overlap to owner points on other processes
744: Output Parameter:
745: . migrationSF - A `PetscSF` that maps original points in old locations to points in new locations
747: Level: developer
749: .seealso: `DMPLEX`, `DMPlexCreateOverlapLabel()`, `DMPlexDistributeOverlap()`, `DMPlexDistribute()`
750: @*/
751: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
752: {
753: MPI_Comm comm;
754: PetscMPIInt rank, size;
755: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
756: PetscInt *pointDepths, *remoteDepths, *ilocal;
757: PetscInt *depthRecv, *depthShift, *depthIdx;
758: PetscSFNode *iremote;
759: PetscSF pointSF;
760: const PetscInt *sharedLocal;
761: const PetscSFNode *overlapRemote, *sharedRemote;
763: PetscFunctionBegin;
765: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
766: PetscCallMPI(MPI_Comm_rank(comm, &rank));
767: PetscCallMPI(MPI_Comm_size(comm, &size));
768: PetscCall(DMGetDimension(dm, &dim));
770: /* Before building the migration SF we need to know the new stratum offsets */
771: PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
772: PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
773: for (d = 0; d < dim + 1; d++) {
774: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
775: for (p = pStart; p < pEnd; p++) pointDepths[p] = d;
776: }
777: for (p = 0; p < nleaves; p++) remoteDepths[p] = -1;
778: PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
779: PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
781: /* Count received points in each stratum and compute the internal strata shift */
782: PetscCall(PetscMalloc3(dim + 1, &depthRecv, dim + 1, &depthShift, dim + 1, &depthIdx));
783: for (d = 0; d < dim + 1; d++) depthRecv[d] = 0;
784: for (p = 0; p < nleaves; p++) depthRecv[remoteDepths[p]]++;
785: depthShift[dim] = 0;
786: for (d = 0; d < dim; d++) depthShift[d] = depthRecv[dim];
787: for (d = 1; d < dim; d++) depthShift[d] += depthRecv[0];
788: for (d = dim - 2; d > 0; d--) depthShift[d] += depthRecv[d + 1];
789: for (d = 0; d < dim + 1; d++) {
790: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
791: depthIdx[d] = pStart + depthShift[d];
792: }
794: /* Build an SF that describes the full overlap migration */
795: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
796: newLeaves = pEnd - pStart + nleaves;
797: PetscCall(PetscMalloc1(newLeaves, &ilocal));
798: PetscCall(PetscMalloc1(newLeaves, &iremote));
799: /* First map local points to themselves */
800: for (d = 0; d < dim + 1; d++) {
801: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
802: for (p = pStart; p < pEnd; p++) {
803: point = p + depthShift[d];
804: ilocal[point] = point;
805: iremote[point].index = p;
806: iremote[point].rank = rank;
807: depthIdx[d]++;
808: }
809: }
811: /* Add in the remote roots for currently shared points */
812: PetscCall(DMGetPointSF(dm, &pointSF));
813: PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
814: for (d = 0; d < dim + 1; d++) {
815: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
816: for (p = 0; p < numSharedPoints; p++) {
817: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
818: point = sharedLocal[p] + depthShift[d];
819: iremote[point].index = sharedRemote[p].index;
820: iremote[point].rank = sharedRemote[p].rank;
821: }
822: }
823: }
825: /* Now add the incoming overlap points */
826: for (p = 0; p < nleaves; p++) {
827: point = depthIdx[remoteDepths[p]];
828: ilocal[point] = point;
829: iremote[point].index = overlapRemote[p].index;
830: iremote[point].rank = overlapRemote[p].rank;
831: depthIdx[remoteDepths[p]]++;
832: }
833: PetscCall(PetscFree2(pointDepths, remoteDepths));
835: PetscCall(PetscSFCreate(comm, migrationSF));
836: PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Overlap Migration SF"));
837: PetscCall(PetscSFSetFromOptions(*migrationSF));
838: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
839: PetscCall(PetscSFSetGraph(*migrationSF, pEnd - pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
841: PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
842: PetscFunctionReturn(PETSC_SUCCESS);
843: }
845: /*@
846: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration `PetscSF` for stratification.
848: Input Parameters:
849: + dm - The `DM`
850: - sf - A star forest with unordered leaves, usually defining a `DM` point migration
852: Output Parameter:
853: . migrationSF - A star forest with added leaf indirection that ensures the resulting `DM` is stratified
855: Level: developer
857: Note:
858: This lexicographically sorts by (depth, cellType)
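
  Example Usage:
  A sketch of the usual pattern in distribution code, where sfMigration is an existing (unstratified) migration SF

.vb
  PetscSF sfStratified;

  PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
  PetscCall(PetscSFDestroy(&sfMigration));
  sfMigration = sfStratified; /* continue with the stratified migration SF */
.ve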
860: .seealso: `DMPLEX`, `DMPlexPartitionLabelCreateSF()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
861: @*/
862: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
863: {
864: MPI_Comm comm;
865: PetscMPIInt rank, size;
866: PetscInt d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
867: PetscSFNode *pointDepths, *remoteDepths;
868: PetscInt *ilocal;
869: PetscInt *depthRecv, *depthShift, *depthIdx;
870: PetscInt *ctRecv, *ctShift, *ctIdx;
871: const PetscSFNode *iremote;
873: PetscFunctionBegin;
875: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
876: PetscCallMPI(MPI_Comm_rank(comm, &rank));
877: PetscCallMPI(MPI_Comm_size(comm, &size));
878: PetscCall(DMPlexGetDepth(dm, &ldepth));
879: PetscCall(DMGetDimension(dm, &dim));
880: PetscCall(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
881: PetscCheck(!(ldepth >= 0) || !(depth != ldepth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, ldepth, depth);
882: PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF, dm, 0, 0, 0));
884: /* Before building the migration SF we need to know the new stratum offsets */
885: PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
886: PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
887: for (d = 0; d < depth + 1; ++d) {
888: PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
889: for (p = pStart; p < pEnd; ++p) {
890: DMPolytopeType ct;
892: PetscCall(DMPlexGetCellType(dm, p, &ct));
893: pointDepths[p].index = d;
894: pointDepths[p].rank = ct;
895: }
896: }
897: for (p = 0; p < nleaves; ++p) {
898: remoteDepths[p].index = -1;
899: remoteDepths[p].rank = -1;
900: }
901: PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths, MPI_REPLACE));
902: PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths, MPI_REPLACE));
903: /* Count received points in each stratum and compute the internal strata shift */
904: PetscCall(PetscCalloc6(depth + 1, &depthRecv, depth + 1, &depthShift, depth + 1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
905: for (p = 0; p < nleaves; ++p) {
906: if (remoteDepths[p].rank < 0) {
907: ++depthRecv[remoteDepths[p].index];
908: } else {
909: ++ctRecv[remoteDepths[p].rank];
910: }
911: }
912: {
913: PetscInt depths[4], dims[4], shift = 0, i, c;
915: /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
916: Consider DM_POLYTOPE_FV_GHOST, DM_POLYTOPE_INTERIOR_GHOST and DM_POLYTOPE_UNKNOWN_CELL as cells
917: */
918: depths[0] = depth;
919: depths[1] = 0;
920: depths[2] = depth - 1;
921: depths[3] = 1;
922: dims[0] = dim;
923: dims[1] = 0;
924: dims[2] = dim - 1;
925: dims[3] = 1;
926: for (i = 0; i <= depth; ++i) {
927: const PetscInt dep = depths[i];
928: const PetscInt dim = dims[i];
930: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
931: if (DMPolytopeTypeGetDim((DMPolytopeType)c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST || c == DM_POLYTOPE_UNKNOWN_CELL))) continue;
932: ctShift[c] = shift;
933: shift += ctRecv[c];
934: }
935: depthShift[dep] = shift;
936: shift += depthRecv[dep];
937: }
938: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
939: const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType)c);
941: if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST && c != DM_POLYTOPE_UNKNOWN_CELL)) {
942: ctShift[c] = shift;
943: shift += ctRecv[c];
944: }
945: }
946: }
947: /* Derive a new local permutation based on stratified indices */
948: PetscCall(PetscMalloc1(nleaves, &ilocal));
949: for (p = 0; p < nleaves; ++p) {
950: const PetscInt dep = remoteDepths[p].index;
951: const DMPolytopeType ct = (DMPolytopeType)remoteDepths[p].rank;
953: if ((PetscInt)ct < 0) {
954: ilocal[p] = depthShift[dep] + depthIdx[dep];
955: ++depthIdx[dep];
956: } else {
957: ilocal[p] = ctShift[ct] + ctIdx[ct];
958: ++ctIdx[ct];
959: }
960: }
961: PetscCall(PetscSFCreate(comm, migrationSF));
962: PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Migration SF"));
963: PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode *)iremote, PETSC_COPY_VALUES));
964: PetscCall(PetscFree2(pointDepths, remoteDepths));
965: PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
966: PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF, dm, 0, 0, 0));
967: PetscFunctionReturn(PETSC_SUCCESS);
968: }
970: /*@
971: DMPlexDistributeField - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
973: Collective
975: Input Parameters:
976: + dm - The `DMPLEX` object
977: . pointSF - The `PetscSF` describing the communication pattern
978: . originalSection - The `PetscSection` for existing data layout
979: - originalVec - The existing data in a local vector
981: Output Parameters:
982: + newSection - The `PetscSection` describing the new data layout
983: - newVec - The new data in a local vector
985: Level: developer
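
  Example Usage:
  An illustrative sketch; migrationSF, origSection, and origVec are assumed to exist, and newSection/newVec must be created before the call

.vb
  PetscSection newSection;
  Vec          newVec;

  PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &newSection));
  PetscCall(VecCreate(PETSC_COMM_SELF, &newVec));
  PetscCall(DMPlexDistributeField(dm, migrationSF, origSection, origVec, newSection, newVec));
  /* ... use newSection and newVec ... */
  PetscCall(VecDestroy(&newVec));
  PetscCall(PetscSectionDestroy(&newSection));
.ve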
987: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeFieldIS()`, `DMPlexDistributeData()`
988: @*/
989: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
990: {
991: PetscSF fieldSF;
992: PetscInt *remoteOffsets, fieldSize;
993: PetscScalar *originalValues, *newValues;
995: PetscFunctionBegin;
996: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
997: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
999: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1000: PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE));
1001: PetscCall(VecSetType(newVec, dm->vectype));
1003: PetscCall(VecGetArray(originalVec, &originalValues));
1004: PetscCall(VecGetArray(newVec, &newValues));
1005: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1006: PetscCall(PetscFree(remoteOffsets));
1007: PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1008: PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1009: PetscCall(PetscSFDestroy(&fieldSF));
1010: PetscCall(VecRestoreArray(newVec, &newValues));
1011: PetscCall(VecRestoreArray(originalVec, &originalValues));
1012: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1013: PetscFunctionReturn(PETSC_SUCCESS);
1014: }
1016: /*@
1017: DMPlexDistributeFieldIS - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1019: Collective
1021: Input Parameters:
1022: + dm - The `DMPLEX` object
1023: . pointSF - The `PetscSF` describing the communication pattern
1024: . originalSection - The `PetscSection` for existing data layout
1025: - originalIS - The existing data
1027: Output Parameters:
1028: + newSection - The `PetscSection` describing the new data layout
1029: - newIS - The new data
1031: Level: developer
1033: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`, `DMPlexDistributeData()`
1034: @*/
1035: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
1036: {
1037: PetscSF fieldSF;
1038: PetscInt *newValues, *remoteOffsets, fieldSize;
1039: const PetscInt *originalValues;
1041: PetscFunctionBegin;
1042: PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
1043: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1045: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1046: PetscCall(PetscMalloc1(fieldSize, &newValues));
1048: PetscCall(ISGetIndices(originalIS, &originalValues));
1049: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1050: PetscCall(PetscFree(remoteOffsets));
1051: PetscCall(PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE));
1052: PetscCall(PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE));
1053: PetscCall(PetscSFDestroy(&fieldSF));
1054: PetscCall(ISRestoreIndices(originalIS, &originalValues));
1055: PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS));
1056: PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1057: PetscFunctionReturn(PETSC_SUCCESS);
1058: }
1060: /*@
1061: DMPlexDistributeData - Distribute field data to match a given `PetscSF`, usually the `PetscSF` from mesh distribution
1063: Collective
1065: Input Parameters:
1066: + dm - The `DMPLEX` object
1067: . pointSF - The `PetscSF` describing the communication pattern
1068: . originalSection - The `PetscSection` for existing data layout
1069: . datatype - The type of data
1070: - originalData - The existing data
1072: Output Parameters:
1073: + newSection - The `PetscSection` describing the new data layout
1074: - newData - The new data
1076: Level: developer
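
  Example Usage:
  An illustrative sketch distributing an array of PetscInt; migrationSF, origSection, and origData are assumed to exist

.vb
  PetscSection newSection;
  PetscInt    *newData;

  PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dm), &newSection));
  PetscCall(DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origData, newSection, (void **)&newData));
  /* ... use newSection and newData ... */
  PetscCall(PetscFree(newData)); /* newData was allocated by the call */
  PetscCall(PetscSectionDestroy(&newSection));
.ve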
1078: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexDistributeField()`
1079: @*/
1080: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
1081: {
1082: PetscSF fieldSF;
1083: PetscInt *remoteOffsets, fieldSize;
1084: PetscMPIInt dataSize;
1086: PetscFunctionBegin;
1087: PetscCall(PetscLogEventBegin(DMPLEX_DistributeData, dm, 0, 0, 0));
1088: PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1090: PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1091: PetscCallMPI(MPI_Type_size(datatype, &dataSize));
1092: PetscCall(PetscMalloc(fieldSize * dataSize, newData));
1094: PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1095: PetscCall(PetscFree(remoteOffsets));
1096: PetscCall(PetscSFBcastBegin(fieldSF, datatype, originalData, *newData, MPI_REPLACE));
1097: PetscCall(PetscSFBcastEnd(fieldSF, datatype, originalData, *newData, MPI_REPLACE));
1098: PetscCall(PetscSFDestroy(&fieldSF));
1099: PetscCall(PetscLogEventEnd(DMPLEX_DistributeData, dm, 0, 0, 0));
1100: PetscFunctionReturn(PETSC_SUCCESS);
1101: }
1103: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1104: {
1105: DM_Plex *pmesh = (DM_Plex *)(dmParallel)->data;
1106: MPI_Comm comm;
1107: PetscSF coneSF;
1108: PetscSection originalConeSection, newConeSection;
1109: PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
1110: PetscBool flg;
1112: PetscFunctionBegin;
1115: PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones, dm, 0, 0, 0));
1116: /* Distribute cone section */
1117: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1118: PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
1119: PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
1120: PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
1121: PetscCall(DMSetUp(dmParallel));
1122: /* Communicate and renumber cones */
1123: PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
1124: PetscCall(PetscFree(remoteOffsets));
1125: PetscCall(DMPlexGetCones(dm, &cones));
1126: if (original) {
1127: PetscInt numCones;
1129: PetscCall(PetscSectionGetStorageSize(originalConeSection, &numCones));
1130: PetscCall(PetscMalloc1(numCones, &globCones));
1131: PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
1132: } else {
1133: globCones = cones;
1134: }
1135: PetscCall(DMPlexGetCones(dmParallel, &newCones));
1136: PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
1137: PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
1138: if (original) PetscCall(PetscFree(globCones));
1139: PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
1140: PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
1141: if (PetscDefined(USE_DEBUG)) {
1142: PetscInt p;
1143: PetscBool valid = PETSC_TRUE;
1144: for (p = 0; p < newConesSize; ++p) {
1145: if (newCones[p] < 0) {
1146: valid = PETSC_FALSE;
1147: PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %" PetscInt_FMT " not in overlap SF\n", PetscGlobalRank, p));
1148: }
1149: }
1150: PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1151: }
1152: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-cones_view", &flg));
1153: if (flg) {
1154: PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
1155: PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
1156: PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
1157: PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
1158: PetscCall(PetscSFView(coneSF, NULL));
1159: }
1160: PetscCall(DMPlexGetConeOrientations(dm, &cones));
1161: PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
1162: PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
1163: PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
1164: PetscCall(PetscSFDestroy(&coneSF));
1165: PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones, dm, 0, 0, 0));
1166: /* Create supports and stratify DMPlex */
1167: {
1168: PetscInt pStart, pEnd;
1170: PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
1171: PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
1172: }
1173: PetscCall(DMPlexSymmetrize(dmParallel));
1174: PetscCall(DMPlexStratify(dmParallel));
1175: {
1176: PetscBool useCone, useClosure, useAnchors;
1178: PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
1179: PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
1180: PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
1181: PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
1182: }
1183: PetscFunctionReturn(PETSC_SUCCESS);
1184: }
1186: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1187: {
1188: MPI_Comm comm;
1189: DM cdm, cdmParallel;
1190: PetscSection originalCoordSection, newCoordSection;
1191: Vec originalCoordinates, newCoordinates;
1192: PetscInt bs;
1193: const char *name;
1194: const PetscReal *maxCell, *Lstart, *L;
1196: PetscFunctionBegin;
1200: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1201: PetscCall(DMGetCoordinateDM(dm, &cdm));
1202: PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
1203: PetscCall(DMCopyDisc(cdm, cdmParallel));
1204: PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
1205: PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
1206: PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
1207: if (originalCoordinates) {
1208: PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1209: PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1210: PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1212: PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1213: PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
1214: PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1215: PetscCall(VecSetBlockSize(newCoordinates, bs));
1216: PetscCall(VecDestroy(&newCoordinates));
1217: }
1219: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1220: PetscCall(DMGetPeriodicity(dm, &maxCell, &Lstart, &L));
1221: PetscCall(DMSetPeriodicity(dmParallel, maxCell, Lstart, L));
1222: PetscCall(DMGetCellCoordinateDM(dm, &cdm));
1223: if (cdm) {
1224: PetscCall(DMClone(dmParallel, &cdmParallel));
1225: PetscCall(DMSetCellCoordinateDM(dmParallel, cdmParallel));
1226: PetscCall(DMCopyDisc(cdm, cdmParallel));
1227: PetscCall(DMDestroy(&cdmParallel));
1228: PetscCall(DMGetCellCoordinateSection(dm, &originalCoordSection));
1229: PetscCall(DMGetCellCoordinatesLocal(dm, &originalCoordinates));
1230: PetscCall(PetscSectionCreate(comm, &newCoordSection));
1231: if (originalCoordinates) {
1232: PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1233: PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1234: PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1236: PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1237: PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1238: PetscCall(VecSetBlockSize(newCoordinates, bs));
1239: PetscCall(DMSetCellCoordinateSection(dmParallel, bs, newCoordSection));
1240: PetscCall(DMSetCellCoordinatesLocal(dmParallel, newCoordinates));
1241: PetscCall(VecDestroy(&newCoordinates));
1242: }
1243: PetscCall(PetscSectionDestroy(&newCoordSection));
1244: }
1245: PetscFunctionReturn(PETSC_SUCCESS);
1246: }
1248: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1249: {
1250: DM_Plex *mesh = (DM_Plex *)dm->data;
1251: MPI_Comm comm;
1252: DMLabel depthLabel;
1253: PetscMPIInt rank;
1254: PetscInt depth, d, numLabels, numLocalLabels, l;
1255: PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1256: PetscObjectState depthState = -1;
1258: PetscFunctionBegin;
1262: PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels, dm, 0, 0, 0));
1263: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1264: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1266: /* If the user has changed the depth label, communicate it instead */
1267: PetscCall(DMPlexGetDepth(dm, &depth));
1268: PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
1269: if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject)depthLabel, &depthState));
1270: lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1271: PetscCall(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm));
1272: if (sendDepth) {
1273: PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
1274: PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
1275: }
1276: /* Everyone must have either the same number of labels, or none */
1277: PetscCall(DMGetNumLabels(dm, &numLocalLabels));
1278: numLabels = numLocalLabels;
1279: PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
1280: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1281: for (l = 0; l < numLabels; ++l) {
1282: DMLabel label = NULL, labelNew = NULL;
1283: PetscBool isDepth, lisOutput = PETSC_TRUE, isOutput;
1284: const char *name = NULL;
1286: if (hasLabels) {
1287: PetscCall(DMGetLabelByNum(dm, l, &label));
1288: /* Skip "depth" because it is recreated */
1289: PetscCall(PetscObjectGetName((PetscObject)label, &name));
1290: PetscCall(PetscStrcmp(name, "depth", &isDepth));
1291: } else {
1292: isDepth = PETSC_FALSE;
1293: }
1294: PetscCallMPI(MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm));
1295: if (isDepth && !sendDepth) continue;
1296: PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
1297: if (isDepth) {
1298: /* Put in any missing strata which can occur if users are managing the depth label themselves */
1299: PetscInt gdepth;
1301: PetscCall(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
1302: PetscCheck(!(depth >= 0) || !(gdepth != depth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, depth, gdepth);
1303: for (d = 0; d <= gdepth; ++d) {
1304: PetscBool has;
1306: PetscCall(DMLabelHasStratum(labelNew, d, &has));
1307: if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
1308: }
1309: }
1310: PetscCall(DMAddLabel(dmParallel, labelNew));
1311: /* Put the output flag in the new label */
1312: if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
1313: PetscCall(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm));
1314: PetscCall(PetscObjectGetName((PetscObject)labelNew, &name));
1315: PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
1316: PetscCall(DMLabelDestroy(&labelNew));
1317: }
1318: {
1319: DMLabel ctLabel;
1321: // Reset label for fast lookup
1322: PetscCall(DMPlexGetCellTypeLabel(dmParallel, &ctLabel));
1323: PetscCall(DMLabelMakeAllInvalid_Internal(ctLabel));
1324: }
1325: PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels, dm, 0, 0, 0));
1326: PetscFunctionReturn(PETSC_SUCCESS);
1327: }
1329: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1330: {
1331: DM_Plex *mesh = (DM_Plex *)dm->data;
1332: DM_Plex *pmesh = (DM_Plex *)(dmParallel)->data;
1333: MPI_Comm comm;
1334: DM refTree;
1335: PetscSection origParentSection, newParentSection;
1336: PetscInt *origParents, *origChildIDs;
1337: PetscBool flg;
1339: PetscFunctionBegin;
1342: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1344: /* Set up tree */
1345: PetscCall(DMPlexGetReferenceTree(dm, &refTree));
1346: PetscCall(DMPlexSetReferenceTree(dmParallel, refTree));
1347: PetscCall(DMPlexGetTree(dm, &origParentSection, &origParents, &origChildIDs, NULL, NULL));
1348: if (origParentSection) {
1349: PetscInt pStart, pEnd;
1350: PetscInt *newParents, *newChildIDs, *globParents;
1351: PetscInt *remoteOffsetsParents, newParentSize;
1352: PetscSF parentSF;
1354: PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
1355: PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel), &newParentSection));
1356: PetscCall(PetscSectionSetChart(newParentSection, pStart, pEnd));
1357: PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection));
1358: PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
1359: PetscCall(PetscFree(remoteOffsetsParents));
1360: PetscCall(PetscSectionGetStorageSize(newParentSection, &newParentSize));
1361: PetscCall(PetscMalloc2(newParentSize, &newParents, newParentSize, &newChildIDs));
1362: if (original) {
1363: PetscInt numParents;
1365: PetscCall(PetscSectionGetStorageSize(origParentSection, &numParents));
1366: PetscCall(PetscMalloc1(numParents, &globParents));
1367: PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
1368: } else {
1369: globParents = origParents;
1370: }
1371: PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
1372: PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
1373: if (original) PetscCall(PetscFree(globParents));
1374: PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
1375: PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
1376: PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
1377: if (PetscDefined(USE_DEBUG)) {
1378: PetscInt p;
1379: PetscBool valid = PETSC_TRUE;
1380: for (p = 0; p < newParentSize; ++p) {
1381: if (newParents[p] < 0) {
1382: valid = PETSC_FALSE;
1383: PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %" PetscInt_FMT " not in overlap SF\n", p));
1384: }
1385: }
1386: PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1387: }
1388: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-parents_view", &flg));
1389: if (flg) {
1390: PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
1391: PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
1392: PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
1393: PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
1394: PetscCall(PetscSFView(parentSF, NULL));
1395: }
1396: PetscCall(DMPlexSetTree(dmParallel, newParentSection, newParents, newChildIDs));
1397: PetscCall(PetscSectionDestroy(&newParentSection));
1398: PetscCall(PetscFree2(newParents, newChildIDs));
1399: PetscCall(PetscSFDestroy(&parentSF));
1400: }
1401: pmesh->useAnchors = mesh->useAnchors;
1402: PetscFunctionReturn(PETSC_SUCCESS);
1403: }
1405: /*@
1406: DMPlexSetPartitionBalance - Should distribution of the `DM` attempt to balance the shared point partition?
1408: Input Parameters:
1409: + dm - The `DMPLEX` object
1410: - flg - Balance the partition?
1412: Level: intermediate
1414: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetPartitionBalance()`
1415: @*/
1416: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1417: {
1418: DM_Plex *mesh = (DM_Plex *)dm->data;
1420: PetscFunctionBegin;
1421: mesh->partitionBalance = flg;
1422: PetscFunctionReturn(PETSC_SUCCESS);
1423: }
1425: /*@
1426: DMPlexGetPartitionBalance - Does distribution of the `DM` attempt to balance the shared point partition?
1428: Input Parameter:
1429: . dm - The `DMPLEX` object
1431: Output Parameter:
1432: . flg - Balance the partition?
1434: Level: intermediate
1436: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexSetPartitionBalance()`
1437: @*/
1438: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1439: {
1440: DM_Plex *mesh = (DM_Plex *)dm->data;
1442: PetscFunctionBegin;
1444: PetscAssertPointer(flg, 2);
1445: *flg = mesh->partitionBalance;
1446: PetscFunctionReturn(PETSC_SUCCESS);
1447: }
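/*
  A minimal usage sketch of the partition-balance flag: turn it on before calling
  DMPlexDistribute() and query it afterwards. The wrapper function name and the
  assumption that "dm" is an already-created DMPLEX are illustrative; only the
  DMPlexSetPartitionBalance(), DMPlexGetPartitionBalance(), and DMPlexDistribute()
  calls are the documented API.
*/
static PetscErrorCode ExamplePartitionBalance(DM dm)
{
  DM        dmDist;
  PetscSF   sf;
  PetscBool balance;

  PetscFunctionBegin;
  PetscCall(DMPlexSetPartitionBalance(dm, PETSC_TRUE)); /* spread shared-point ownership across ranks */
  PetscCall(DMPlexGetPartitionBalance(dm, &balance));
  PetscCall(PetscPrintf(PetscObjectComm((PetscObject)dm), "Partition balancing: %s\n", balance ? "on" : "off"));
  PetscCall(DMPlexDistribute(dm, 0, &sf, &dmDist));
  if (dmDist) PetscCall(DMDestroy(&dmDist));
  if (sf) PetscCall(PetscSFDestroy(&sf));
  PetscFunctionReturn(PETSC_SUCCESS);
}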
1449: typedef struct {
1450: PetscInt vote, rank, index;
1451: } Petsc3Int;
1453: /* MaxLoc, but carry a third piece of information around */
1454: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1455: {
1456: Petsc3Int *a = (Petsc3Int *)inout_;
1457: Petsc3Int *b = (Petsc3Int *)in_;
1458: PetscInt i, len = *len_;
1459: for (i = 0; i < len; i++) {
1460: if (a[i].vote < b[i].vote) {
1461: a[i].vote = b[i].vote;
1462: a[i].rank = b[i].rank;
1463: a[i].index = b[i].index;
1464: } else if (a[i].vote <= b[i].vote) {
1465: if (a[i].rank >= b[i].rank) {
1466: a[i].rank = b[i].rank;
1467: a[i].index = b[i].index;
1468: }
1469: }
1470: }
1471: }
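/*
  A minimal sketch of driving the MaxLocCarry reduction directly through MPI;
  DMPlexCreatePointSF() below exercises it through PetscSFReduceBegin/End instead.
  The wrapper function name and the single-element reduction are assumptions for
  illustration only.
*/
static PetscErrorCode ExampleMaxLocCarry(MPI_Comm comm, Petsc3Int *mine, Petsc3Int *winner)
{
  MPI_Datatype datatype;
  MPI_Op       op;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype)); /* pack vote, rank, index */
  PetscCallMPI(MPI_Type_commit(&datatype));
  PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));         /* commutative user-defined op */
  PetscCallMPI(MPI_Allreduce(mine, winner, 1, datatype, op, comm));
  PetscCallMPI(MPI_Op_free(&op));
  PetscCallMPI(MPI_Type_free(&datatype));
  PetscFunctionReturn(PETSC_SUCCESS);
}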
1473: /*@C
1474: DMPlexCreatePointSF - Build a point `PetscSF` from a `PetscSF` describing a point migration
1476: Input Parameters:
1477: + dm - The source `DMPLEX` object
1478: . migrationSF - The star forest that describes the parallel point remapping
1479: - ownership - Flag causing a vote to determine point ownership
1481: Output Parameter:
1482: . pointSF - The star forest describing the point overlap in the remapped `DM`
1484: Level: developer
1486: Note:
1487: Output `pointSF` is guaranteed to have the array of local indices (leaves) sorted.
1489: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1490: @*/
1491: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1492: {
1493: PetscMPIInt rank, size;
1494: PetscInt p, nroots, nleaves, idx, npointLeaves;
1495: PetscInt *pointLocal;
1496: const PetscInt *leaves;
1497: const PetscSFNode *roots;
1498: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1499: Vec shifts;
1500: const PetscInt numShifts = 13759;
1501: const PetscScalar *shift = NULL;
1502: const PetscBool shiftDebug = PETSC_FALSE;
1503: PetscBool balance;
1505: PetscFunctionBegin;
1507: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
1508: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
1509: PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1510: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), pointSF));
1511: PetscCall(PetscSFSetFromOptions(*pointSF));
1512: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1514: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1515: PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
1516: PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
1517: if (ownership) {
1518: MPI_Op op;
1519: MPI_Datatype datatype;
1520: Petsc3Int *rootVote = NULL, *leafVote = NULL;
1521: /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1522: if (balance) {
1523: PetscRandom r;
1525: PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
1526: PetscCall(PetscRandomSetInterval(r, 0, 2467 * size));
1527: PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
1528: PetscCall(VecSetSizes(shifts, numShifts, numShifts));
1529: PetscCall(VecSetType(shifts, VECSTANDARD));
1530: PetscCall(VecSetRandom(shifts, r));
1531: PetscCall(PetscRandomDestroy(&r));
1532: PetscCall(VecGetArrayRead(shifts, &shift));
1533: }
1535: PetscCall(PetscMalloc1(nroots, &rootVote));
1536: PetscCall(PetscMalloc1(nleaves, &leafVote));
1537: /* Point ownership vote: Process with highest rank owns shared points */
1538: for (p = 0; p < nleaves; ++p) {
1539: if (shiftDebug) {
1540: PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject)dm), "[%d] Point %" PetscInt_FMT " RemotePoint %" PetscInt_FMT " Shift %" PetscInt_FMT " MyRank %" PetscInt_FMT "\n", rank, leaves ? leaves[p] : p, roots[p].index,
1541: (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]), (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size));
1542: }
1543: /* Either put in a bid or we know we own it */
1544: leafVote[p].vote = (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size;
1545: leafVote[p].rank = rank;
1546: leafVote[p].index = p;
1547: }
1548: for (p = 0; p < nroots; p++) {
1549: /* Root must not participate in the reduction; flag it so that MAXLOC does not use it */
1550: rootVote[p].vote = -3;
1551: rootVote[p].rank = -3;
1552: rootVote[p].index = -3;
1553: }
1554: PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
1555: PetscCallMPI(MPI_Type_commit(&datatype));
1556: PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
1557: PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
1558: PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
1559: PetscCallMPI(MPI_Op_free(&op));
1560: PetscCallMPI(MPI_Type_free(&datatype));
1561: for (p = 0; p < nroots; p++) {
1562: rootNodes[p].rank = rootVote[p].rank;
1563: rootNodes[p].index = rootVote[p].index;
1564: }
1565: PetscCall(PetscFree(leafVote));
1566: PetscCall(PetscFree(rootVote));
1567: } else {
1568: for (p = 0; p < nroots; p++) {
1569: rootNodes[p].index = -1;
1570: rootNodes[p].rank = rank;
1571: }
1572: for (p = 0; p < nleaves; p++) {
1573: /* Write new local id into old location */
1574: if (roots[p].rank == rank) rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1575: }
1576: }
1577: PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes, MPI_REPLACE));
1578: PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes, MPI_REPLACE));
1580: for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1581: if (leafNodes[p].rank != rank) npointLeaves++;
1582: }
1583: PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
1584: PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
1585: for (idx = 0, p = 0; p < nleaves; p++) {
1586: if (leafNodes[p].rank != rank) {
1587: /* Note that pointLocal is automatically sorted as it is a sublist of 0, ..., nleaves-1 */
1588: pointLocal[idx] = p;
1589: pointRemote[idx] = leafNodes[p];
1590: idx++;
1591: }
1592: }
1593: if (shift) {
1594: PetscCall(VecRestoreArrayRead(shifts, &shift));
1595: PetscCall(VecDestroy(&shifts));
1596: }
1597: if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject)dm), PETSC_STDOUT));
1598: PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
1599: PetscCall(PetscFree2(rootNodes, leafNodes));
1600: PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1601: if (PetscDefined(USE_DEBUG)) PetscCall(DMPlexCheckPointSF(dm, *pointSF, PETSC_FALSE));
1602: PetscFunctionReturn(PETSC_SUCCESS);
1603: }
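/*
  A minimal sketch of how the point SF built above is typically installed on a
  freshly migrated DM; this mirrors what DMPlexDistribute() does further down.
  The wrapper function name and the incoming arguments are assumptions for
  illustration.
*/
static PetscErrorCode ExampleInstallPointSF(DM dmParallel, PetscSF sfMigration)
{
  PetscSF sfPoint;

  PetscFunctionBegin;
  PetscCall(DMPlexCreatePointSF(dmParallel, sfMigration, PETSC_TRUE, &sfPoint)); /* vote on ownership */
  PetscCall(DMSetPointSF(dmParallel, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  PetscFunctionReturn(PETSC_SUCCESS);
}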
1605: /*@C
1606: DMPlexMigrate - Migrates internal `DM` data over the supplied star forest
1608: Collective
1610: Input Parameters:
1611: + dm - The source `DMPLEX` object
1612: - sf - The star forest communication context describing the migration pattern
1614: Output Parameter:
1615: . targetDM - The target `DMPLEX` object
1617: Level: intermediate
1619: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1620: @*/
1621: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1622: {
1623: MPI_Comm comm;
1624: PetscInt dim, cdim, nroots;
1625: PetscSF sfPoint;
1626: ISLocalToGlobalMapping ltogMigration;
1627: ISLocalToGlobalMapping ltogOriginal = NULL;
1629: PetscFunctionBegin;
1631: PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0));
1632: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1633: PetscCall(DMGetDimension(dm, &dim));
1634: PetscCall(DMSetDimension(targetDM, dim));
1635: PetscCall(DMGetCoordinateDim(dm, &cdim));
1636: PetscCall(DMSetCoordinateDim(targetDM, cdim));
1638: /* Check for a one-to-all distribution pattern */
1639: PetscCall(DMGetPointSF(dm, &sfPoint));
1640: PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL));
1641: if (nroots >= 0) {
1642: IS isOriginal;
1643: PetscInt n, size, nleaves;
1644: PetscInt *numbering_orig, *numbering_new;
1646: /* Get the original point numbering */
1647: PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal));
1648: PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal));
1649: PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size));
1650: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1651: /* Convert to positive global numbers */
1652: for (n = 0; n < size; n++) {
1653: if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n] + 1);
1654: }
1655: /* Derive the new local-to-global mapping from the old one */
1656: PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL));
1657: PetscCall(PetscMalloc1(nleaves, &numbering_new));
1658: PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1659: PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1660: PetscCall(ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration));
1661: PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1662: PetscCall(ISDestroy(&isOriginal));
1663: } else {
1664: /* One-to-all distribution pattern: We can derive LToG from SF */
1665: PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration));
1666: }
1667: PetscCall(PetscObjectSetName((PetscObject)ltogMigration, "Point renumbering for DM migration"));
1668: PetscCall(ISLocalToGlobalMappingViewFromOptions(ltogMigration, (PetscObject)dm, "-partition_view"));
1669: /* Migrate DM data to target DM */
1670: PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM));
1671: PetscCall(DMPlexDistributeLabels(dm, sf, targetDM));
1672: PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM));
1673: PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM));
1674: PetscCall(ISLocalToGlobalMappingDestroy(&ltogOriginal));
1675: PetscCall(ISLocalToGlobalMappingDestroy(&ltogMigration));
1676: PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0));
1677: PetscFunctionReturn(PETSC_SUCCESS);
1678: }
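/*
  A minimal sketch of calling DMPlexMigrate(): create an empty target DMPLEX on
  the same communicator and move the mesh data over a given migration star
  forest. The wrapper function name, the target name string, and the assumption
  that "sf" is a valid migration SF are illustrative.
*/
static PetscErrorCode ExampleMigrate(DM dm, PetscSF sf, DM *targetDM)
{
  PetscFunctionBegin;
  PetscCall(DMPlexCreate(PetscObjectComm((PetscObject)dm), targetDM));
  PetscCall(PetscObjectSetName((PetscObject)*targetDM, "Migrated Mesh"));
  PetscCall(DMPlexMigrate(dm, sf, *targetDM)); /* cones, labels, coordinates, tree */
  PetscFunctionReturn(PETSC_SUCCESS);
}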
1680: /*@
1681: DMPlexRemapMigrationSF - Rewrite the distribution SF to account for overlap
1683: Collective
1685: Input Parameters:
1686: + sfOverlap - The `PetscSF` object just for the overlap
1687: - sfMigration - The original distribution `PetscSF` object
1689: Output Parameter:
1690: . sfMigrationNew - A rewritten `PetscSF` object that incorporates the overlap
1692: Level: developer
1694: .seealso: `DMPLEX`, `DM`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`, `DMPlexGetOverlap()`
1695: @*/
1696: PetscErrorCode DMPlexRemapMigrationSF(PetscSF sfOverlap, PetscSF sfMigration, PetscSF *sfMigrationNew)
1697: {
1698: PetscSFNode *newRemote, *permRemote;
1699: const PetscInt *oldLeaves;
1700: const PetscSFNode *oldRemote;
1701: PetscInt nroots, nleaves, noldleaves;
1703: PetscFunctionBegin;
1704: PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
1705: PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
1706: PetscCall(PetscMalloc1(nleaves, &newRemote));
1707: /* oldRemote: original root point mapping to original leaf point
1708: newRemote: original leaf point mapping to overlapped leaf point */
1709: if (oldLeaves) {
1710: /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1711: PetscCall(PetscMalloc1(noldleaves, &permRemote));
1712: for (PetscInt l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1713: oldRemote = permRemote;
1714: }
1715: PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote, MPI_REPLACE));
1716: PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote, MPI_REPLACE));
1717: if (oldLeaves) PetscCall(PetscFree(oldRemote));
1718: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)sfOverlap), sfMigrationNew));
1719: PetscCall(PetscSFSetGraph(*sfMigrationNew, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
1720: PetscFunctionReturn(PETSC_SUCCESS);
1721: }
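/*
  A minimal sketch of folding an overlap SF into an existing migration SF, as
  DMPlexDistribute() does below when overlap > 0. The wrapper function name is
  an assumption for illustration.
*/
static PetscErrorCode ExampleRemapMigration(PetscSF sfOverlap, PetscSF *sfMigration)
{
  PetscSF sfFull;

  PetscFunctionBegin;
  PetscCall(DMPlexRemapMigrationSF(sfOverlap, *sfMigration, &sfFull));
  PetscCall(PetscSFDestroy(sfMigration)); /* replace the old migration SF with the remapped one */
  *sfMigration = sfFull;
  PetscFunctionReturn(PETSC_SUCCESS);
}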
1723: /*@C
1724: DMPlexDistribute - Distributes the mesh and any associated sections.
1726: Collective
1728: Input Parameters:
1729: + dm - The original `DMPLEX` object
1730: - overlap - The overlap of partitions, 0 is the default
1732: Output Parameters:
1733: + sf - The `PetscSF` used for point distribution, or `NULL` if not needed
1734: - dmParallel - The distributed `DMPLEX` object
1736: Level: intermediate
1738: Note:
1739: If the mesh was not distributed, the output `dmParallel` will be `NULL`.
1741: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
1742: representation on the mesh.
1744: .seealso: `DMPLEX`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexGetOverlap()`
1745: @*/
1746: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1747: {
1748: MPI_Comm comm;
1749: PetscPartitioner partitioner;
1750: IS cellPart;
1751: PetscSection cellPartSection;
1752: DM dmCoord;
1753: DMLabel lblPartition, lblMigration;
1754: PetscSF sfMigration, sfStratified, sfPoint;
1755: PetscBool flg, balance;
1756: PetscMPIInt rank, size;
1758: PetscFunctionBegin;
1761: if (sf) PetscAssertPointer(sf, 3);
1762: PetscAssertPointer(dmParallel, 4);
1764: if (sf) *sf = NULL;
1765: *dmParallel = NULL;
1766: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1767: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1768: PetscCallMPI(MPI_Comm_size(comm, &size));
1769: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1771: PetscCall(PetscLogEventBegin(DMPLEX_Distribute, dm, 0, 0, 0));
1772: /* Create cell partition */
1773: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
1774: PetscCall(PetscSectionCreate(comm, &cellPartSection));
1775: PetscCall(DMPlexGetPartitioner(dm, &partitioner));
1776: PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
1777: PetscCall(PetscLogEventBegin(DMPLEX_PartSelf, dm, 0, 0, 0));
1778: {
1779: /* Convert partition to DMLabel */
1780: IS is;
1781: PetscHSetI ht;
1782: const PetscInt *points;
1783: PetscInt *iranks;
1784: PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks;
1786: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
1787: /* Preallocate strata */
1788: PetscCall(PetscHSetICreate(&ht));
1789: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1790: for (proc = pStart; proc < pEnd; proc++) {
1791: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1792: if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
1793: }
1794: PetscCall(PetscHSetIGetSize(ht, &nranks));
1795: PetscCall(PetscMalloc1(nranks, &iranks));
1796: PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
1797: PetscCall(PetscHSetIDestroy(&ht));
1798: PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
1799: PetscCall(PetscFree(iranks));
1800: /* Inline DMPlexPartitionLabelClosure() */
1801: PetscCall(ISGetIndices(cellPart, &points));
1802: PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
1803: for (proc = pStart; proc < pEnd; proc++) {
1804: PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
1805: if (!npoints) continue;
1806: PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
1807: PetscCall(DMPlexClosurePoints_Private(dm, npoints, points + poff, &is));
1808: PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
1809: PetscCall(ISDestroy(&is));
1810: }
1811: PetscCall(ISRestoreIndices(cellPart, &points));
1812: }
1813: PetscCall(PetscLogEventEnd(DMPLEX_PartSelf, dm, 0, 0, 0));
1815: PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
1816: PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
1817: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, PETSC_TRUE, &sfMigration));
1818: PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
1819: PetscCall(PetscSFDestroy(&sfMigration));
1820: sfMigration = sfStratified;
1821: PetscCall(PetscSFSetUp(sfMigration));
1822: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
1823: PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-partition_view", &flg));
1824: if (flg) {
1825: PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
1826: PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
1827: }
1829: /* Create non-overlapping parallel DM and migrate internal data */
1830: PetscCall(DMPlexCreate(comm, dmParallel));
1831: PetscCall(PetscObjectSetName((PetscObject)*dmParallel, "Parallel Mesh"));
1832: PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));
1834: /* Build the point SF without overlap */
1835: PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1836: PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
1837: PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
1838: PetscCall(DMSetPointSF(*dmParallel, sfPoint));
1839: PetscCall(DMPlexMigrateIsoperiodicFaceSF_Internal(dm, *dmParallel, sfMigration));
1840: PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
1841: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1842: if (flg) PetscCall(PetscSFView(sfPoint, NULL));
1844: if (overlap > 0) {
1845: DM dmOverlap;
1846: PetscSF sfOverlap, sfOverlapPoint;
1848: /* Add the partition overlap to the distributed DM */
1849: PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
1850: PetscCall(DMDestroy(dmParallel));
1851: *dmParallel = dmOverlap;
1852: if (flg) {
1853: PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
1854: PetscCall(PetscSFView(sfOverlap, NULL));
1855: }
1857: /* Re-map the migration SF to establish the full migration pattern */
1858: PetscCall(DMPlexRemapMigrationSF(sfOverlap, sfMigration, &sfOverlapPoint));
1859: PetscCall(PetscSFDestroy(&sfOverlap));
1860: PetscCall(PetscSFDestroy(&sfMigration));
1861: sfMigration = sfOverlapPoint;
1862: }
1863: /* Cleanup Partition */
1864: PetscCall(DMLabelDestroy(&lblPartition));
1865: PetscCall(DMLabelDestroy(&lblMigration));
1866: PetscCall(PetscSectionDestroy(&cellPartSection));
1867: PetscCall(ISDestroy(&cellPart));
1868: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmParallel));
1869: // Create sfNatural, need discretization information
1870: PetscCall(DMCopyDisc(dm, *dmParallel));
1871: if (dm->useNatural) {
1872: PetscSection section;
1874: PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
1874: PetscCall(DMGetLocalSection(dm, &section));
1877: /* First DM with useNatural = PETSC_TRUE is considered natural */
1878: /* sfMigration and sfNatural are respectively the point and dofs SFs mapping to this natural DM */
1879: /* Compose with a previous sfNatural if present */
1880: if (dm->sfNatural) {
1881: PetscSF natSF;
1883: PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &natSF));
1884: PetscCall(PetscSFCompose(dm->sfNatural, natSF, &(*dmParallel)->sfNatural));
1885: PetscCall(PetscSFDestroy(&natSF));
1886: } else {
1887: PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
1888: }
1889: /* Compose with a previous sfMigration if present */
1890: if (dm->sfMigration) {
1891: PetscSF naturalPointSF;
1893: PetscCall(PetscSFCompose(dm->sfMigration, sfMigration, &naturalPointSF));
1894: PetscCall(PetscSFDestroy(&sfMigration));
1895: sfMigration = naturalPointSF;
1896: }
1897: }
1898: /* Cleanup */
1899: if (sf) {
1900: *sf = sfMigration;
1901: } else PetscCall(PetscSFDestroy(&sfMigration));
1902: PetscCall(PetscSFDestroy(&sfPoint));
1903: PetscCall(PetscLogEventEnd(DMPLEX_Distribute, dm, 0, 0, 0));
1904: PetscFunctionReturn(PETSC_SUCCESS);
1905: }
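/*
  A minimal usage sketch for DMPlexDistribute(): build a mesh from options,
  distribute it with no overlap, and replace the serial DM with the parallel one.
  The wrapper function name and the reliance on -dm_plex_* command-line options
  to define the mesh are assumptions for illustration.
*/
static PetscErrorCode ExampleDistribute(MPI_Comm comm, DM *mesh)
{
  DM      dm, dmDist;
  PetscSF sf;

  PetscFunctionBegin;
  PetscCall(DMCreate(comm, &dm));
  PetscCall(DMSetType(dm, DMPLEX));
  PetscCall(DMSetFromOptions(dm)); /* e.g. a box mesh selected via -dm_plex_* options */
  PetscCall(DMPlexDistribute(dm, 0, &sf, &dmDist));
  if (dmDist) { /* NULL when the mesh was not distributed (single rank) */
    PetscCall(DMDestroy(&dm));
    dm = dmDist;
  }
  if (sf) PetscCall(PetscSFDestroy(&sf));
  *mesh = dm;
  PetscFunctionReturn(PETSC_SUCCESS);
}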
1907: PetscErrorCode DMPlexDistributeOverlap_Internal(DM dm, PetscInt overlap, MPI_Comm newcomm, const char *ovlboundary, PetscSF *sf, DM *dmOverlap)
1908: {
1909: DM_Plex *mesh = (DM_Plex *)dm->data;
1910: MPI_Comm comm;
1911: PetscMPIInt size, rank;
1912: PetscSection rootSection, leafSection;
1913: IS rootrank, leafrank;
1914: DM dmCoord;
1915: DMLabel lblOverlap;
1916: PetscSF sfOverlap, sfStratified, sfPoint;
1917: PetscBool clear_ovlboundary;
1919: PetscFunctionBegin;
1920: if (sf) *sf = NULL;
1921: *dmOverlap = NULL;
1922: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1923: PetscCallMPI(MPI_Comm_size(comm, &size));
1924: PetscCallMPI(MPI_Comm_rank(comm, &rank));
1925: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1926: {
1927: // We need to get options for the already-distributed mesh, so it must be done here
1928: PetscInt overlap;
1929: const char *prefix;
1930: char oldPrefix[PETSC_MAX_PATH_LEN];
1932: oldPrefix[0] = '\0';
1933: PetscCall(PetscObjectGetOptionsPrefix((PetscObject)dm, &prefix));
1934: PetscCall(PetscStrncpy(oldPrefix, prefix, sizeof(oldPrefix)));
1935: PetscCall(PetscObjectAppendOptionsPrefix((PetscObject)dm, "dist_"));
1936: PetscCall(DMPlexGetOverlap(dm, &overlap));
1937: PetscObjectOptionsBegin((PetscObject)dm);
1938: PetscCall(DMSetFromOptions_Overlap_Plex(dm, PetscOptionsObject, &overlap));
1939: PetscOptionsEnd();
1940: PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dm, oldPrefix[0] == '\0' ? NULL : oldPrefix));
1941: }
1942: if (ovlboundary) {
1943: PetscBool flg;
1944: PetscCall(DMHasLabel(dm, ovlboundary, &flg));
1945: if (!flg) {
1946: DMLabel label;
1948: PetscCall(DMCreateLabel(dm, ovlboundary));
1949: PetscCall(DMGetLabel(dm, ovlboundary, &label));
1950: PetscCall(DMPlexMarkBoundaryFaces(dm, 1, label));
1951: clear_ovlboundary = PETSC_TRUE;
1952: }
1953: }
1954: PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
1955: /* Compute point overlap with neighbouring processes on the distributed DM */
1956: PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
1957: PetscCall(PetscSectionCreate(newcomm, &rootSection));
1958: PetscCall(PetscSectionCreate(newcomm, &leafSection));
1959: PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
1960: if (mesh->numOvLabels) PetscCall(DMPlexCreateOverlapLabelFromLabels(dm, mesh->numOvLabels, mesh->ovLabels, mesh->ovValues, mesh->numOvExLabels, mesh->ovExLabels, mesh->ovExValues, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
1961: else PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
1962: /* Convert overlap label to stratified migration SF */
1963: PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, PETSC_FALSE, &sfOverlap));
1964: PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
1965: PetscCall(PetscSFDestroy(&sfOverlap));
1966: sfOverlap = sfStratified;
1967: PetscCall(PetscObjectSetName((PetscObject)sfOverlap, "Overlap SF"));
1968: PetscCall(PetscSFSetFromOptions(sfOverlap));
1970: PetscCall(PetscSectionDestroy(&rootSection));
1971: PetscCall(PetscSectionDestroy(&leafSection));
1972: PetscCall(ISDestroy(&rootrank));
1973: PetscCall(ISDestroy(&leafrank));
1974: PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
1976: /* Build the overlapping DM */
1977: PetscCall(DMPlexCreate(newcomm, dmOverlap));
1978: PetscCall(PetscObjectSetName((PetscObject)*dmOverlap, "Parallel Mesh"));
1979: PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
1980: /* Store the overlap in the new DM */
1981: PetscCall(DMPlexSetOverlap_Plex(*dmOverlap, dm, overlap));
1982: /* Build the new point SF */
1983: PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
1984: PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
1985: PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
1986: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1987: PetscCall(DMGetCellCoordinateDM(*dmOverlap, &dmCoord));
1988: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
1989: PetscCall(PetscSFDestroy(&sfPoint));
1990: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmOverlap));
1991: /* TODO: labels stored inside the DS for regions should be handled here */
1992: PetscCall(DMCopyDisc(dm, *dmOverlap));
1993: /* Cleanup overlap partition */
1994: PetscCall(DMLabelDestroy(&lblOverlap));
1995: if (sf) *sf = sfOverlap;
1996: else PetscCall(PetscSFDestroy(&sfOverlap));
1997: if (ovlboundary) {
1998: DMLabel label;
1999: PetscBool flg;
2001: if (clear_ovlboundary) PetscCall(DMRemoveLabel(dm, ovlboundary, NULL));
2002: PetscCall(DMHasLabel(*dmOverlap, ovlboundary, &flg));
2003: PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Missing %s label", ovlboundary);
2004: PetscCall(DMGetLabel(*dmOverlap, ovlboundary, &label));
2005: PetscCall(DMPlexMarkBoundaryFaces_Internal(*dmOverlap, 1, 0, label, PETSC_TRUE));
2006: }
2007: PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
2008: PetscFunctionReturn(PETSC_SUCCESS);
2009: }
2011: /*@C
2012: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping `DM`.
2014: Collective
2016: Input Parameters:
2017: + dm - The non-overlapping distributed `DMPLEX` object
2018: - overlap - The overlap of partitions (the same on all ranks)
2020: Output Parameters:
2021: + sf - The `PetscSF` used for point distribution, or pass `NULL` if not needed
2022: - dmOverlap - The overlapping distributed `DMPLEX` object
2024: Options Database Keys:
2025: + -dm_plex_overlap_labels <name1,name2,...> - List of overlap label names
2026: . -dm_plex_overlap_values <int1,int2,...> - List of overlap label values
2027: . -dm_plex_overlap_exclude_label <name> - Label used to exclude points from overlap
2028: - -dm_plex_overlap_exclude_value <int> - Label value used to exclude points from overlap
2030: Level: advanced
2032: Notes:
2033: If the mesh was not distributed, the return value is `NULL`.
2035: The user can control the definition of adjacency for the mesh using `DMSetAdjacency()`. They should choose the combination appropriate for the function
2036: representation on the mesh.
2038: .seealso: `DMPLEX`, `PetscSF`, `DM`, `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexCreateOverlapLabel()`, `DMPlexGetOverlap()`
2039: @*/
2040: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
2041: {
2042: PetscFunctionBegin;
2045: if (sf) PetscAssertPointer(sf, 3);
2046: PetscAssertPointer(dmOverlap, 4);
2047: PetscCall(DMPlexDistributeOverlap_Internal(dm, overlap, PetscObjectComm((PetscObject)dm), NULL, sf, dmOverlap));
2048: PetscFunctionReturn(PETSC_SUCCESS);
2049: }
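/*
  A minimal sketch of adding one layer of overlap to an already distributed,
  non-overlapping mesh. The wrapper function name is an assumption; "dmDist" is
  assumed to come from a prior DMPlexDistribute() call.
*/
static PetscErrorCode ExampleAddOverlap(DM *dmDist)
{
  DM      dmOverlap;
  PetscSF sfOverlap;

  PetscFunctionBegin;
  PetscCall(DMPlexDistributeOverlap(*dmDist, 1, &sfOverlap, &dmOverlap));
  if (dmOverlap) { /* NULL when the mesh was not distributed */
    PetscCall(DMDestroy(dmDist));
    *dmDist = dmOverlap;
  }
  if (sfOverlap) PetscCall(PetscSFDestroy(&sfOverlap));
  PetscFunctionReturn(PETSC_SUCCESS);
}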
2051: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
2052: {
2053: DM_Plex *mesh = (DM_Plex *)dm->data;
2055: PetscFunctionBegin;
2056: *overlap = mesh->overlap;
2057: PetscFunctionReturn(PETSC_SUCCESS);
2058: }
2060: PetscErrorCode DMPlexSetOverlap_Plex(DM dm, DM dmSrc, PetscInt overlap)
2061: {
2062: DM_Plex *mesh = NULL;
2063: DM_Plex *meshSrc = NULL;
2065: PetscFunctionBegin;
2067: mesh = (DM_Plex *)dm->data;
2068: if (dmSrc) {
2070: meshSrc = (DM_Plex *)dmSrc->data;
2071: mesh->overlap = meshSrc->overlap;
2072: } else {
2073: mesh->overlap = 0;
2074: }
2075: mesh->overlap += overlap;
2076: PetscFunctionReturn(PETSC_SUCCESS);
2077: }
2079: /*@
2080: DMPlexGetOverlap - Get the width of the cell overlap
2082: Not Collective
2084: Input Parameter:
2085: . dm - The `DM`
2087: Output Parameter:
2088: . overlap - the width of the cell overlap
2090: Level: intermediate
2092: .seealso: `DMPLEX`, `DMPlexSetOverlap()`, `DMPlexDistribute()`
2093: @*/
2094: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
2095: {
2096: PetscFunctionBegin;
2098: PetscAssertPointer(overlap, 2);
2099: PetscUseMethod(dm, "DMPlexGetOverlap_C", (DM, PetscInt *), (dm, overlap));
2100: PetscFunctionReturn(PETSC_SUCCESS);
2101: }
2103: /*@
2104: DMPlexSetOverlap - Set the width of the cell overlap
2106: Logically Collective
2108: Input Parameters:
2109: + dm - The `DM`
2110: . dmSrc - The `DM` that produced this one, or `NULL`
2111: - overlap - the width of the cell overlap
2113: Level: intermediate
2115: Note:
2116: The overlap from `dmSrc` is added to `dm`
2118: .seealso: `DMPLEX`, `DMPlexGetOverlap()`, `DMPlexDistribute()`
2119: @*/
2120: PetscErrorCode DMPlexSetOverlap(DM dm, DM dmSrc, PetscInt overlap)
2121: {
2122: PetscFunctionBegin;
2125: PetscTryMethod(dm, "DMPlexSetOverlap_C", (DM, DM, PetscInt), (dm, dmSrc, overlap));
2126: PetscFunctionReturn(PETSC_SUCCESS);
2127: }
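/*
  A minimal sketch of recording and querying the overlap width on a DMPLEX. The
  wrapper function name is an assumption; passing dmSrc == NULL resets the stored
  width before the given overlap is added, as DMPlexSetOverlap_Plex() above shows.
*/
static PetscErrorCode ExampleOverlapWidth(DM dm)
{
  PetscInt overlap;

  PetscFunctionBegin;
  PetscCall(DMPlexSetOverlap(dm, NULL, 1)); /* record a width of one layer */
  PetscCall(DMPlexGetOverlap(dm, &overlap));
  PetscCall(PetscPrintf(PetscObjectComm((PetscObject)dm), "Cell overlap width: %" PetscInt_FMT "\n", overlap));
  PetscFunctionReturn(PETSC_SUCCESS);
}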
2129: PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
2130: {
2131: DM_Plex *mesh = (DM_Plex *)dm->data;
2133: PetscFunctionBegin;
2134: mesh->distDefault = dist;
2135: PetscFunctionReturn(PETSC_SUCCESS);
2136: }
2138: /*@
2139: DMPlexDistributeSetDefault - Set flag indicating whether the `DM` should be distributed by default
2141: Logically Collective
2143: Input Parameters:
2144: + dm - The `DM`
2145: - dist - Flag for distribution
2147: Level: intermediate
2149: .seealso: `DMPLEX`, `DMPlexDistributeGetDefault()`, `DMPlexDistribute()`
2150: @*/
2151: PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
2152: {
2153: PetscFunctionBegin;
2156: PetscTryMethod(dm, "DMPlexDistributeSetDefault_C", (DM, PetscBool), (dm, dist));
2157: PetscFunctionReturn(PETSC_SUCCESS);
2158: }
2160: PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
2161: {
2162: DM_Plex *mesh = (DM_Plex *)dm->data;
2164: PetscFunctionBegin;
2165: *dist = mesh->distDefault;
2166: PetscFunctionReturn(PETSC_SUCCESS);
2167: }
2169: /*@
2170: DMPlexDistributeGetDefault - Get flag indicating whether the `DM` should be distributed by default
2172: Not Collective
2174: Input Parameter:
2175: . dm - The `DM`
2177: Output Parameter:
2178: . dist - Flag for distribution
2180: Level: intermediate
2182: .seealso: `DMPLEX`, `DM`, `DMPlexDistributeSetDefault()`, `DMPlexDistribute()`
2183: @*/
2184: PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
2185: {
2186: PetscFunctionBegin;
2188: PetscAssertPointer(dist, 2);
2189: PetscUseMethod(dm, "DMPlexDistributeGetDefault_C", (DM, PetscBool *), (dm, dist));
2190: PetscFunctionReturn(PETSC_SUCCESS);
2191: }
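/*
  A minimal sketch of disabling default distribution before DMSetFromOptions(),
  so that a later, explicit DMPlexDistribute() call controls the partitioning.
  The wrapper function name is an assumption for illustration.
*/
static PetscErrorCode ExampleNoAutoDistribute(DM dm)
{
  PetscBool dist;

  PetscFunctionBegin;
  PetscCall(DMPlexDistributeSetDefault(dm, PETSC_FALSE));
  PetscCall(DMPlexDistributeGetDefault(dm, &dist));
  PetscCall(PetscPrintf(PetscObjectComm((PetscObject)dm), "Distribute by default: %s\n", dist ? "yes" : "no"));
  PetscCall(DMSetFromOptions(dm));
  PetscFunctionReturn(PETSC_SUCCESS);
}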
2193: /*@C
2194: DMPlexGetGatherDM - Get a copy of the `DMPLEX` that gathers all points on the
2195: root process of the original's communicator.
2197: Collective
2199: Input Parameter:
2200: . dm - the original `DMPLEX` object
2202: Output Parameters:
2203: + sf - the `PetscSF` used for point distribution (optional)
2204: - gatherMesh - the gathered `DM` object, or `NULL`
2206: Level: intermediate
2208: .seealso: `DMPLEX`, `DM`, `PetscSF`, `DMPlexDistribute()`, `DMPlexGetRedundantDM()`
2209: @*/
2210: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
2211: {
2212: MPI_Comm comm;
2213: PetscMPIInt size;
2214: PetscPartitioner oldPart, gatherPart;
2216: PetscFunctionBegin;
2218: PetscAssertPointer(gatherMesh, 3);
2219: *gatherMesh = NULL;
2220: if (sf) *sf = NULL;
2221: comm = PetscObjectComm((PetscObject)dm);
2222: PetscCallMPI(MPI_Comm_size(comm, &size));
2223: if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
2224: PetscCall(DMPlexGetPartitioner(dm, &oldPart));
2225: PetscCall(PetscObjectReference((PetscObject)oldPart));
2226: PetscCall(PetscPartitionerCreate(comm, &gatherPart));
2227: PetscCall(PetscPartitionerSetType(gatherPart, PETSCPARTITIONERGATHER));
2228: PetscCall(DMPlexSetPartitioner(dm, gatherPart));
2229: PetscCall(DMPlexDistribute(dm, 0, sf, gatherMesh));
2231: PetscCall(DMPlexSetPartitioner(dm, oldPart));
2232: PetscCall(PetscPartitionerDestroy(&gatherPart));
2233: PetscCall(PetscPartitionerDestroy(&oldPart));
2234: PetscFunctionReturn(PETSC_SUCCESS);
2235: }
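/*
  A minimal sketch of gathering a distributed mesh onto the root rank, for
  example to view it serially. The wrapper function name and the viewer option
  name "-gather_dm_view" are assumptions for illustration.
*/
static PetscErrorCode ExampleGather(DM dm)
{
  DM      gatherMesh;
  PetscSF sf;

  PetscFunctionBegin;
  PetscCall(DMPlexGetGatherDM(dm, &sf, &gatherMesh));
  if (gatherMesh) { /* NULL on a single rank */
    PetscCall(DMViewFromOptions(gatherMesh, NULL, "-gather_dm_view"));
    PetscCall(DMDestroy(&gatherMesh));
  }
  if (sf) PetscCall(PetscSFDestroy(&sf));
  PetscFunctionReturn(PETSC_SUCCESS);
}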
2237: /*@C
2238: DMPlexGetRedundantDM - Get a copy of the `DMPLEX` that is completely copied on each process.
2240: Collective
2242: Input Parameter:
2243: . dm - the original `DMPLEX` object
2245: Output Parameters:
2246: + sf - the `PetscSF` used for point distribution (optional)
2247: - redundantMesh - the redundant `DM` object, or `NULL`
2249: Level: intermediate
2251: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetGatherDM()`
2252: @*/
2253: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
2254: {
2255: MPI_Comm comm;
2256: PetscMPIInt size, rank;
2257: PetscInt pStart, pEnd, p;
2258: PetscInt numPoints = -1;
2259: PetscSF migrationSF, sfPoint, gatherSF;
2260: DM gatherDM, dmCoord;
2261: PetscSFNode *points;
2263: PetscFunctionBegin;
2265: PetscAssertPointer(redundantMesh, 3);
2266: *redundantMesh = NULL;
2267: comm = PetscObjectComm((PetscObject)dm);
2268: PetscCallMPI(MPI_Comm_size(comm, &size));
2269: if (size == 1) {
2270: PetscCall(PetscObjectReference((PetscObject)dm));
2271: *redundantMesh = dm;
2272: if (sf) *sf = NULL;
2273: PetscFunctionReturn(PETSC_SUCCESS);
2274: }
2275: PetscCall(DMPlexGetGatherDM(dm, &gatherSF, &gatherDM));
2276: if (!gatherDM) PetscFunctionReturn(PETSC_SUCCESS);
2277: PetscCallMPI(MPI_Comm_rank(comm, &rank));
2278: PetscCall(DMPlexGetChart(gatherDM, &pStart, &pEnd));
2279: numPoints = pEnd - pStart;
2280: PetscCallMPI(MPI_Bcast(&numPoints, 1, MPIU_INT, 0, comm));
2281: PetscCall(PetscMalloc1(numPoints, &points));
2282: PetscCall(PetscSFCreate(comm, &migrationSF));
2283: for (p = 0; p < numPoints; p++) {
2284: points[p].index = p;
2285: points[p].rank = 0;
2286: }
2287: PetscCall(PetscSFSetGraph(migrationSF, pEnd - pStart, numPoints, NULL, PETSC_OWN_POINTER, points, PETSC_OWN_POINTER));
2288: PetscCall(DMPlexCreate(comm, redundantMesh));
2289: PetscCall(PetscObjectSetName((PetscObject)*redundantMesh, "Redundant Mesh"));
2290: PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
2291: /* This is to express that all points are in the overlap */
2292: PetscCall(DMPlexSetOverlap_Plex(*redundantMesh, NULL, PETSC_MAX_INT));
2293: PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
2294: PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
2295: PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
2296: if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2297: PetscCall(PetscSFDestroy(&sfPoint));
2298: if (sf) {
2299: PetscSF tsf;
2301: PetscCall(PetscSFCompose(gatherSF, migrationSF, &tsf));
2302: PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
2303: PetscCall(PetscSFDestroy(&tsf));
2304: }
2305: PetscCall(PetscSFDestroy(&migrationSF));
2306: PetscCall(PetscSFDestroy(&gatherSF));
2307: PetscCall(DMDestroy(&gatherDM));
2308: PetscCall(DMCopyDisc(dm, *redundantMesh));
2309: PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *redundantMesh));
2310: PetscFunctionReturn(PETSC_SUCCESS);
2311: }
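/*
  A minimal sketch of building a fully redundant copy of the mesh on every rank.
  The wrapper function name is an assumption for illustration.
*/
static PetscErrorCode ExampleRedundant(DM dm)
{
  DM      redundantMesh;
  PetscSF sf;

  PetscFunctionBegin;
  PetscCall(DMPlexGetRedundantDM(dm, &sf, &redundantMesh));
  if (redundantMesh) {
    /* every rank now holds the complete mesh */
    PetscCall(DMDestroy(&redundantMesh));
  }
  if (sf) PetscCall(PetscSFDestroy(&sf));
  PetscFunctionReturn(PETSC_SUCCESS);
}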
2313: /*@
2314: DMPlexIsDistributed - Find out whether this `DM` is distributed, i.e. more than one rank owns some points.
2316: Collective
2318: Input Parameter:
2319: . dm - The `DM` object
2321: Output Parameter:
2322: . distributed - Flag whether the `DM` is distributed
2324: Level: intermediate
2326: Notes:
2327: This currently finds out whether at least two ranks have any DAG points.
2328: This involves `MPI_Allreduce()` with one integer.
2329: The result is currently not stashed so every call to this routine involves this global communication.
2331: .seealso: `DMPLEX`, `DMPlexDistribute()`, `DMPlexGetOverlap()`, `DMPlexIsInterpolated()`
2332: @*/
2333: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2334: {
2335: PetscInt pStart, pEnd, count;
2336: MPI_Comm comm;
2337: PetscMPIInt size;
2339: PetscFunctionBegin;
2341: PetscAssertPointer(distributed, 2);
2342: PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
2343: PetscCallMPI(MPI_Comm_size(comm, &size));
2344: if (size == 1) {
2345: *distributed = PETSC_FALSE;
2346: PetscFunctionReturn(PETSC_SUCCESS);
2347: }
2348: PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
2349: count = (pEnd - pStart) > 0 ? 1 : 0;
2350: PetscCall(MPIU_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
2351: *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2352: PetscFunctionReturn(PETSC_SUCCESS);
2353: }
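/*
  A minimal sketch of guarding a distribution step with DMPlexIsDistributed():
  only call DMPlexDistribute() if no other rank holds points yet. The wrapper
  function name is an assumption for illustration.
*/
static PetscErrorCode ExampleEnsureDistributed(DM *dm)
{
  PetscBool distributed;

  PetscFunctionBegin;
  PetscCall(DMPlexIsDistributed(*dm, &distributed));
  if (!distributed) {
    DM dmDist;

    PetscCall(DMPlexDistribute(*dm, 0, NULL, &dmDist));
    if (dmDist) {
      PetscCall(DMDestroy(dm));
      *dm = dmDist;
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}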
2355: /*@C
2356: DMPlexDistributionSetName - Set the name of the specific parallel distribution
2358: Input Parameters:
2359: + dm - The `DM`
2360: - name - The name of the specific parallel distribution
2362: Level: developer
2364: Note:
2365: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2366: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2367: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2368: loads the parallel distribution stored in file under this name.
2370: .seealso: `DMPLEX`, `DMPlexDistributionGetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2371: @*/
2372: PetscErrorCode DMPlexDistributionSetName(DM dm, const char name[])
2373: {
2374: DM_Plex *mesh = (DM_Plex *)dm->data;
2376: PetscFunctionBegin;
2378: if (name) PetscAssertPointer(name, 2);
2379: PetscCall(PetscFree(mesh->distributionName));
2380: PetscCall(PetscStrallocpy(name, &mesh->distributionName));
2381: PetscFunctionReturn(PETSC_SUCCESS);
2382: }
2384: /*@C
2385: DMPlexDistributionGetName - Retrieve the name of the specific parallel distribution
2387: Input Parameter:
2388: . dm - The `DM`
2390: Output Parameter:
2391: . name - The name of the specific parallel distribution
2393: Level: developer
2395: Note:
2396: If distribution name is set when saving, `DMPlexTopologyView()` saves the plex's
2397: parallel distribution (i.e., partition, ownership, and local ordering of points) under
2398: this name. Conversely, if distribution name is set when loading, `DMPlexTopologyLoad()`
2399: loads the parallel distribution stored in file under this name.
2401: .seealso: `DMPLEX`, `DMPlexDistributionSetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2402: @*/
2403: PetscErrorCode DMPlexDistributionGetName(DM dm, const char *name[])
2404: {
2405: DM_Plex *mesh = (DM_Plex *)dm->data;
2407: PetscFunctionBegin;
2409: PetscAssertPointer(name, 2);
2410: *name = mesh->distributionName;
2411: PetscFunctionReturn(PETSC_SUCCESS);
2412: }
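/*
  A minimal sketch of naming the parallel distribution before saving the mesh
  topology, so the same distribution can be selected when loading. The wrapper
  function name, the distribution name, the file name, and the HDF5 viewer call
  (which assumes PETSc was configured with HDF5 and petscviewerhdf5.h is
  available) are assumptions for illustration.
*/
static PetscErrorCode ExampleSaveDistribution(DM dm)
{
  PetscViewer viewer;

  PetscFunctionBegin;
  PetscCall(DMPlexDistributionSetName(dm, "my_distribution"));
  PetscCall(PetscViewerHDF5Open(PetscObjectComm((PetscObject)dm), "mesh.h5", FILE_MODE_WRITE, &viewer));
  PetscCall(DMPlexTopologyView(dm, viewer));
  PetscCall(PetscViewerDestroy(&viewer));
  PetscFunctionReturn(PETSC_SUCCESS);
}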