Actual source code: plexdistribute.c
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsc/private/dmlabelimpl.h>
4: /*@C
5: DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
7: Input Parameters:
8: + dm - The DM object
9: . user - The user callback, may be NULL (to clear the callback)
10: - ctx - context for callback evaluation, may be NULL
12: Level: advanced
14: Notes:
15: The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.
17: Any setting here overrides other configuration of DMPlex adjacency determination.
19: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
20: @*/
21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
22: {
23: DM_Plex *mesh = (DM_Plex *)dm->data;
27: mesh->useradjacency = user;
28: mesh->useradjacencyctx = ctx;
29: return(0);
30: }
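/* Usage sketch (not part of plexdistribute.c): a hypothetical callback, here named
   UserConeAdjacency, with the signature expected by DMPlexSetAdjacencyUser(). It defines
   the adjacency of a point to be its cone. On entry *adjSize is the capacity of adj[];
   on exit it is the number of adjacent points written, matching how the internal
   adjacency routines below use these arguments. Register it with
     DMPlexSetAdjacencyUser(dm, UserConeAdjacency, NULL);
*/
static PetscErrorCode UserConeAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
{
  const PetscInt *cone;
  PetscInt        coneSize, c;
  PetscErrorCode  ierr;

  ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr);
  ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr);
  if (coneSize > *adjSize) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Adjacency array too small for cone");
  for (c = 0; c < coneSize; ++c) adj[c] = cone[c];
  *adjSize = coneSize;
  return 0;
}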
32: /*@C
33: DMPlexGetAdjacencyUser - get the user-defined adjacency callback
35: Input Parameter:
36: . dm - The DM object
38: Output Parameters:
39: + user - The user callback
40: - ctx - context for callback evaluation
42: Level: advanced
44: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
45: @*/
46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
47: {
48: DM_Plex *mesh = (DM_Plex *)dm->data;
52: if (user) *user = mesh->useradjacency;
53: if (ctx) *ctx = mesh->useradjacencyctx;
54: return(0);
55: }
57: /*@
58: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
60: Input Parameters:
61: + dm - The DM object
62: - useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
64: Level: intermediate
66: .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
67: @*/
68: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
69: {
70: DM_Plex *mesh = (DM_Plex *) dm->data;
74: mesh->useAnchors = useAnchors;
75: return(0);
76: }
78: /*@
79: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
81: Input Parameter:
82: . dm - The DM object
84: Output Parameter:
85: . useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
87: Level: intermediate
89: .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
90: @*/
91: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
92: {
93: DM_Plex *mesh = (DM_Plex *) dm->data;
98: *useAnchors = mesh->useAnchors;
99: return(0);
100: }
102: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103: {
104: const PetscInt *cone = NULL;
105: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
106: PetscErrorCode ierr;
109: DMPlexGetConeSize(dm, p, &coneSize);
110: DMPlexGetCone(dm, p, &cone);
111: for (c = 0; c <= coneSize; ++c) {
112: const PetscInt point = !c ? p : cone[c-1];
113: const PetscInt *support = NULL;
114: PetscInt supportSize, s, q;
116: DMPlexGetSupportSize(dm, point, &supportSize);
117: DMPlexGetSupport(dm, point, &support);
118: for (s = 0; s < supportSize; ++s) {
119: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
120: if (support[s] == adj[q]) break;
121: }
122: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
123: }
124: }
125: *adjSize = numAdj;
126: return(0);
127: }
129: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
130: {
131: const PetscInt *support = NULL;
132: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
133: PetscErrorCode ierr;
136: DMPlexGetSupportSize(dm, p, &supportSize);
137: DMPlexGetSupport(dm, p, &support);
138: for (s = 0; s <= supportSize; ++s) {
139: const PetscInt point = !s ? p : support[s-1];
140: const PetscInt *cone = NULL;
141: PetscInt coneSize, c, q;
143: DMPlexGetConeSize(dm, point, &coneSize);
144: DMPlexGetCone(dm, point, &cone);
145: for (c = 0; c < coneSize; ++c) {
146: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
147: if (cone[c] == adj[q]) break;
148: }
149: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
150: }
151: }
152: *adjSize = numAdj;
153: return(0);
154: }
156: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
157: {
158: PetscInt *star = NULL;
159: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
163: DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
164: for (s = 0; s < starSize*2; s += 2) {
165: const PetscInt *closure = NULL;
166: PetscInt closureSize, c, q;
168: DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
169: for (c = 0; c < closureSize*2; c += 2) {
170: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
171: if (closure[c] == adj[q]) break;
172: }
173: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
174: }
175: DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
176: }
177: DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
178: *adjSize = numAdj;
179: return(0);
180: }
182: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
183: {
184: static PetscInt asiz = 0;
185: PetscInt maxAnchors = 1;
186: PetscInt aStart = -1, aEnd = -1;
187: PetscInt maxAdjSize;
188: PetscSection aSec = NULL;
189: IS aIS = NULL;
190: const PetscInt *anchors;
191: DM_Plex *mesh = (DM_Plex *)dm->data;
192: PetscErrorCode ierr;
195: if (useAnchors) {
196: DMPlexGetAnchors(dm,&aSec,&aIS);
197: if (aSec) {
198: PetscSectionGetMaxDof(aSec,&maxAnchors);
199: maxAnchors = PetscMax(1,maxAnchors);
200: PetscSectionGetChart(aSec,&aStart,&aEnd);
201: ISGetIndices(aIS,&anchors);
202: }
203: }
204: if (!*adj) {
205: PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;
207: DMPlexGetChart(dm, &pStart,&pEnd);
208: DMPlexGetDepth(dm, &depth);
209: depth = PetscMax(depth, -depth);
210: DMPlexGetMaxSizes(dm, &maxC, &maxS);
211: coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
212: supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
213: asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
214: asiz *= maxAnchors;
215: asiz = PetscMin(asiz,pEnd-pStart);
216: PetscMalloc1(asiz,adj);
217: }
218: if (*adjSize < 0) *adjSize = asiz;
219: maxAdjSize = *adjSize;
220: if (mesh->useradjacency) {
221: (*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx);
222: } else if (useTransitiveClosure) {
223: DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
224: } else if (useCone) {
225: DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
226: } else {
227: DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
228: }
229: if (useAnchors && aSec) {
230: PetscInt origSize = *adjSize;
231: PetscInt numAdj = origSize;
232: PetscInt i = 0, j;
233: PetscInt *orig = *adj;
235: while (i < origSize) {
236: PetscInt p = orig[i];
237: PetscInt aDof = 0;
239: if (p >= aStart && p < aEnd) {
240: PetscSectionGetDof(aSec,p,&aDof);
241: }
242: if (aDof) {
243: PetscInt aOff;
244: PetscInt s, q;
246: for (j = i + 1; j < numAdj; j++) {
247: orig[j - 1] = orig[j];
248: }
249: origSize--;
250: numAdj--;
251: PetscSectionGetOffset(aSec,p,&aOff);
252: for (s = 0; s < aDof; ++s) {
253: for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
254: if (anchors[aOff+s] == orig[q]) break;
255: }
256: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
257: }
258: }
259: else {
260: i++;
261: }
262: }
263: *adjSize = numAdj;
264: ISRestoreIndices(aIS,&anchors);
265: }
266: return(0);
267: }
269: /*@
270: DMPlexGetAdjacency - Return all points adjacent to the given point
272: Input Parameters:
273: + dm - The DM object
274: - p - The point
276: Input/Output Parameters:
277: + adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE;
278: on output the number of adjacent points
279: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize;
280: on output contains the adjacent points
282: Level: advanced
284: Notes:
285: The user must PetscFree the adj array if it was not passed in.
287: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
288: @*/
289: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
290: {
291: PetscBool useCone, useClosure, useAnchors;
298: DMGetBasicAdjacency(dm, &useCone, &useClosure);
299: DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
300: DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);
301: return(0);
302: }
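/* Usage sketch (not part of plexdistribute.c): as the Notes above state, passing
   adjSize = PETSC_DETERMINE and adj = NULL lets DMPlexGetAdjacency() size and allocate
   the array, which the caller must then PetscFree(). */
static PetscErrorCode UserViewAdjacency(DM dm, PetscInt p)
{
  PetscInt      *adj = NULL, adjSize = PETSC_DETERMINE, a;
  PetscErrorCode ierr;

  ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
  for (a = 0; a < adjSize; ++a) {
    ierr = PetscPrintf(PETSC_COMM_SELF, "Point %D is adjacent to %D\n", p, adj[a]);CHKERRQ(ierr);
  }
  ierr = PetscFree(adj);CHKERRQ(ierr);
  return 0;
}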
304: /*@
305: DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity
307: Collective on dm
309: Input Parameters:
310: + dm - The DM
311: . sfPoint - The PetscSF which encodes point connectivity
312: . rootRankSection - The number of leaves for a given root point
313: . rootRanks - The rank of each edge into the root point
314: . leafRankSection - The number of processes sharing a given leaf point
315: - leafRanks - The rank of each process sharing a leaf point
317: Output Parameters:
318: + processRanks - A list of process neighbors, or NULL
319: - sfProcess - An SF encoding the two-sided process connectivity, or NULL
321: Level: developer
323: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
324: @*/
325: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
326: {
327: const PetscSFNode *remotePoints;
328: PetscInt *localPointsNew;
329: PetscSFNode *remotePointsNew;
330: const PetscInt *nranks;
331: PetscInt *ranksNew;
332: PetscBT neighbors;
333: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
334: PetscMPIInt size, proc, rank;
335: PetscErrorCode ierr;
342: MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
343: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
344: PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
345: PetscBTCreate(size, &neighbors);
346: PetscBTMemzero(size, neighbors);
347: /* Compute root-to-leaf process connectivity */
348: PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
349: ISGetIndices(rootRanks, &nranks);
350: for (p = pStart; p < pEnd; ++p) {
351: PetscInt ndof, noff, n;
353: PetscSectionGetDof(rootRankSection, p, &ndof);
354: PetscSectionGetOffset(rootRankSection, p, &noff);
355: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
356: }
357: ISRestoreIndices(rootRanks, &nranks);
358: /* Compute leaf-to-neighbor process connectivity */
359: PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
360: ISGetIndices(leafRanks, &nranks);
361: for (p = pStart; p < pEnd; ++p) {
362: PetscInt ndof, noff, n;
364: PetscSectionGetDof(leafRankSection, p, &ndof);
365: PetscSectionGetOffset(leafRankSection, p, &noff);
366: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
367: }
368: ISRestoreIndices(leafRanks, &nranks);
369: /* Compute leaf-to-root process connectivity */
370: for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
371: /* Calculate edges */
372: PetscBTClear(neighbors, rank);
373: for (proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
374: PetscMalloc1(numNeighbors, &ranksNew);
375: PetscMalloc1(numNeighbors, &localPointsNew);
376: PetscMalloc1(numNeighbors, &remotePointsNew);
377: for (proc = 0, n = 0; proc < size; ++proc) {
378: if (PetscBTLookup(neighbors, proc)) {
379: ranksNew[n] = proc;
380: localPointsNew[n] = proc;
381: remotePointsNew[n].index = rank;
382: remotePointsNew[n].rank = proc;
383: ++n;
384: }
385: }
386: PetscBTDestroy(&neighbors);
387: if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
388: else {PetscFree(ranksNew);}
389: if (sfProcess) {
390: PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
391: PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
392: PetscSFSetFromOptions(*sfProcess);
393: PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
394: }
395: return(0);
396: }
398: /*@
399: DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided information for an SF.
401: Collective on dm
403: Input Parameter:
404: . dm - The DM
406: Output Parameters:
407: + rootSection - The number of leaves for a given root point
408: . rootrank - The rank of each edge into the root point
409: . leafSection - The number of processes sharing a given leaf point
410: - leafrank - The rank of each process sharing a leaf point
412: Level: developer
414: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistribute(), DMPlexDistributeOverlap()
415: @*/
416: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
417: {
418: MPI_Comm comm;
419: PetscSF sfPoint;
420: const PetscInt *rootdegree;
421: PetscInt *myrank, *remoterank;
422: PetscInt pStart, pEnd, p, nedges;
423: PetscMPIInt rank;
424: PetscErrorCode ierr;
427: PetscObjectGetComm((PetscObject) dm, &comm);
428: MPI_Comm_rank(comm, &rank);
429: DMPlexGetChart(dm, &pStart, &pEnd);
430: DMGetPointSF(dm, &sfPoint);
431: /* Compute number of leaves for each root */
432: PetscObjectSetName((PetscObject) rootSection, "Root Section");
433: PetscSectionSetChart(rootSection, pStart, pEnd);
434: PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
435: PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
436: for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
437: PetscSectionSetUp(rootSection);
438: /* Gather rank of each leaf to root */
439: PetscSectionGetStorageSize(rootSection, &nedges);
440: PetscMalloc1(pEnd-pStart, &myrank);
441: PetscMalloc1(nedges, &remoterank);
442: for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
443: PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
444: PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
445: PetscFree(myrank);
446: ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
447: /* Distribute remote ranks to leaves */
448: PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
449: DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
450: return(0);
451: }
453: /*@C
454: DMPlexCreateOverlapLabel - Compute a label indicating which overlap points should be sent to other processes, as point/rank pairings.
456: Collective on dm
458: Input Parameters:
459: + dm - The DM
460: . levels - Number of overlap levels
461: . rootSection - The number of leaves for a given root point
462: . rootrank - The rank of each edge into the root point
463: . leafSection - The number of processes sharing a given leaf point
464: - leafrank - The rank of each process sharing a leaf point
466: Output Parameter:
467: . ovLabel - DMLabel containing remote overlap contributions as point/rank pairings
469: Level: developer
471: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
472: @*/
473: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
474: {
475: MPI_Comm comm;
476: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
477: PetscSF sfPoint;
478: const PetscSFNode *remote;
479: const PetscInt *local;
480: const PetscInt *nrank, *rrank;
481: PetscInt *adj = NULL;
482: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
483: PetscMPIInt rank, size;
484: PetscBool flg;
485: PetscErrorCode ierr;
488: *ovLabel = NULL;
489: PetscObjectGetComm((PetscObject) dm, &comm);
490: MPI_Comm_size(comm, &size);
491: MPI_Comm_rank(comm, &rank);
492: if (size == 1) return(0);
493: DMGetPointSF(dm, &sfPoint);
494: DMPlexGetChart(dm, &pStart, &pEnd);
495: if (!levels) {
496: /* Add owned points */
497: DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
498: for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
499: return(0);
500: }
501: PetscSectionGetChart(leafSection, &sStart, &sEnd);
502: PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
503: DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);
504: /* Handle leaves: shared with the root point */
505: for (l = 0; l < nleaves; ++l) {
506: PetscInt adjSize = PETSC_DETERMINE, a;
508: DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);
509: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
510: }
511: ISGetIndices(rootrank, &rrank);
512: ISGetIndices(leafrank, &nrank);
513: /* Handle roots */
514: for (p = pStart; p < pEnd; ++p) {
515: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
517: if ((p >= sStart) && (p < sEnd)) {
518: /* Some leaves share a root with other leaves on different processes */
519: PetscSectionGetDof(leafSection, p, &neighbors);
520: if (neighbors) {
521: PetscSectionGetOffset(leafSection, p, &noff);
522: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
523: for (n = 0; n < neighbors; ++n) {
524: const PetscInt remoteRank = nrank[noff+n];
526: if (remoteRank == rank) continue;
527: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
528: }
529: }
530: }
531: /* Roots are shared with leaves */
532: PetscSectionGetDof(rootSection, p, &neighbors);
533: if (!neighbors) continue;
534: PetscSectionGetOffset(rootSection, p, &noff);
535: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
536: for (n = 0; n < neighbors; ++n) {
537: const PetscInt remoteRank = rrank[noff+n];
539: if (remoteRank == rank) continue;
540: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
541: }
542: }
543: PetscFree(adj);
544: ISRestoreIndices(rootrank, &rrank);
545: ISRestoreIndices(leafrank, &nrank);
546: /* Add additional overlap levels */
547: for (l = 1; l < levels; l++) {
548: /* Propagate point donations over SF to capture remote connections */
549: DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
550: /* Add next level of point donations to the label */
551: DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
552: }
553: /* We require the closure in the overlap */
554: DMPlexPartitionLabelClosure(dm, ovAdjByRank);
555: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
556: if (flg) {
557: PetscViewer viewer;
558: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);
559: DMLabelView(ovAdjByRank, viewer);
560: }
561: /* Invert sender to receiver label */
562: DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
563: DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);
564: /* Add owned points, except for shared local points */
565: for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
566: for (l = 0; l < nleaves; ++l) {
567: DMLabelClearValue(*ovLabel, local[l], rank);
568: DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
569: }
570: /* Clean up */
571: DMLabelDestroy(&ovAdjByRank);
572: return(0);
573: }
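/* Usage sketch (not part of plexdistribute.c): the typical calling sequence for the two
   routines above, mirroring what DMPlexDistributeOverlap() does internally. The sections
   and index sets created here are local to this example and are destroyed after use. */
static PetscErrorCode UserBuildOverlapLabel(DM dm, PetscInt levels, DMLabel *ovLabel)
{
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  MPI_Comm       comm;
  PetscErrorCode ierr;

  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  ierr = DMPlexCreateOverlapLabel(dm, levels, rootSection, rootrank, leafSection, leafrank, ovLabel);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  return 0;
}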
575: /*@C
576: DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF
578: Collective on dm
580: Input Parameters:
581: + dm - The DM
582: - overlapSF - The SF mapping ghost points in overlap to owner points on other processes
584: Output Parameter:
585: . migrationSF - An SF that maps original points in old locations to points in new locations
587: Level: developer
589: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistributeOverlap(), DMPlexDistribute()
590: @*/
591: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
592: {
593: MPI_Comm comm;
594: PetscMPIInt rank, size;
595: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
596: PetscInt *pointDepths, *remoteDepths, *ilocal;
597: PetscInt *depthRecv, *depthShift, *depthIdx;
598: PetscSFNode *iremote;
599: PetscSF pointSF;
600: const PetscInt *sharedLocal;
601: const PetscSFNode *overlapRemote, *sharedRemote;
602: PetscErrorCode ierr;
606: PetscObjectGetComm((PetscObject)dm, &comm);
607: MPI_Comm_rank(comm, &rank);
608: MPI_Comm_size(comm, &size);
609: DMGetDimension(dm, &dim);
611: /* Before building the migration SF we need to know the new stratum offsets */
612: PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
613: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
614: for (d=0; d<dim+1; d++) {
615: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
616: for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
617: }
618: for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
619: PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE);
620: PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE);
622: /* Count received points in each stratum and compute the internal strata shift */
623: PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
624: for (d=0; d<dim+1; d++) depthRecv[d]=0;
625: for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
626: depthShift[dim] = 0;
627: for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
628: for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
629: for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
630: for (d=0; d<dim+1; d++) {
631: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
632: depthIdx[d] = pStart + depthShift[d];
633: }
635: /* From the overlap SF, build an SF that describes the full overlap migration */
636: DMPlexGetChart(dm, &pStart, &pEnd);
637: newLeaves = pEnd - pStart + nleaves;
638: PetscMalloc1(newLeaves, &ilocal);
639: PetscMalloc1(newLeaves, &iremote);
640: /* First map local points to themselves */
641: for (d=0; d<dim+1; d++) {
642: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
643: for (p=pStart; p<pEnd; p++) {
644: point = p + depthShift[d];
645: ilocal[point] = point;
646: iremote[point].index = p;
647: iremote[point].rank = rank;
648: depthIdx[d]++;
649: }
650: }
652: /* Add in the remote roots for currently shared points */
653: DMGetPointSF(dm, &pointSF);
654: PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
655: for (d=0; d<dim+1; d++) {
656: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
657: for (p=0; p<numSharedPoints; p++) {
658: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
659: point = sharedLocal[p] + depthShift[d];
660: iremote[point].index = sharedRemote[p].index;
661: iremote[point].rank = sharedRemote[p].rank;
662: }
663: }
664: }
666: /* Now add the incoming overlap points */
667: for (p=0; p<nleaves; p++) {
668: point = depthIdx[remoteDepths[p]];
669: ilocal[point] = point;
670: iremote[point].index = overlapRemote[p].index;
671: iremote[point].rank = overlapRemote[p].rank;
672: depthIdx[remoteDepths[p]]++;
673: }
674: PetscFree2(pointDepths,remoteDepths);
676: PetscSFCreate(comm, migrationSF);
677: PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
678: PetscSFSetFromOptions(*migrationSF);
679: DMPlexGetChart(dm, &pStart, &pEnd);
680: PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);
682: PetscFree3(depthRecv, depthShift, depthIdx);
683: return(0);
684: }
686: /*@
687: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF for stratification.
689: Input Parameters:
690: + dm - The DM
691: - sf - A star forest with non-ordered leaves, usually defining a DM point migration
693: Output Parameter:
694: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
696: Note:
697: This lexicographically sorts by (depth, cellType)
699: Level: developer
701: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
702: @*/
703: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
704: {
705: MPI_Comm comm;
706: PetscMPIInt rank, size;
707: PetscInt d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
708: PetscSFNode *pointDepths, *remoteDepths;
709: PetscInt *ilocal;
710: PetscInt *depthRecv, *depthShift, *depthIdx;
711: PetscInt *ctRecv, *ctShift, *ctIdx;
712: const PetscSFNode *iremote;
713: PetscErrorCode ierr;
717: PetscObjectGetComm((PetscObject) dm, &comm);
718: MPI_Comm_rank(comm, &rank);
719: MPI_Comm_size(comm, &size);
720: DMPlexGetDepth(dm, &ldepth);
721: DMGetDimension(dm, &dim);
722: MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
723: if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %D != %D", ldepth, depth);
724: PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0);
726: /* Before building the migration SF we need to know the new stratum offsets */
727: PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
728: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
729: for (d = 0; d < depth+1; ++d) {
730: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
731: for (p = pStart; p < pEnd; ++p) {
732: DMPolytopeType ct;
734: DMPlexGetCellType(dm, p, &ct);
735: pointDepths[p].index = d;
736: pointDepths[p].rank = ct;
737: }
738: }
739: for (p = 0; p < nleaves; ++p) {remoteDepths[p].index = -1; remoteDepths[p].rank = -1;}
740: PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE);
741: PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE);
742: /* Count received points in each stratum and compute the internal strata shift */
743: PetscCalloc6(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx);
744: for (p = 0; p < nleaves; ++p) {
745: if (remoteDepths[p].rank < 0) {
746: ++depthRecv[remoteDepths[p].index];
747: } else {
748: ++ctRecv[remoteDepths[p].rank];
749: }
750: }
751: {
752: PetscInt depths[4], dims[4], shift = 0, i, c;
754: /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
755: Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
756: */
757: depths[0] = depth; depths[1] = 0; depths[2] = depth-1; depths[3] = 1;
758: dims[0] = dim; dims[1] = 0; dims[2] = dim-1; dims[3] = 1;
759: for (i = 0; i <= depth; ++i) {
760: const PetscInt dep = depths[i];
761: const PetscInt dim = dims[i];
763: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
764: if (DMPolytopeTypeGetDim((DMPolytopeType) c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
765: ctShift[c] = shift;
766: shift += ctRecv[c];
767: }
768: depthShift[dep] = shift;
769: shift += depthRecv[dep];
770: }
771: for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
772: const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType) c);
774: if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
775: ctShift[c] = shift;
776: shift += ctRecv[c];
777: }
778: }
779: }
780: /* Derive a new local permutation based on stratified indices */
781: PetscMalloc1(nleaves, &ilocal);
782: for (p = 0; p < nleaves; ++p) {
783: const PetscInt dep = remoteDepths[p].index;
784: const DMPolytopeType ct = (DMPolytopeType) remoteDepths[p].rank;
786: if ((PetscInt) ct < 0) {
787: ilocal[p] = depthShift[dep] + depthIdx[dep];
788: ++depthIdx[dep];
789: } else {
790: ilocal[p] = ctShift[ct] + ctIdx[ct];
791: ++ctIdx[ct];
792: }
793: }
794: PetscSFCreate(comm, migrationSF);
795: PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
796: PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
797: PetscFree2(pointDepths,remoteDepths);
798: PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx);
799: PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0);
800: return(0);
801: }
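/* Usage sketch (not part of plexdistribute.c): replace an unsorted migration SF by its
   stratified counterpart, the pattern DMPlexDistribute() uses before migrating data. */
static PetscErrorCode UserStratifyMigration(DM dm, PetscSF *sfMigration)
{
  PetscSF        sfStratified;
  PetscErrorCode ierr;

  ierr = DMPlexStratifyMigrationSF(dm, *sfMigration, &sfStratified);CHKERRQ(ierr);
  ierr = PetscSFDestroy(sfMigration);CHKERRQ(ierr);
  *sfMigration = sfStratified;
  return 0;
}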
803: /*@
804: DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
806: Collective on dm
808: Input Parameters:
809: + dm - The DMPlex object
810: . pointSF - The PetscSF describing the communication pattern
811: . originalSection - The PetscSection for existing data layout
812: - originalVec - The existing data in a local vector
814: Output Parameters:
815: + newSection - The PetscSection describing the new data layout
816: - newVec - The new data in a local vector
818: Level: developer
820: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
821: @*/
822: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
823: {
824: PetscSF fieldSF;
825: PetscInt *remoteOffsets, fieldSize;
826: PetscScalar *originalValues, *newValues;
830: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
831: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
833: PetscSectionGetStorageSize(newSection, &fieldSize);
834: VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
835: VecSetType(newVec,dm->vectype);
837: VecGetArray(originalVec, &originalValues);
838: VecGetArray(newVec, &newValues);
839: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
840: PetscFree(remoteOffsets);
841: PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE);
842: PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE);
843: PetscSFDestroy(&fieldSF);
844: VecRestoreArray(newVec, &newValues);
845: VecRestoreArray(originalVec, &originalValues);
846: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
847: return(0);
848: }
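/* Usage sketch (not part of plexdistribute.c): distribute a local field over a migration SF.
   The new section and vector are created empty; the routine fills in the layout and sizes
   them, just as DMPlexDistributeCoordinates() below does for the coordinate field. */
static PetscErrorCode UserDistributeField(DM dm, PetscSF migrationSF, PetscSection origSection, Vec origVec, PetscSection *newSection, Vec *newVec)
{
  PetscErrorCode ierr;

  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), newSection);CHKERRQ(ierr);
  ierr = VecCreate(PETSC_COMM_SELF, newVec);CHKERRQ(ierr);
  ierr = DMPlexDistributeField(dm, migrationSF, origSection, origVec, *newSection, *newVec);CHKERRQ(ierr);
  return 0;
}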
850: /*@
851: DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
853: Collective on dm
855: Input Parameters:
856: + dm - The DMPlex object
857: . pointSF - The PetscSF describing the communication pattern
858: . originalSection - The PetscSection for existing data layout
859: - originalIS - The existing data
861: Output Parameters:
862: + newSection - The PetscSection describing the new data layout
863: - newIS - The new data
865: Level: developer
867: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
868: @*/
869: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
870: {
871: PetscSF fieldSF;
872: PetscInt *newValues, *remoteOffsets, fieldSize;
873: const PetscInt *originalValues;
874: PetscErrorCode ierr;
877: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
878: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
880: PetscSectionGetStorageSize(newSection, &fieldSize);
881: PetscMalloc1(fieldSize, &newValues);
883: ISGetIndices(originalIS, &originalValues);
884: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
885: PetscFree(remoteOffsets);
886: PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE);
887: PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE);
888: PetscSFDestroy(&fieldSF);
889: ISRestoreIndices(originalIS, &originalValues);
890: ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
891: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
892: return(0);
893: }
895: /*@
896: DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
898: Collective on dm
900: Input Parameters:
901: + dm - The DMPlex object
902: . pointSF - The PetscSF describing the communication pattern
903: . originalSection - The PetscSection for existing data layout
904: . datatype - The type of data
905: - originalData - The existing data
907: Output Parameters:
908: + newSection - The PetscSection describing the new data layout
909: - newData - The new data
911: Level: developer
913: .seealso: DMPlexDistribute(), DMPlexDistributeField()
914: @*/
915: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
916: {
917: PetscSF fieldSF;
918: PetscInt *remoteOffsets, fieldSize;
919: PetscMPIInt dataSize;
923: PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
924: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
926: PetscSectionGetStorageSize(newSection, &fieldSize);
927: MPI_Type_size(datatype, &dataSize);
928: PetscMalloc(fieldSize * dataSize, newData);
930: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
931: PetscFree(remoteOffsets);
932: PetscSFBcastBegin(fieldSF, datatype, originalData, *newData,MPI_REPLACE);
933: PetscSFBcastEnd(fieldSF, datatype, originalData, *newData,MPI_REPLACE);
934: PetscSFDestroy(&fieldSF);
935: PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
936: return(0);
937: }
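/* Usage sketch (not part of plexdistribute.c): the same pattern as above for raw data
   described by an MPI datatype; here PetscInt data is distributed and the received buffer
   is freed by the caller. The migration SF, section, and data are assumed to exist. */
static PetscErrorCode UserDistributeIntData(DM dm, PetscSF migrationSF, PetscSection origSection, PetscInt *origData)
{
  PetscSection   newSection;
  PetscInt      *newData = NULL;
  PetscErrorCode ierr;

  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origData, newSection, (void **) &newData);CHKERRQ(ierr);
  /* ... use newSection/newData ... */
  ierr = PetscFree(newData);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&newSection);CHKERRQ(ierr);
  return 0;
}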
939: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
940: {
941: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
942: MPI_Comm comm;
943: PetscSF coneSF;
944: PetscSection originalConeSection, newConeSection;
945: PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
946: PetscBool flg;
947: PetscErrorCode ierr;
952: PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
953: /* Distribute cone section */
954: PetscObjectGetComm((PetscObject)dm, &comm);
955: DMPlexGetConeSection(dm, &originalConeSection);
956: DMPlexGetConeSection(dmParallel, &newConeSection);
957: PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
958: DMSetUp(dmParallel);
959: {
960: PetscInt pStart, pEnd, p;
962: PetscSectionGetChart(newConeSection, &pStart, &pEnd);
963: for (p = pStart; p < pEnd; ++p) {
964: PetscInt coneSize;
965: PetscSectionGetDof(newConeSection, p, &coneSize);
966: pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
967: }
968: }
969: /* Communicate and renumber cones */
970: PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
971: PetscFree(remoteOffsets);
972: DMPlexGetCones(dm, &cones);
973: if (original) {
974: PetscInt numCones;
976: PetscSectionGetStorageSize(originalConeSection,&numCones);
977: PetscMalloc1(numCones,&globCones);
978: ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
979: } else {
980: globCones = cones;
981: }
982: DMPlexGetCones(dmParallel, &newCones);
983: PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE);
984: PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE);
985: if (original) {
986: PetscFree(globCones);
987: }
988: PetscSectionGetStorageSize(newConeSection, &newConesSize);
989: ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
990: if (PetscDefined(USE_DEBUG)) {
991: PetscInt p;
992: PetscBool valid = PETSC_TRUE;
993: for (p = 0; p < newConesSize; ++p) {
994: if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);}
995: }
996: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
997: }
998: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
999: if (flg) {
1000: PetscPrintf(comm, "Serial Cone Section:\n");
1001: PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm));
1002: PetscPrintf(comm, "Parallel Cone Section:\n");
1003: PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm));
1004: PetscSFView(coneSF, NULL);
1005: }
1006: DMPlexGetConeOrientations(dm, &cones);
1007: DMPlexGetConeOrientations(dmParallel, &newCones);
1008: PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE);
1009: PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE);
1010: PetscSFDestroy(&coneSF);
1011: PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
1012: /* Create supports and stratify DMPlex */
1013: {
1014: PetscInt pStart, pEnd;
1016: PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
1017: PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
1018: }
1019: DMPlexSymmetrize(dmParallel);
1020: DMPlexStratify(dmParallel);
1021: {
1022: PetscBool useCone, useClosure, useAnchors;
1024: DMGetBasicAdjacency(dm, &useCone, &useClosure);
1025: DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1026: DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1027: DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1028: }
1029: return(0);
1030: }
1032: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1033: {
1034: MPI_Comm comm;
1035: DM cdm, cdmParallel;
1036: PetscSection originalCoordSection, newCoordSection;
1037: Vec originalCoordinates, newCoordinates;
1038: PetscInt bs;
1039: PetscBool isper;
1040: const char *name;
1041: const PetscReal *maxCell, *L;
1042: const DMBoundaryType *bd;
1043: PetscErrorCode ierr;
1049: PetscObjectGetComm((PetscObject)dm, &comm);
1050: DMGetCoordinateSection(dm, &originalCoordSection);
1051: DMGetCoordinateSection(dmParallel, &newCoordSection);
1052: DMGetCoordinatesLocal(dm, &originalCoordinates);
1053: if (originalCoordinates) {
1054: VecCreate(PETSC_COMM_SELF, &newCoordinates);
1055: PetscObjectGetName((PetscObject) originalCoordinates, &name);
1056: PetscObjectSetName((PetscObject) newCoordinates, name);
1058: DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1059: DMSetCoordinatesLocal(dmParallel, newCoordinates);
1060: VecGetBlockSize(originalCoordinates, &bs);
1061: VecSetBlockSize(newCoordinates, bs);
1062: VecDestroy(&newCoordinates);
1063: }
1064: DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);
1065: DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);
1066: DMGetCoordinateDM(dm, &cdm);
1067: DMGetCoordinateDM(dmParallel, &cdmParallel);
1068: DMCopyDisc(cdm, cdmParallel);
1069: return(0);
1070: }
1072: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1073: {
1074: DM_Plex *mesh = (DM_Plex*) dm->data;
1075: MPI_Comm comm;
1076: DMLabel depthLabel;
1077: PetscMPIInt rank;
1078: PetscInt depth, d, numLabels, numLocalLabels, l;
1079: PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1080: PetscObjectState depthState = -1;
1081: PetscErrorCode ierr;
1087: PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1088: PetscObjectGetComm((PetscObject)dm, &comm);
1089: MPI_Comm_rank(comm, &rank);
1091: /* If the user has changed the depth label, communicate it instead */
1092: DMPlexGetDepth(dm, &depth);
1093: DMPlexGetDepthLabel(dm, &depthLabel);
1094: if (depthLabel) {PetscObjectStateGet((PetscObject) depthLabel, &depthState);}
1095: lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1096: MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1097: if (sendDepth) {
1098: DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel);
1099: DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE);
1100: }
1101: /* Everyone must have either the same number of labels, or none */
1102: DMGetNumLabels(dm, &numLocalLabels);
1103: numLabels = numLocalLabels;
1104: MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1105: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1106: for (l = 0; l < numLabels; ++l) {
1107: DMLabel label = NULL, labelNew = NULL;
1108: PetscBool isDepth, lisOutput = PETSC_TRUE, isOutput;
1109: const char *name = NULL;
1111: if (hasLabels) {
1112: DMGetLabelByNum(dm, l, &label);
1113: /* Skip "depth" because it is recreated */
1114: PetscObjectGetName((PetscObject) label, &name);
1115: PetscStrcmp(name, "depth", &isDepth);
1116: }
1117: MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);
1118: if (isDepth && !sendDepth) continue;
1119: DMLabelDistribute(label, migrationSF, &labelNew);
1120: if (isDepth) {
1121: /* Put in any missing strata which can occur if users are managing the depth label themselves */
1122: PetscInt gdepth;
1124: MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1125: if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %D != %D", depth, gdepth);
1126: for (d = 0; d <= gdepth; ++d) {
1127: PetscBool has;
1129: DMLabelHasStratum(labelNew, d, &has);
1130: if (!has) {DMLabelAddStratum(labelNew, d);}
1131: }
1132: }
1133: DMAddLabel(dmParallel, labelNew);
1134: /* Put the output flag in the new label */
1135: if (hasLabels) {DMGetLabelOutput(dm, name, &lisOutput);}
1136: MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);
1137: PetscObjectGetName((PetscObject) labelNew, &name);
1138: DMSetLabelOutput(dmParallel, name, isOutput);
1139: DMLabelDestroy(&labelNew);
1140: }
1141: PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1142: return(0);
1143: }
1145: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1146: {
1147: DM_Plex *mesh = (DM_Plex*) dm->data;
1148: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1149: MPI_Comm comm;
1150: DM refTree;
1151: PetscSection origParentSection, newParentSection;
1152: PetscInt *origParents, *origChildIDs;
1153: PetscBool flg;
1154: PetscErrorCode ierr;
1159: PetscObjectGetComm((PetscObject)dm, &comm);
1161: /* Set up tree */
1162: DMPlexGetReferenceTree(dm,&refTree);
1163: DMPlexSetReferenceTree(dmParallel,refTree);
1164: DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1165: if (origParentSection) {
1166: PetscInt pStart, pEnd;
1167: PetscInt *newParents, *newChildIDs, *globParents;
1168: PetscInt *remoteOffsetsParents, newParentSize;
1169: PetscSF parentSF;
1171: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1172: PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1173: PetscSectionSetChart(newParentSection,pStart,pEnd);
1174: PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1175: PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1176: PetscFree(remoteOffsetsParents);
1177: PetscSectionGetStorageSize(newParentSection,&newParentSize);
1178: PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1179: if (original) {
1180: PetscInt numParents;
1182: PetscSectionGetStorageSize(origParentSection,&numParents);
1183: PetscMalloc1(numParents,&globParents);
1184: ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1185: }
1186: else {
1187: globParents = origParents;
1188: }
1189: PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE);
1190: PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE);
1191: if (original) {
1192: PetscFree(globParents);
1193: }
1194: PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE);
1195: PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE);
1196: ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1197: if (PetscDefined(USE_DEBUG)) {
1198: PetscInt p;
1199: PetscBool valid = PETSC_TRUE;
1200: for (p = 0; p < newParentSize; ++p) {
1201: if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %D not in overlap SF\n", p);}
1202: }
1203: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1204: }
1205: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1206: if (flg) {
1207: PetscPrintf(comm, "Serial Parent Section: \n");
1208: PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm));
1209: PetscPrintf(comm, "Parallel Parent Section: \n");
1210: PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm));
1211: PetscSFView(parentSF, NULL);
1212: }
1213: DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1214: PetscSectionDestroy(&newParentSection);
1215: PetscFree2(newParents,newChildIDs);
1216: PetscSFDestroy(&parentSF);
1217: }
1218: pmesh->useAnchors = mesh->useAnchors;
1219: return(0);
1220: }
1222: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1223: {
1224: PetscMPIInt rank, size;
1225: MPI_Comm comm;
1226: PetscErrorCode ierr;
1232: /* Create point SF for parallel mesh */
1233: PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1234: PetscObjectGetComm((PetscObject)dm, &comm);
1235: MPI_Comm_rank(comm, &rank);
1236: MPI_Comm_size(comm, &size);
1237: {
1238: const PetscInt *leaves;
1239: PetscSFNode *remotePoints, *rowners, *lowners;
1240: PetscInt numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1241: PetscInt pStart, pEnd;
1243: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1244: PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1245: PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1246: for (p=0; p<numRoots; p++) {
1247: rowners[p].rank = -1;
1248: rowners[p].index = -1;
1249: }
1250: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1251: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1252: for (p = 0; p < numLeaves; ++p) {
1253: if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1254: lowners[p].rank = rank;
1255: lowners[p].index = leaves ? leaves[p] : p;
1256: } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
1257: lowners[p].rank = -2;
1258: lowners[p].index = -2;
1259: }
1260: }
1261: for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1262: rowners[p].rank = -3;
1263: rowners[p].index = -3;
1264: }
1265: PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1266: PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1267: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1268: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE);
1269: for (p = 0; p < numLeaves; ++p) {
1270: if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1271: if (lowners[p].rank != rank) ++numGhostPoints;
1272: }
1273: PetscMalloc1(numGhostPoints, &ghostPoints);
1274: PetscMalloc1(numGhostPoints, &remotePoints);
1275: for (p = 0, gp = 0; p < numLeaves; ++p) {
1276: if (lowners[p].rank != rank) {
1277: ghostPoints[gp] = leaves ? leaves[p] : p;
1278: remotePoints[gp].rank = lowners[p].rank;
1279: remotePoints[gp].index = lowners[p].index;
1280: ++gp;
1281: }
1282: }
1283: PetscFree2(rowners,lowners);
1284: PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1285: PetscSFSetFromOptions((dmParallel)->sf);
1286: }
1287: {
1288: PetscBool useCone, useClosure, useAnchors;
1290: DMGetBasicAdjacency(dm, &useCone, &useClosure);
1291: DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1292: DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1293: DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1294: }
1295: PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1296: return(0);
1297: }
1299: /*@
1300: DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?
1302: Input Parameters:
1303: + dm - The DMPlex object
1304: - flg - Balance the partition?
1306: Level: intermediate
1308: .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1309: @*/
1310: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1311: {
1312: DM_Plex *mesh = (DM_Plex *)dm->data;
1315: mesh->partitionBalance = flg;
1316: return(0);
1317: }
1319: /*@
1320: DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?
1322: Input Parameter:
1323: . dm - The DMPlex object
1325: Output Parameter:
1326: . flg - Balance the partition?
1328: Level: intermediate
1330: .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1331: @*/
1332: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1333: {
1334: DM_Plex *mesh = (DM_Plex *)dm->data;
1339: *flg = mesh->partitionBalance;
1340: return(0);
1341: }
1343: typedef struct {
1344: PetscInt vote, rank, index;
1345: } Petsc3Int;
1347: /* MaxLoc, but carry a third piece of information around */
1348: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1349: {
1350: Petsc3Int *a = (Petsc3Int *)inout_;
1351: Petsc3Int *b = (Petsc3Int *)in_;
1352: PetscInt i, len = *len_;
1353: for (i = 0; i < len; i++) {
1354: if (a[i].vote < b[i].vote) {
1355: a[i].vote = b[i].vote;
1356: a[i].rank = b[i].rank;
1357: a[i].index = b[i].index;
1358: } else if (a[i].vote <= b[i].vote) {
1359: if (a[i].rank >= b[i].rank) {
1360: a[i].rank = b[i].rank;
1361: a[i].index = b[i].index;
1362: }
1363: }
1364: }
1365: }
1367: /*@C
1368: DMPlexCreatePointSF - Build a point SF from an SF describing a point migration
1370: Input Parameters:
1371: + dm - The source DMPlex object
1372: . migrationSF - The star forest that describes the parallel point remapping
1373: - ownership - Flag causing a vote to determine point ownership
1375: Output Parameter:
1376: . pointSF - The star forest describing the point overlap in the remapped DM
1378: Notes:
1379: Output pointSF is guaranteed to have the array of local indices (leaves) sorted.
1381: Level: developer
1383: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1384: @*/
1385: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1386: {
1387: PetscMPIInt rank, size;
1388: PetscInt p, nroots, nleaves, idx, npointLeaves;
1389: PetscInt *pointLocal;
1390: const PetscInt *leaves;
1391: const PetscSFNode *roots;
1392: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1393: Vec shifts;
1394: const PetscInt numShifts = 13759;
1395: const PetscScalar *shift = NULL;
1396: const PetscBool shiftDebug = PETSC_FALSE;
1397: PetscBool balance;
1398: PetscErrorCode ierr;
1402: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1403: MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
1404: PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0);
1406: DMPlexGetPartitionBalance(dm, &balance);
1407: PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1408: PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1409: if (ownership) {
1410: MPI_Op op;
1411: MPI_Datatype datatype;
1412: Petsc3Int *rootVote = NULL, *leafVote = NULL;
1413: /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1414: if (balance) {
1415: PetscRandom r;
1417: PetscRandomCreate(PETSC_COMM_SELF, &r);
1418: PetscRandomSetInterval(r, 0, 2467*size);
1419: VecCreate(PETSC_COMM_SELF, &shifts);
1420: VecSetSizes(shifts, numShifts, numShifts);
1421: VecSetType(shifts, VECSTANDARD);
1422: VecSetRandom(shifts, r);
1423: PetscRandomDestroy(&r);
1424: VecGetArrayRead(shifts, &shift);
1425: }
1427: PetscMalloc1(nroots, &rootVote);
1428: PetscMalloc1(nleaves, &leafVote);
1429: /* Point ownership vote: Process with highest rank owns shared points */
1430: for (p = 0; p < nleaves; ++p) {
1431: if (shiftDebug) {
1432: PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);
1433: }
1434: /* Either put in a bid or we know we own it */
1435: leafVote[p].vote = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
1436: leafVote[p].rank = rank;
1437: leafVote[p].index = p;
1438: }
1439: for (p = 0; p < nroots; p++) {
1440: /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1441: rootVote[p].vote = -3;
1442: rootVote[p].rank = -3;
1443: rootVote[p].index = -3;
1444: }
1445: MPI_Type_contiguous(3, MPIU_INT, &datatype);
1446: MPI_Type_commit(&datatype);
1447: MPI_Op_create(&MaxLocCarry, 1, &op);
1448: PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op);
1449: PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op);
1450: MPI_Op_free(&op);
1451: MPI_Type_free(&datatype);
1452: for (p = 0; p < nroots; p++) {
1453: rootNodes[p].rank = rootVote[p].rank;
1454: rootNodes[p].index = rootVote[p].index;
1455: }
1456: PetscFree(leafVote);
1457: PetscFree(rootVote);
1458: } else {
1459: for (p = 0; p < nroots; p++) {
1460: rootNodes[p].index = -1;
1461: rootNodes[p].rank = rank;
1462: }
1463: for (p = 0; p < nleaves; p++) {
1464: /* Write new local id into old location */
1465: if (roots[p].rank == rank) {
1466: rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1467: }
1468: }
1469: }
1470: PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE);
1471: PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE);
1473: for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1474: if (leafNodes[p].rank != rank) npointLeaves++;
1475: }
1476: PetscMalloc1(npointLeaves, &pointLocal);
1477: PetscMalloc1(npointLeaves, &pointRemote);
1478: for (idx = 0, p = 0; p < nleaves; p++) {
1479: if (leafNodes[p].rank != rank) {
1480: /* Note that pointLocal is automatically sorted as it is a sublist of 0, ..., nleaves-1 */
1481: pointLocal[idx] = p;
1482: pointRemote[idx] = leafNodes[p];
1483: idx++;
1484: }
1485: }
1486: if (shift) {
1487: VecRestoreArrayRead(shifts, &shift);
1488: VecDestroy(&shifts);
1489: }
1490: if (shiftDebug) {PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);}
1491: PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1492: PetscSFSetFromOptions(*pointSF);
1493: PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1494: PetscFree2(rootNodes, leafNodes);
1495: PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0);
1496: return(0);
1497: }
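/* Usage sketch (not part of plexdistribute.c): build the point SF of a freshly migrated DM
   from the migration SF and attach it, the pattern DMPlexDistribute() applies further below. */
static PetscErrorCode UserAttachPointSF(DM dmParallel, PetscSF sfMigration)
{
  PetscSF        sfPoint;
  PetscErrorCode ierr;

  ierr = DMPlexCreatePointSF(dmParallel, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(dmParallel, sfPoint);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  return 0;
}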
1499: /*@C
1500: DMPlexMigrate - Migrates internal DM data over the supplied star forest
1502: Collective on dm
1504: Input Parameters:
1505: + dm - The source DMPlex object
1506: - sf - The star forest communication context describing the migration pattern
1508: Output Parameter:
1509: . targetDM - The target DMPlex object
1511: Level: intermediate
1513: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1514: @*/
1515: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1516: {
1517: MPI_Comm comm;
1518: PetscInt dim, cdim, nroots;
1519: PetscSF sfPoint;
1520: ISLocalToGlobalMapping ltogMigration;
1521: ISLocalToGlobalMapping ltogOriginal = NULL;
1522: PetscBool flg;
1523: PetscErrorCode ierr;
1527: PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1528: PetscObjectGetComm((PetscObject) dm, &comm);
1529: DMGetDimension(dm, &dim);
1530: DMSetDimension(targetDM, dim);
1531: DMGetCoordinateDim(dm, &cdim);
1532: DMSetCoordinateDim(targetDM, cdim);
1534: /* Check for a one-to-all distribution pattern */
1535: DMGetPointSF(dm, &sfPoint);
1536: PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1537: if (nroots >= 0) {
1538: IS isOriginal;
1539: PetscInt n, size, nleaves;
1540: PetscInt *numbering_orig, *numbering_new;
1542: /* Get the original point numbering */
1543: DMPlexCreatePointNumbering(dm, &isOriginal);
1544: ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal);
1545: ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1546: ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1547: /* Convert to positive global numbers */
1548: for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1549: /* Derive the new local-to-global mapping from the old one */
1550: PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1551: PetscMalloc1(nleaves, &numbering_new);
1552: PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE);
1553: PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE);
1554: ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1555: ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1556: ISDestroy(&isOriginal);
1557: } else {
1558: /* One-to-all distribution pattern: We can derive LToG from SF */
1559: ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1560: }
1561: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1562: if (flg) {
1563: PetscPrintf(comm, "Point renumbering for DM migration:\n");
1564: ISLocalToGlobalMappingView(ltogMigration, NULL);
1565: }
1566: /* Migrate DM data to target DM */
1567: DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1568: DMPlexDistributeLabels(dm, sf, targetDM);
1569: DMPlexDistributeCoordinates(dm, sf, targetDM);
1570: DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1571: ISLocalToGlobalMappingDestroy(&ltogOriginal);
1572: ISLocalToGlobalMappingDestroy(&ltogMigration);
1573: PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1574: return(0);
1575: }
1577: /*@C
1578: DMPlexDistribute - Distributes the mesh and any associated sections.
1580: Collective on dm
1582: Input Parameters:
1583: + dm - The original DMPlex object
1584: - overlap - The overlap of partitions, 0 is the default
1586: Output Parameters:
1587: + sf - The PetscSF used for point distribution, or NULL if not needed
1588: - dmParallel - The distributed DMPlex object
1590: Note: If the mesh was not distributed, the output dmParallel will be NULL.
1592: The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1593: representation on the mesh.
1595: Level: intermediate
1597: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexGetOverlap()
1598: @*/
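/*
  Example (illustrative sketch): the common distribute-and-replace idiom, assuming a serial DMPlex
  "dm" created elsewhere and <petscdmplex.h> included; error checking omitted as in this listing.

    DM      dmDist = NULL;
    PetscSF sfDist = NULL;

    DMPlexDistribute(dm, 0, &sfDist, &dmDist);
    if (dmDist) {               // NULL on a single rank: nothing was distributed
      DMDestroy(&dm);
      dm = dmDist;
    }
    PetscSFDestroy(&sfDist);    // safe even if sfDist was left NULL
*/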
1599: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1600: {
1601: MPI_Comm comm;
1602: PetscPartitioner partitioner;
1603: IS cellPart;
1604: PetscSection cellPartSection;
1605: DM dmCoord;
1606: DMLabel lblPartition, lblMigration;
1607: PetscSF sfMigration, sfStratified, sfPoint;
1608: PetscBool flg, balance;
1609: PetscMPIInt rank, size;
1610: PetscErrorCode ierr;
1618: if (sf) *sf = NULL;
1619: *dmParallel = NULL;
1620: PetscObjectGetComm((PetscObject)dm,&comm);
1621: MPI_Comm_rank(comm, &rank);
1622: MPI_Comm_size(comm, &size);
1623: if (size == 1) return(0);
1625: PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1626: /* Create cell partition */
1627: PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1628: PetscSectionCreate(comm, &cellPartSection);
1629: DMPlexGetPartitioner(dm, &partitioner);
1630: PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart);
1631: PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0);
1632: {
1633: /* Convert partition to DMLabel */
1634: IS is;
1635: PetscHSetI ht;
1636: const PetscInt *points;
1637: PetscInt *iranks;
1638: PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks;
1640: DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);
1641: /* Preallocate strata */
1642: PetscHSetICreate(&ht);
1643: PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1644: for (proc = pStart; proc < pEnd; proc++) {
1645: PetscSectionGetDof(cellPartSection, proc, &npoints);
1646: if (npoints) {PetscHSetIAdd(ht, proc);}
1647: }
1648: PetscHSetIGetSize(ht, &nranks);
1649: PetscMalloc1(nranks, &iranks);
1650: PetscHSetIGetElems(ht, &poff, iranks);
1651: PetscHSetIDestroy(&ht);
1652: DMLabelAddStrata(lblPartition, nranks, iranks);
1653: PetscFree(iranks);
1654: /* Inline DMPlexPartitionLabelClosure() */
1655: ISGetIndices(cellPart, &points);
1656: PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1657: for (proc = pStart; proc < pEnd; proc++) {
1658: PetscSectionGetDof(cellPartSection, proc, &npoints);
1659: if (!npoints) continue;
1660: PetscSectionGetOffset(cellPartSection, proc, &poff);
1661: DMPlexClosurePoints_Private(dm, npoints, points+poff, &is);
1662: DMLabelSetStratumIS(lblPartition, proc, is);
1663: ISDestroy(&is);
1664: }
1665: ISRestoreIndices(cellPart, &points);
1666: }
1667: PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0);
1669: DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);
1670: DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);
1671: DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1672: DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1673: PetscSFDestroy(&sfMigration);
1674: sfMigration = sfStratified;
1675: PetscSFSetUp(sfMigration);
1676: PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1677: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1678: if (flg) {
1679: DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm));
1680: PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm));
1681: }
1683: /* Create non-overlapping parallel DM and migrate internal data */
1684: DMPlexCreate(comm, dmParallel);
1685: PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1686: DMPlexMigrate(dm, sfMigration, *dmParallel);
1688: /* Build the point SF without overlap */
1689: DMPlexGetPartitionBalance(dm, &balance);
1690: DMPlexSetPartitionBalance(*dmParallel, balance);
1691: DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1692: DMSetPointSF(*dmParallel, sfPoint);
1693: DMGetCoordinateDM(*dmParallel, &dmCoord);
1694: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1695: if (flg) {PetscSFView(sfPoint, NULL);}
1697: if (overlap > 0) {
1698: DM dmOverlap;
1699: PetscInt nroots, nleaves, noldleaves, l;
1700: const PetscInt *oldLeaves;
1701: PetscSFNode *newRemote, *permRemote;
1702: const PetscSFNode *oldRemote;
1703: PetscSF sfOverlap, sfOverlapPoint;
1705: /* Add the partition overlap to the distributed DM */
1706: DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1707: DMDestroy(dmParallel);
1708: *dmParallel = dmOverlap;
1709: if (flg) {
1710: PetscPrintf(comm, "Overlap Migration SF:\n");
1711: PetscSFView(sfOverlap, NULL);
1712: }
1714: /* Re-map the migration SF to establish the full migration pattern */
1715: PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote);
1716: PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1717: PetscMalloc1(nleaves, &newRemote);
1718: /* oldRemote: original root point mapping to original leaf point
1719: newRemote: original leaf point mapping to overlapped leaf point */
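  /* Broadcasting oldRemote over sfOverlap composes the two migrations: after the broadcast,
     newRemote[l] holds the (rank, index) of the original mesh point that ends up at overlapped
     leaf l, which is the graph used for sfOverlapPoint below */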
1720: if (oldLeaves) {
1721: /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1722: PetscMalloc1(noldleaves, &permRemote);
1723: for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1724: oldRemote = permRemote;
1725: }
1726: PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE);
1727: PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE);
1728: if (oldLeaves) {PetscFree(oldRemote);}
1729: PetscSFCreate(comm, &sfOverlapPoint);
1730: PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1731: PetscSFDestroy(&sfOverlap);
1732: PetscSFDestroy(&sfMigration);
1733: sfMigration = sfOverlapPoint;
1734: }
1735: /* Cleanup Partition */
1736: DMLabelDestroy(&lblPartition);
1737: DMLabelDestroy(&lblMigration);
1738: PetscSectionDestroy(&cellPartSection);
1739: ISDestroy(&cellPart);
1740: /* Copy discretization */
1741: DMCopyDisc(dm, *dmParallel);
1742: /* Create sfNatural */
1743: if (dm->useNatural) {
1744: PetscSection section;
1746: DMGetLocalSection(dm, &section);
1747: DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1748: DMSetUseNatural(*dmParallel, PETSC_TRUE);
1749: }
1750: /* Cleanup */
1751: if (sf) {*sf = sfMigration;}
1752: else {PetscSFDestroy(&sfMigration);}
1753: PetscSFDestroy(&sfPoint);
1754: PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1755: return(0);
1756: }
1758: /*@C
1759: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.
1761: Collective on dm
1763: Input Parameters:
1764: + dm - The non-overlapping distributed DMPlex object
1765: - overlap - The overlap of partitions (the same on all ranks)
1767: Output Parameters:
1768: + sf - The PetscSF used for point distribution, or NULL if not needed
1769: - dmOverlap - The overlapping distributed DMPlex object, or NULL
1771: Notes:
1772: If the communicator has only one rank (the mesh is not distributed), the output dmOverlap will be NULL.
1774: The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1775: representation on the mesh.
1777: Level: advanced
1779: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexDistribute(), DMPlexCreateOverlapLabel(), DMPlexGetOverlap()
1780: @*/
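/*
  Example (illustrative sketch): adding one layer of overlap to an already distributed,
  non-overlapping DMPlex "dmDist"; error checking omitted as in this listing.

    DM      dmOverlap = NULL;
    PetscSF sfOverlap = NULL;

    DMPlexDistributeOverlap(dmDist, 1, &sfOverlap, &dmOverlap);
    if (dmOverlap) {            // NULL on a single rank
      DMDestroy(&dmDist);
      dmDist = dmOverlap;
    }
    PetscSFDestroy(&sfOverlap);
*/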
1781: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1782: {
1783: MPI_Comm comm;
1784: PetscMPIInt size, rank;
1785: PetscSection rootSection, leafSection;
1786: IS rootrank, leafrank;
1787: DM dmCoord;
1788: DMLabel lblOverlap;
1789: PetscSF sfOverlap, sfStratified, sfPoint;
1790: PetscErrorCode ierr;
1798: if (sf) *sf = NULL;
1799: *dmOverlap = NULL;
1800: PetscObjectGetComm((PetscObject)dm,&comm);
1801: MPI_Comm_size(comm, &size);
1802: MPI_Comm_rank(comm, &rank);
1803: if (size == 1) return(0);
1805: PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1806: /* Compute point overlap with neighbouring processes on the distributed DM */
1807: PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1808: PetscSectionCreate(comm, &rootSection);
1809: PetscSectionCreate(comm, &leafSection);
1810: DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1811: DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1812: /* Convert overlap label to stratified migration SF */
1813: DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1814: DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1815: PetscSFDestroy(&sfOverlap);
1816: sfOverlap = sfStratified;
1817: PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1818: PetscSFSetFromOptions(sfOverlap);
1820: PetscSectionDestroy(&rootSection);
1821: PetscSectionDestroy(&leafSection);
1822: ISDestroy(&rootrank);
1823: ISDestroy(&leafrank);
1824: PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1826: /* Build the overlapping DM */
1827: DMPlexCreate(comm, dmOverlap);
1828: PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1829: DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1830: /* Store the overlap in the new DM */
1831: ((DM_Plex*)(*dmOverlap)->data)->overlap = overlap + ((DM_Plex*)dm->data)->overlap;
1832: /* Build the new point SF */
1833: DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1834: DMSetPointSF(*dmOverlap, sfPoint);
1835: DMGetCoordinateDM(*dmOverlap, &dmCoord);
1836: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1837: PetscSFDestroy(&sfPoint);
1838: /* Cleanup overlap partition */
1839: DMLabelDestroy(&lblOverlap);
1840: if (sf) *sf = sfOverlap;
1841: else {PetscSFDestroy(&sfOverlap);}
1842: PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1843: return(0);
1844: }
1846: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
1847: {
1848: DM_Plex *mesh = (DM_Plex*) dm->data;
1851: *overlap = mesh->overlap;
1852: return(0);
1853: }
1855: /*@
1856: DMPlexGetOverlap - Get the DMPlex partition overlap.
1858: Not collective
1860: Input Parameter:
1861: . dm - The DM
1863: Output Parameter:
1864: . overlap - The overlap of this DM
1866: Level: intermediate
1868: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap(), DMPlexCreateOverlapLabel()
1869: @*/
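/*
  Example (illustrative sketch): querying the stored overlap, e.g. to report whether ghost cells
  are already present; "dm" is assumed given and error checking omitted as in this listing.

    PetscInt ol;

    DMPlexGetOverlap(dm, &ol);
    if (!ol) PetscPrintf(PetscObjectComm((PetscObject) dm), "Mesh has no overlap yet\n");
*/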
1870: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
1871: {
1872: PetscErrorCode ierr;
1876: PetscUseMethod(dm,"DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap));
1877: return(0);
1878: }
1880: /*@C
1881: DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1882: root process of the original's communicator.
1884: Collective on dm
1886: Input Parameter:
1887: . dm - the original DMPlex object
1889: Output Parameters:
1890: + sf - the PetscSF used for point distribution (optional)
1891: - gatherMesh - the gathered DM object, or NULL
1893: Level: intermediate
1895: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1896: @*/
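/*
  Example (illustrative sketch): gathering a distributed mesh onto the root rank, e.g. for serial
  post-processing; "dm" is assumed to be a parallel DMPlex and error checking is omitted.

    DM      dmGather = NULL;
    PetscSF sfGather = NULL;

    DMPlexGetGatherDM(dm, &sfGather, &dmGather);
    if (dmGather) {             // NULL on a single rank
      DMViewFromOptions(dmGather, NULL, "-gathered_dm_view");
      DMDestroy(&dmGather);
    }
    PetscSFDestroy(&sfGather);
*/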
1897: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
1898: {
1899: MPI_Comm comm;
1900: PetscMPIInt size;
1901: PetscPartitioner oldPart, gatherPart;
1907: *gatherMesh = NULL;
1908: if (sf) *sf = NULL;
1909: comm = PetscObjectComm((PetscObject)dm);
1910: MPI_Comm_size(comm,&size);
1911: if (size == 1) return(0);
1912: DMPlexGetPartitioner(dm,&oldPart);
1913: PetscObjectReference((PetscObject)oldPart);
1914: PetscPartitionerCreate(comm,&gatherPart);
1915: PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1916: DMPlexSetPartitioner(dm,gatherPart);
1917: DMPlexDistribute(dm,0,sf,gatherMesh);
1919: DMPlexSetPartitioner(dm,oldPart);
1920: PetscPartitionerDestroy(&gatherPart);
1921: PetscPartitionerDestroy(&oldPart);
1922: return(0);
1923: }
1925: /*@C
1926: DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.
1928: Collective on dm
1930: Input Parameter:
1931: . dm - the original DMPlex object
1933: Output Parameters:
1934: + sf - the PetscSF used for point distribution (optional)
1935: - redundantMesh - the redundant DM object, or NULL
1937: Level: intermediate
1939: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1940: @*/
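/*
  Example (illustrative sketch): obtaining a full copy of the mesh on every rank, e.g. for a
  rank-local computation over the whole domain; "dm" is assumed given and error checking omitted.

    DM dmRedundant = NULL;

    DMPlexGetRedundantDM(dm, NULL, &dmRedundant);   // on one rank this returns dm itself with an extra reference
    if (dmRedundant) {
      DMViewFromOptions(dmRedundant, NULL, "-redundant_dm_view");
      DMDestroy(&dmRedundant);
    }
*/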
1941: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
1942: {
1943: MPI_Comm comm;
1944: PetscMPIInt size, rank;
1945: PetscInt pStart, pEnd, p;
1946: PetscInt numPoints = -1;
1947: PetscSF migrationSF, sfPoint, gatherSF;
1948: DM gatherDM, dmCoord;
1949: PetscSFNode *points;
1955: *redundantMesh = NULL;
1956: comm = PetscObjectComm((PetscObject)dm);
1957: MPI_Comm_size(comm,&size);
1958: if (size == 1) {
1959: PetscObjectReference((PetscObject) dm);
1960: *redundantMesh = dm;
1961: if (sf) *sf = NULL;
1962: return(0);
1963: }
1964: DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);
1965: if (!gatherDM) return(0);
1966: MPI_Comm_rank(comm,&rank);
1967: DMPlexGetChart(gatherDM,&pStart,&pEnd);
1968: numPoints = pEnd - pStart;
1969: MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1970: PetscMalloc1(numPoints,&points);
1971: PetscSFCreate(comm,&migrationSF);
1972: for (p = 0; p < numPoints; p++) {
1973: points[p].index = p;
1974: points[p].rank = 0;
1975: }
1976: PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1977: DMPlexCreate(comm, redundantMesh);
1978: PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1979: DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1980: DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1981: DMSetPointSF(*redundantMesh, sfPoint);
1982: DMGetCoordinateDM(*redundantMesh, &dmCoord);
1983: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1984: PetscSFDestroy(&sfPoint);
1985: if (sf) {
1986: PetscSF tsf;
1988: PetscSFCompose(gatherSF,migrationSF,&tsf);
1989: DMPlexStratifyMigrationSF(dm, tsf, sf);
1990: PetscSFDestroy(&tsf);
1991: }
1992: PetscSFDestroy(&migrationSF);
1993: PetscSFDestroy(&gatherSF);
1994: DMDestroy(&gatherDM);
1995: return(0);
1996: }
1998: /*@
1999: DMPlexIsDistributed - Find out whether this DM is distributed, i.e. more than one rank owns some points.
2001: Collective
2003: Input Parameter:
2004: . dm - The DM object
2006: Output Parameter:
2007: . distributed - Flag whether the DM is distributed
2009: Level: intermediate
2011: Notes:
2012: This currently checks whether at least two ranks own any DAG points.
2013: This involves an MPI_Allreduce() of a single integer.
2014: The result is currently not cached, so every call to this routine repeats this global communication.
2016: .seealso: DMPlexDistribute(), DMPlexGetOverlap(), DMPlexIsInterpolated()
2017: @*/
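/*
  Example (illustrative sketch): distributing only when the mesh is still serial; the query is
  collective, so all ranks must call it and take the same branch. "dm" is assumed given and
  error checking is omitted as in this listing.

    PetscBool distributed;

    DMPlexIsDistributed(dm, &distributed);
    if (!distributed) {
      DM dmDist = NULL;

      DMPlexDistribute(dm, 0, NULL, &dmDist);
      if (dmDist) {
        DMDestroy(&dm);
        dm = dmDist;
      }
    }
*/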
2018: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2019: {
2020: PetscInt pStart, pEnd, count;
2021: MPI_Comm comm;
2022: PetscMPIInt size;
2023: PetscErrorCode ierr;
2028: PetscObjectGetComm((PetscObject)dm,&comm);
2029: MPI_Comm_size(comm,&size);
2030: if (size == 1) { *distributed = PETSC_FALSE; return(0); }
2031: DMPlexGetChart(dm, &pStart, &pEnd);
2032: count = (pEnd - pStart) > 0 ? 1 : 0;
2033: MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm);
2034: *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2035: return(0);
2036: }