Actual source code: plexdistribute.c
petsc-3.12.5 2020-03-29
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsc/private/dmlabelimpl.h>
4: /*@C
5: DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
7: Input Parameters:
8: + dm - The DM object
9: . user - The user callback, may be NULL (to clear the callback)
10: - ctx - context for callback evaluation, may be NULL
12: Level: advanced
14: Notes:
15: The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.
17: Any setting here overrides other configuration of DMPlex adjacency determination.
19: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
20: @*/
21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
22: {
23: DM_Plex *mesh = (DM_Plex *)dm->data;
27: mesh->useradjacency = user;
28: mesh->useradjacencyctx = ctx;
29: return(0);
30: }
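/* Usage sketch (the names ExampleConeAdjacency and exampleCtx are illustrative, not part of PETSc):
   a user callback that reports only the cone of a point as its adjacency.  On entry *adjSize holds
   the capacity of adj[]; on return it must hold the number of entries written, mirroring how the
   callback is invoked inside DMPlexGetAdjacency_Internal() below. */
static PetscErrorCode ExampleConeAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
{
  const PetscInt *cone;
  PetscInt        coneSize, c;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr);
  ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr);
  if (coneSize > *adjSize) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Adjacency array is too small");
  for (c = 0; c < coneSize; ++c) adj[c] = cone[c];
  *adjSize = coneSize;
  PetscFunctionReturn(0);
}
/* The callback would then be registered (and later cleared) on a configured DMPlex dm with
     DMPlexSetAdjacencyUser(dm, ExampleConeAdjacency, exampleCtx);
     DMPlexSetAdjacencyUser(dm, NULL, NULL);
*/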
32: /*@C
33: DMPlexGetAdjacencyUser - get the user-defined adjacency callback
35: Input Parameter:
36: . dm - The DM object
38: Output Parameters:
39: + user - The user callback

40: - ctx - context for callback evaluation
42: Level: advanced
44: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
45: @*/
46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
47: {
48: DM_Plex *mesh = (DM_Plex *)dm->data;
52: if (user) *user = mesh->useradjacency;
53: if (ctx) *ctx = mesh->useradjacencyctx;
54: return(0);
55: }
57: /*@
58: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
60: Input Parameters:
61: + dm - The DM object
62: - useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
64: Level: intermediate
66: .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
67: @*/
68: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
69: {
70: DM_Plex *mesh = (DM_Plex *) dm->data;
74: mesh->useAnchors = useAnchors;
75: return(0);
76: }
78: /*@
79: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
81: Input Parameter:
82: . dm - The DM object
84: Output Parameter:
85: . useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
87: Level: intermediate
89: .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
90: @*/
91: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
92: {
93: DM_Plex *mesh = (DM_Plex *) dm->data;
98: *useAnchors = mesh->useAnchors;
99: return(0);
100: }
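/* Minimal sketch (ExampleEnableAnchorAdjacency is illustrative): enable anchor-aware adjacency,
   assuming constraints were previously installed with DMPlexSetAnchors(), and read the flag back
   before preallocating an operator. */
static PetscErrorCode ExampleEnableAnchorAdjacency(DM dm)
{
  PetscBool      useAnchors;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexSetAdjacencyUseAnchors(dm, PETSC_TRUE);CHKERRQ(ierr);
  ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr);
  if (!useAnchors) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Anchor adjacency flag was not stored");
  PetscFunctionReturn(0);
}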
102: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103: {
104: const PetscInt *cone = NULL;
105: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
106: PetscErrorCode ierr;
109: DMPlexGetConeSize(dm, p, &coneSize);
110: DMPlexGetCone(dm, p, &cone);
111: for (c = 0; c <= coneSize; ++c) {
112: const PetscInt point = !c ? p : cone[c-1];
113: const PetscInt *support = NULL;
114: PetscInt supportSize, s, q;
116: DMPlexGetSupportSize(dm, point, &supportSize);
117: DMPlexGetSupport(dm, point, &support);
118: for (s = 0; s < supportSize; ++s) {
119: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
120: if (support[s] == adj[q]) break;
121: }
122: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
123: }
124: }
125: *adjSize = numAdj;
126: return(0);
127: }
129: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
130: {
131: const PetscInt *support = NULL;
132: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
133: PetscErrorCode ierr;
136: DMPlexGetSupportSize(dm, p, &supportSize);
137: DMPlexGetSupport(dm, p, &support);
138: for (s = 0; s <= supportSize; ++s) {
139: const PetscInt point = !s ? p : support[s-1];
140: const PetscInt *cone = NULL;
141: PetscInt coneSize, c, q;
143: DMPlexGetConeSize(dm, point, &coneSize);
144: DMPlexGetCone(dm, point, &cone);
145: for (c = 0; c < coneSize; ++c) {
146: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
147: if (cone[c] == adj[q]) break;
148: }
149: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
150: }
151: }
152: *adjSize = numAdj;
153: return(0);
154: }
156: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
157: {
158: PetscInt *star = NULL;
159: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
163: DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
164: for (s = 0; s < starSize*2; s += 2) {
165: const PetscInt *closure = NULL;
166: PetscInt closureSize, c, q;
168: DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
169: for (c = 0; c < closureSize*2; c += 2) {
170: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
171: if (closure[c] == adj[q]) break;
172: }
173: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
174: }
175: DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
176: }
177: DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
178: *adjSize = numAdj;
179: return(0);
180: }
182: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
183: {
184: static PetscInt asiz = 0;
185: PetscInt maxAnchors = 1;
186: PetscInt aStart = -1, aEnd = -1;
187: PetscInt maxAdjSize;
188: PetscSection aSec = NULL;
189: IS aIS = NULL;
190: const PetscInt *anchors;
191: DM_Plex *mesh = (DM_Plex *)dm->data;
192: PetscErrorCode ierr;
195: if (useAnchors) {
196: DMPlexGetAnchors(dm,&aSec,&aIS);
197: if (aSec) {
198: PetscSectionGetMaxDof(aSec,&maxAnchors);
199: maxAnchors = PetscMax(1,maxAnchors);
200: PetscSectionGetChart(aSec,&aStart,&aEnd);
201: ISGetIndices(aIS,&anchors);
202: }
203: }
204: if (!*adj) {
205: PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;
207: DMPlexGetChart(dm, &pStart,&pEnd);
208: DMPlexGetDepth(dm, &depth);
209: DMPlexGetMaxSizes(dm, &maxC, &maxS);
210: coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
211: supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
212: asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
213: asiz *= maxAnchors;
214: asiz = PetscMin(asiz,pEnd-pStart);
215: PetscMalloc1(asiz,adj);
216: }
217: if (*adjSize < 0) *adjSize = asiz;
218: maxAdjSize = *adjSize;
219: if (mesh->useradjacency) {
220: (*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx);
221: } else if (useTransitiveClosure) {
222: DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
223: } else if (useCone) {
224: DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
225: } else {
226: DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
227: }
228: if (useAnchors && aSec) {
229: PetscInt origSize = *adjSize;
230: PetscInt numAdj = origSize;
231: PetscInt i = 0, j;
232: PetscInt *orig = *adj;
234: while (i < origSize) {
235: PetscInt p = orig[i];
236: PetscInt aDof = 0;
238: if (p >= aStart && p < aEnd) {
239: PetscSectionGetDof(aSec,p,&aDof);
240: }
241: if (aDof) {
242: PetscInt aOff;
243: PetscInt s, q;
245: for (j = i + 1; j < numAdj; j++) {
246: orig[j - 1] = orig[j];
247: }
248: origSize--;
249: numAdj--;
250: PetscSectionGetOffset(aSec,p,&aOff);
251: for (s = 0; s < aDof; ++s) {
252: for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
253: if (anchors[aOff+s] == orig[q]) break;
254: }
255: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
256: }
257: }
258: else {
259: i++;
260: }
261: }
262: *adjSize = numAdj;
263: ISRestoreIndices(aIS,&anchors);
264: }
265: return(0);
266: }
268: /*@
269: DMPlexGetAdjacency - Return all points adjacent to the given point
271: Input Parameters:
272: + dm - The DM object
273: . p - The point
274: . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
275: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize
277: Output Parameters:
278: + adjSize - The number of adjacent points
279: - adj - The adjacent points
281: Level: advanced
283: Notes:
284: The user must PetscFree the adj array if it was not passed in.
286: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
287: @*/
288: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
289: {
290: PetscBool useCone, useClosure, useAnchors;
297: DMGetBasicAdjacency(dm, &useCone, &useClosure);
298: DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
299: DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);
300: return(0);
301: }
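/* Usage sketch (ExampleViewAdjacency is illustrative): query the adjacency of every point in the
   chart, letting DMPlexGetAdjacency() size and allocate the work array on the first call and
   reusing it afterwards; the caller frees the array at the end, as the Notes above require. */
static PetscErrorCode ExampleViewAdjacency(DM dm)
{
  PetscInt      *adj = NULL;
  PetscInt       pStart, pEnd, p, a;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {
    PetscInt adjSize = PETSC_DETERMINE;

    ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
    for (a = 0; a < adjSize; ++a) {
      ierr = PetscPrintf(PETSC_COMM_SELF, "point %D is adjacent to %D\n", p, adj[a]);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(adj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}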
303: /*@
304: DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity
306: Collective on dm
308: Input Parameters:
309: + dm - The DM
310: - sfPoint - The PetscSF which encodes point connectivity
312: Output Parameters:
313: + processRanks - A list of process neighbors, or NULL
314: - sfProcess - An SF encoding the two-sided process connectivity, or NULL
316: Level: developer
318: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
319: @*/
320: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
321: {
322: const PetscSFNode *remotePoints;
323: PetscInt *localPointsNew;
324: PetscSFNode *remotePointsNew;
325: const PetscInt *nranks;
326: PetscInt *ranksNew;
327: PetscBT neighbors;
328: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
329: PetscMPIInt size, proc, rank;
330: PetscErrorCode ierr;
337: MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
338: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
339: PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
340: PetscBTCreate(size, &neighbors);
341: PetscBTMemzero(size, neighbors);
342: /* Compute root-to-leaf process connectivity */
343: PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
344: ISGetIndices(rootRanks, &nranks);
345: for (p = pStart; p < pEnd; ++p) {
346: PetscInt ndof, noff, n;
348: PetscSectionGetDof(rootRankSection, p, &ndof);
349: PetscSectionGetOffset(rootRankSection, p, &noff);
350: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
351: }
352: ISRestoreIndices(rootRanks, &nranks);
353: /* Compute leaf-to-neighbor process connectivity */
354: PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
355: ISGetIndices(leafRanks, &nranks);
356: for (p = pStart; p < pEnd; ++p) {
357: PetscInt ndof, noff, n;
359: PetscSectionGetDof(leafRankSection, p, &ndof);
360: PetscSectionGetOffset(leafRankSection, p, &noff);
361: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
362: }
363: ISRestoreIndices(leafRanks, &nranks);
364: /* Compute leaf-to-root process connectivity */
365: for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
366: /* Calculate edges */
367: PetscBTClear(neighbors, rank);
368: for(proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
369: PetscMalloc1(numNeighbors, &ranksNew);
370: PetscMalloc1(numNeighbors, &localPointsNew);
371: PetscMalloc1(numNeighbors, &remotePointsNew);
372: for(proc = 0, n = 0; proc < size; ++proc) {
373: if (PetscBTLookup(neighbors, proc)) {
374: ranksNew[n] = proc;
375: localPointsNew[n] = proc;
376: remotePointsNew[n].index = rank;
377: remotePointsNew[n].rank = proc;
378: ++n;
379: }
380: }
381: PetscBTDestroy(&neighbors);
382: if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
383: else {PetscFree(ranksNew);}
384: if (sfProcess) {
385: PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
386: PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
387: PetscSFSetFromOptions(*sfProcess);
388: PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
389: }
390: return(0);
391: }
393: /*@
394: DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided information for an SF.
396: Collective on dm
398: Input Parameter:
399: . dm - The DM
401: Output Parameters:
402: + rootSection - The number of leaves for a given root point
403: . rootrank - The rank of each edge into the root point
404: . leafSection - The number of processes sharing a given leaf point
405: - leafrank - The rank of each process sharing a leaf point
407: Level: developer
409: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistribute(), DMPlexDistributeOverlap()
410: @*/
411: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
412: {
413: MPI_Comm comm;
414: PetscSF sfPoint;
415: const PetscInt *rootdegree;
416: PetscInt *myrank, *remoterank;
417: PetscInt pStart, pEnd, p, nedges;
418: PetscMPIInt rank;
419: PetscErrorCode ierr;
422: PetscObjectGetComm((PetscObject) dm, &comm);
423: MPI_Comm_rank(comm, &rank);
424: DMPlexGetChart(dm, &pStart, &pEnd);
425: DMGetPointSF(dm, &sfPoint);
426: /* Compute number of leaves for each root */
427: PetscObjectSetName((PetscObject) rootSection, "Root Section");
428: PetscSectionSetChart(rootSection, pStart, pEnd);
429: PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
430: PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
431: for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
432: PetscSectionSetUp(rootSection);
433: /* Gather rank of each leaf to root */
434: PetscSectionGetStorageSize(rootSection, &nedges);
435: PetscMalloc1(pEnd-pStart, &myrank);
436: PetscMalloc1(nedges, &remoterank);
437: for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
438: PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
439: PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
440: PetscFree(myrank);
441: ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
442: /* Distribute remote ranks to leaves */
443: PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
444: DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
445: return(0);
446: }
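/* Illustrative sketch (ExampleProcessConnectivity is hypothetical): compute shared-point ownership
   and feed it to DMPlexCreateTwoSidedProcessSF() above to view the process connectivity graph.
   This pairing is one plausible use of the two routines, not one mandated by the manual pages. */
static PetscErrorCode ExampleProcessConnectivity(DM dm)
{
  MPI_Comm       comm;
  PetscSF        sfPoint, sfProcess;
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank, processRanks;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  ierr = DMPlexCreateTwoSidedProcessSF(dm, sfPoint, rootSection, rootrank, leafSection, leafrank, &processRanks, &sfProcess);CHKERRQ(ierr);
  ierr = PetscSFView(sfProcess, NULL);CHKERRQ(ierr);
  ierr = ISDestroy(&processRanks);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfProcess);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}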
448: /*@C
449: DMPlexCreateOverlapLabel - Compute a DMLabel marking the remote overlap contributions, as point/rank pairings, for the requested number of overlap levels.
451: Collective on dm
453: Input Parameters:
454: + dm - The DM
455: . levels - Number of overlap levels
456: . rootSection - The number of leaves for a given root point
457: . rootrank - The rank of each edge into the root point
458: . leafSection - The number of processes sharing a given leaf point
459: - leafrank - The rank of each process sharing a leaf point
461: Output Parameter:
462: . ovLabel - DMLabel containing remote overlap contributions as point/rank pairings
464: Level: developer
466: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
467: @*/
468: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
469: {
470: MPI_Comm comm;
471: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
472: PetscSF sfPoint;
473: const PetscSFNode *remote;
474: const PetscInt *local;
475: const PetscInt *nrank, *rrank;
476: PetscInt *adj = NULL;
477: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
478: PetscMPIInt rank, size;
479: PetscBool flg;
480: PetscErrorCode ierr;
483: *ovLabel = NULL;
484: PetscObjectGetComm((PetscObject) dm, &comm);
485: MPI_Comm_size(comm, &size);
486: MPI_Comm_rank(comm, &rank);
487: if (size == 1) return(0);
488: DMGetPointSF(dm, &sfPoint);
489: DMPlexGetChart(dm, &pStart, &pEnd);
490: PetscSectionGetChart(leafSection, &sStart, &sEnd);
491: PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
492: DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);
493: /* Handle leaves: shared with the root point */
494: for (l = 0; l < nleaves; ++l) {
495: PetscInt adjSize = PETSC_DETERMINE, a;
497: DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);
498: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
499: }
500: ISGetIndices(rootrank, &rrank);
501: ISGetIndices(leafrank, &nrank);
502: /* Handle roots */
503: for (p = pStart; p < pEnd; ++p) {
504: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
506: if ((p >= sStart) && (p < sEnd)) {
507: /* Some leaves share a root with other leaves on different processes */
508: PetscSectionGetDof(leafSection, p, &neighbors);
509: if (neighbors) {
510: PetscSectionGetOffset(leafSection, p, &noff);
511: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
512: for (n = 0; n < neighbors; ++n) {
513: const PetscInt remoteRank = nrank[noff+n];
515: if (remoteRank == rank) continue;
516: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
517: }
518: }
519: }
520: /* Roots are shared with leaves */
521: PetscSectionGetDof(rootSection, p, &neighbors);
522: if (!neighbors) continue;
523: PetscSectionGetOffset(rootSection, p, &noff);
524: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
525: for (n = 0; n < neighbors; ++n) {
526: const PetscInt remoteRank = rrank[noff+n];
528: if (remoteRank == rank) continue;
529: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
530: }
531: }
532: PetscFree(adj);
533: ISRestoreIndices(rootrank, &rrank);
534: ISRestoreIndices(leafrank, &nrank);
535: /* Add additional overlap levels */
536: for (l = 1; l < levels; l++) {
537: /* Propagate point donations over SF to capture remote connections */
538: DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
539: /* Add next level of point donations to the label */
540: DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
541: }
542: /* We require the closure in the overlap */
543: DMPlexPartitionLabelClosure(dm, ovAdjByRank);
544: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
545: if (flg) {
546: PetscViewer viewer;
547: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);
548: DMLabelView(ovAdjByRank, viewer);
549: }
550: /* Invert sender to receiver label */
551: DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
552: DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);
553: /* Add owned points, except for shared local points */
554: for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
555: for (l = 0; l < nleaves; ++l) {
556: DMLabelClearValue(*ovLabel, local[l], rank);
557: DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
558: }
559: /* Clean up */
560: DMLabelDestroy(&ovAdjByRank);
561: return(0);
562: }
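/* Sketch of a typical calling sequence (ExampleOverlapLabel is illustrative): the ownership data from
   DMPlexDistributeOwnership() is passed straight to DMPlexCreateOverlapLabel(); the resulting label
   of point/rank pairs is returned to the caller, who owns it. */
static PetscErrorCode ExampleOverlapLabel(DM dm, PetscInt levels, DMLabel *ovLabel)
{
  MPI_Comm       comm;
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  ierr = DMPlexCreateOverlapLabel(dm, levels, rootSection, rootrank, leafSection, leafrank, ovLabel);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}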
564: /*@C
565: DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution that realizes the overlap described by the input SF
567: Collective on dm
569: Input Parameters:
570: + dm - The DM
571: - overlapSF - The SF mapping ghost points in overlap to owner points on other processes
573: Output Parameter:
574: . migrationSF - An SF that maps original points in old locations to points in new locations
576: Level: developer
578: .seealso: DMPlexCreateOverlapLabel(), DMPlexDistributeOverlap(), DMPlexDistribute()
579: @*/
580: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
581: {
582: MPI_Comm comm;
583: PetscMPIInt rank, size;
584: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
585: PetscInt *pointDepths, *remoteDepths, *ilocal;
586: PetscInt *depthRecv, *depthShift, *depthIdx;
587: PetscSFNode *iremote;
588: PetscSF pointSF;
589: const PetscInt *sharedLocal;
590: const PetscSFNode *overlapRemote, *sharedRemote;
591: PetscErrorCode ierr;
595: PetscObjectGetComm((PetscObject)dm, &comm);
596: MPI_Comm_rank(comm, &rank);
597: MPI_Comm_size(comm, &size);
598: DMGetDimension(dm, &dim);
600: /* Before building the migration SF we need to know the new stratum offsets */
601: PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
602: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
603: for (d=0; d<dim+1; d++) {
604: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
605: for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
606: }
607: for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
608: PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
609: PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);
611: /* Count received points in each stratum and compute the internal strata shift */
612: PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
613: for (d=0; d<dim+1; d++) depthRecv[d]=0;
614: for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
615: depthShift[dim] = 0;
616: for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
617: for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
618: for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
619: for (d=0; d<dim+1; d++) {
620: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
621: depthIdx[d] = pStart + depthShift[d];
622: }
624: /* Build an SF that describes the full overlap migration */
625: DMPlexGetChart(dm, &pStart, &pEnd);
626: newLeaves = pEnd - pStart + nleaves;
627: PetscMalloc1(newLeaves, &ilocal);
628: PetscMalloc1(newLeaves, &iremote);
629: /* First map local points to themselves */
630: for (d=0; d<dim+1; d++) {
631: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
632: for (p=pStart; p<pEnd; p++) {
633: point = p + depthShift[d];
634: ilocal[point] = point;
635: iremote[point].index = p;
636: iremote[point].rank = rank;
637: depthIdx[d]++;
638: }
639: }
641: /* Add in the remote roots for currently shared points */
642: DMGetPointSF(dm, &pointSF);
643: PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
644: for (d=0; d<dim+1; d++) {
645: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
646: for (p=0; p<numSharedPoints; p++) {
647: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
648: point = sharedLocal[p] + depthShift[d];
649: iremote[point].index = sharedRemote[p].index;
650: iremote[point].rank = sharedRemote[p].rank;
651: }
652: }
653: }
655: /* Now add the incoming overlap points */
656: for (p=0; p<nleaves; p++) {
657: point = depthIdx[remoteDepths[p]];
658: ilocal[point] = point;
659: iremote[point].index = overlapRemote[p].index;
660: iremote[point].rank = overlapRemote[p].rank;
661: depthIdx[remoteDepths[p]]++;
662: }
663: PetscFree2(pointDepths,remoteDepths);
665: PetscSFCreate(comm, migrationSF);
666: PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
667: PetscSFSetFromOptions(*migrationSF);
668: DMPlexGetChart(dm, &pStart, &pEnd);
669: PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);
671: PetscFree3(depthRecv, depthShift, depthIdx);
672: return(0);
673: }
675: /*@
676: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF for stratification.
678: Input Parameters:
679: + dm - The DM
680: - sf - A star forest with non-ordered leaves, usually defining a DM point migration
682: Output Parameter:
683: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
685: Level: developer
687: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
688: @*/
689: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
690: {
691: MPI_Comm comm;
692: PetscMPIInt rank, size;
693: PetscInt d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
694: PetscInt *pointDepths, *remoteDepths, *ilocal;
695: PetscInt *depthRecv, *depthShift, *depthIdx;
696: PetscInt hybEnd[4];
697: const PetscSFNode *iremote;
698: PetscErrorCode ierr;
702: PetscObjectGetComm((PetscObject) dm, &comm);
703: MPI_Comm_rank(comm, &rank);
704: MPI_Comm_size(comm, &size);
705: DMPlexGetDepth(dm, &ldepth);
706: MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
707: if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);
708: PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0);
710: /* Before building the migration SF we need to know the new stratum offsets */
711: PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
712: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
713: DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[PetscMax(depth-1,0)],&hybEnd[1],&hybEnd[0]);
714: for (d = 0; d < depth+1; ++d) {
715: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
716: for (p = pStart; p < pEnd; ++p) {
717: if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */
718: pointDepths[p] = 2 * d;
719: } else {
720: pointDepths[p] = 2 * d + 1;
721: }
722: }
723: }
724: for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
725: PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);
726: PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);
727: /* Count received points in each stratum and compute the internal strata shift */
728: PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);
729: for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0;
730: for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
731: depthShift[2*depth+1] = 0;
732: for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1];
733: for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth];
734: depthShift[0] += depthRecv[1];
735: for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1];
736: for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0];
737: for (d = 2 * depth-1; d > 2; --d) {
738: PetscInt e;
740: for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d];
741: }
742: for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;}
743: /* Derive a new local permutation based on stratified indices */
744: PetscMalloc1(nleaves, &ilocal);
745: for (p = 0; p < nleaves; ++p) {
746: const PetscInt dep = remoteDepths[p];
748: ilocal[p] = depthShift[dep] + depthIdx[dep];
749: depthIdx[dep]++;
750: }
751: PetscSFCreate(comm, migrationSF);
752: PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
753: PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
754: PetscFree2(pointDepths,remoteDepths);
755: PetscFree3(depthRecv, depthShift, depthIdx);
756: PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0);
757: return(0);
758: }
760: /*@
761: DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
763: Collective on dm
765: Input Parameters:
766: + dm - The DMPlex object
767: . pointSF - The PetscSF describing the communication pattern
768: . originalSection - The PetscSection for existing data layout
769: - originalVec - The existing data in a local vector
771: Output Parameters:
772: + newSection - The PetscSection describing the new data layout
773: - newVec - The new data in a local vector
775: Level: developer
777: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
778: @*/
779: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
780: {
781: PetscSF fieldSF;
782: PetscInt *remoteOffsets, fieldSize;
783: PetscScalar *originalValues, *newValues;
787: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
788: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
790: PetscSectionGetStorageSize(newSection, &fieldSize);
791: VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
792: VecSetType(newVec,dm->vectype);
794: VecGetArray(originalVec, &originalValues);
795: VecGetArray(newVec, &newValues);
796: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
797: PetscFree(remoteOffsets);
798: PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
799: PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
800: PetscSFDestroy(&fieldSF);
801: VecRestoreArray(newVec, &newValues);
802: VecRestoreArray(originalVec, &originalValues);
803: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
804: return(0);
805: }
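/* Usage sketch (ExampleDistributeVec is illustrative), modelled on DMPlexDistributeCoordinates()
   further below: push a section-described local Vec from the original dm over the migration SF.
   The output section and Vec are created here and returned to the caller, who destroys them. */
static PetscErrorCode ExampleDistributeVec(DM dm, PetscSF migrationSF, PetscSection origSection, Vec origVec, PetscSection *newSection, Vec *newVec)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PETSC_COMM_SELF, newSection);CHKERRQ(ierr);
  ierr = VecCreate(PETSC_COMM_SELF, newVec);CHKERRQ(ierr);
  ierr = DMPlexDistributeField(dm, migrationSF, origSection, origVec, *newSection, *newVec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}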
807: /*@
808: DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
810: Collective on dm
812: Input Parameters:
813: + dm - The DMPlex object
814: . pointSF - The PetscSF describing the communication pattern
815: . originalSection - The PetscSection for existing data layout
816: - originalIS - The existing data
818: Output Parameters:
819: + newSection - The PetscSection describing the new data layout
820: - newIS - The new data
822: Level: developer
824: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
825: @*/
826: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
827: {
828: PetscSF fieldSF;
829: PetscInt *newValues, *remoteOffsets, fieldSize;
830: const PetscInt *originalValues;
831: PetscErrorCode ierr;
834: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
835: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
837: PetscSectionGetStorageSize(newSection, &fieldSize);
838: PetscMalloc1(fieldSize, &newValues);
840: ISGetIndices(originalIS, &originalValues);
841: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
842: PetscFree(remoteOffsets);
843: PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
844: PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
845: PetscSFDestroy(&fieldSF);
846: ISRestoreIndices(originalIS, &originalValues);
847: ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
848: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
849: return(0);
850: }
852: /*@
853: DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
855: Collective on dm
857: Input Parameters:
858: + dm - The DMPlex object
859: . pointSF - The PetscSF describing the communication pattern
860: . originalSection - The PetscSection for existing data layout
861: . datatype - The type of data
862: - originalData - The existing data
864: Output Parameters:
865: + newSection - The PetscSection describing the new data layout
866: - newData - The new data
868: Level: developer
870: .seealso: DMPlexDistribute(), DMPlexDistributeField()
871: @*/
872: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
873: {
874: PetscSF fieldSF;
875: PetscInt *remoteOffsets, fieldSize;
876: PetscMPIInt dataSize;
880: PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
881: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
883: PetscSectionGetStorageSize(newSection, &fieldSize);
884: MPI_Type_size(datatype, &dataSize);
885: PetscMalloc(fieldSize * dataSize, newData);
887: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
888: PetscFree(remoteOffsets);
889: PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
890: PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
891: PetscSFDestroy(&fieldSF);
892: PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
893: return(0);
894: }
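/* Sketch (ExampleDistributeInts is illustrative): distribute per-point PetscInt data laid out by
   origSection using the raw-buffer variant.  DMPlexDistributeData() allocates the new buffer, so
   the caller must eventually PetscFree() it and destroy the new section. */
static PetscErrorCode ExampleDistributeInts(DM dm, PetscSF migrationSF, PetscSection origSection, PetscInt *origData)
{
  PetscSection   newSection;
  PetscInt      *newData = NULL;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PETSC_COMM_SELF, &newSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origData, newSection, (void **) &newData);CHKERRQ(ierr);
  /* ... use newSection/newData in the migrated layout ... */
  ierr = PetscSectionDestroy(&newSection);CHKERRQ(ierr);
  ierr = PetscFree(newData);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}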
896: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
897: {
898: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
899: MPI_Comm comm;
900: PetscSF coneSF;
901: PetscSection originalConeSection, newConeSection;
902: PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
903: PetscBool flg;
904: PetscErrorCode ierr;
909: PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
910: /* Distribute cone section */
911: PetscObjectGetComm((PetscObject)dm, &comm);
912: DMPlexGetConeSection(dm, &originalConeSection);
913: DMPlexGetConeSection(dmParallel, &newConeSection);
914: PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
915: DMSetUp(dmParallel);
916: {
917: PetscInt pStart, pEnd, p;
919: PetscSectionGetChart(newConeSection, &pStart, &pEnd);
920: for (p = pStart; p < pEnd; ++p) {
921: PetscInt coneSize;
922: PetscSectionGetDof(newConeSection, p, &coneSize);
923: pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
924: }
925: }
926: /* Communicate and renumber cones */
927: PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
928: PetscFree(remoteOffsets);
929: DMPlexGetCones(dm, &cones);
930: if (original) {
931: PetscInt numCones;
933: PetscSectionGetStorageSize(originalConeSection,&numCones);
934: PetscMalloc1(numCones,&globCones);
935: ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
936: } else {
937: globCones = cones;
938: }
939: DMPlexGetCones(dmParallel, &newCones);
940: PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);
941: PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);
942: if (original) {
943: PetscFree(globCones);
944: }
945: PetscSectionGetStorageSize(newConeSection, &newConesSize);
946: ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
947: #if defined(PETSC_USE_DEBUG)
948: {
949: PetscInt p;
950: PetscBool valid = PETSC_TRUE;
951: for (p = 0; p < newConesSize; ++p) {
952: if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);}
953: }
954: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
955: }
956: #endif
957: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
958: if (flg) {
959: PetscPrintf(comm, "Serial Cone Section:\n");
960: PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm));
961: PetscPrintf(comm, "Parallel Cone Section:\n");
962: PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm));
963: PetscSFView(coneSF, NULL);
964: }
965: DMPlexGetConeOrientations(dm, &cones);
966: DMPlexGetConeOrientations(dmParallel, &newCones);
967: PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
968: PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
969: PetscSFDestroy(&coneSF);
970: PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
971: /* Create supports and stratify DMPlex */
972: {
973: PetscInt pStart, pEnd;
975: PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
976: PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
977: }
978: DMPlexSymmetrize(dmParallel);
979: DMPlexStratify(dmParallel);
980: {
981: PetscBool useCone, useClosure, useAnchors;
983: DMGetBasicAdjacency(dm, &useCone, &useClosure);
984: DMSetBasicAdjacency(dmParallel, useCone, useClosure);
985: DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
986: DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
987: }
988: return(0);
989: }
991: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
992: {
993: MPI_Comm comm;
994: PetscSection originalCoordSection, newCoordSection;
995: Vec originalCoordinates, newCoordinates;
996: PetscInt bs;
997: PetscBool isper;
998: const char *name;
999: const PetscReal *maxCell, *L;
1000: const DMBoundaryType *bd;
1001: PetscErrorCode ierr;
1007: PetscObjectGetComm((PetscObject)dm, &comm);
1008: DMGetCoordinateSection(dm, &originalCoordSection);
1009: DMGetCoordinateSection(dmParallel, &newCoordSection);
1010: DMGetCoordinatesLocal(dm, &originalCoordinates);
1011: if (originalCoordinates) {
1012: VecCreate(PETSC_COMM_SELF, &newCoordinates);
1013: PetscObjectGetName((PetscObject) originalCoordinates, &name);
1014: PetscObjectSetName((PetscObject) newCoordinates, name);
1016: DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1017: DMSetCoordinatesLocal(dmParallel, newCoordinates);
1018: VecGetBlockSize(originalCoordinates, &bs);
1019: VecSetBlockSize(newCoordinates, bs);
1020: VecDestroy(&newCoordinates);
1021: }
1022: DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);
1023: DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);
1024: return(0);
1025: }
1027: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1028: {
1029: DM_Plex *mesh = (DM_Plex*) dm->data;
1030: MPI_Comm comm;
1031: DMLabel depthLabel;
1032: PetscMPIInt rank;
1033: PetscInt depth, d, numLabels, numLocalLabels, l;
1034: PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1035: PetscObjectState depthState = -1;
1036: PetscErrorCode ierr;
1042: PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1043: PetscObjectGetComm((PetscObject)dm, &comm);
1044: MPI_Comm_rank(comm, &rank);
1046: /* If the user has changed the depth label, communicate it instead */
1047: DMPlexGetDepth(dm, &depth);
1048: DMPlexGetDepthLabel(dm, &depthLabel);
1049: if (depthLabel) {PetscObjectStateGet((PetscObject) depthLabel, &depthState);}
1050: lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1051: MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1052: if (sendDepth) {
1053: DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel);
1054: DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE);
1055: }
1056: /* Everyone must have either the same number of labels, or none */
1057: DMGetNumLabels(dm, &numLocalLabels);
1058: numLabels = numLocalLabels;
1059: MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1060: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1061: for (l = numLabels-1; l >= 0; --l) {
1062: DMLabel label = NULL, labelNew = NULL;
1063: PetscBool isDepth, lisOutput = PETSC_TRUE, isOutput;
1064: const char *name = NULL;
1066: if (hasLabels) {
1067: DMGetLabelByNum(dm, l, &label);
1068: /* Skip "depth" because it is recreated */
1069: PetscObjectGetName((PetscObject) label, &name);
1070: PetscStrcmp(name, "depth", &isDepth);
1071: }
1072: MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);
1073: if (isDepth && !sendDepth) continue;
1074: DMLabelDistribute(label, migrationSF, &labelNew);
1075: if (isDepth) {
1076: /* Put in any missing strata which can occur if users are managing the depth label themselves */
1077: PetscInt gdepth;
1079: MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1080: if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1081: for (d = 0; d <= gdepth; ++d) {
1082: PetscBool has;
1084: DMLabelHasStratum(labelNew, d, &has);
1085: if (!has) {DMLabelAddStratum(labelNew, d);}
1086: }
1087: }
1088: DMAddLabel(dmParallel, labelNew);
1089: /* Put the output flag in the new label */
1090: if (hasLabels) {DMGetLabelOutput(dm, name, &lisOutput);}
1091: MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);
1092: PetscObjectGetName((PetscObject) labelNew, &name);
1093: DMSetLabelOutput(dmParallel, name, isOutput);
1094: DMLabelDestroy(&labelNew);
1095: }
1096: PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1097: return(0);
1098: }
1100: static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1101: {
1102: DM_Plex *mesh = (DM_Plex*) dm->data;
1103: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1104: PetscBool *isHybrid, *isHybridParallel;
1105: PetscInt dim, depth, d;
1106: PetscInt pStart, pEnd, pStartP, pEndP;
1107: PetscErrorCode ierr;
1113: DMGetDimension(dm, &dim);
1114: DMPlexGetDepth(dm, &depth);
1115: DMPlexGetChart(dm,&pStart,&pEnd);
1116: DMPlexGetChart(dmParallel,&pStartP,&pEndP);
1117: PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);
1118: for (d = 0; d <= depth; d++) {
1119: PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d];
1121: if (hybridMax >= 0) {
1122: PetscInt sStart, sEnd, p;
1124: DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);
1125: for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE;
1126: }
1127: }
1128: PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1129: PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1130: for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
1131: for (d = 0; d <= depth; d++) {
1132: PetscInt sStart, sEnd, p, dd;
1134: DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);
1135: dd = (depth == 1 && d == 1) ? dim : d;
1136: for (p = sStart; p < sEnd; p++) {
1137: if (isHybridParallel[p-pStartP]) {
1138: pmesh->hybridPointMax[dd] = p;
1139: break;
1140: }
1141: }
1142: }
1143: PetscFree2(isHybrid,isHybridParallel);
1144: return(0);
1145: }
1147: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1148: {
1149: DM_Plex *mesh = (DM_Plex*) dm->data;
1150: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1151: MPI_Comm comm;
1152: DM refTree;
1153: PetscSection origParentSection, newParentSection;
1154: PetscInt *origParents, *origChildIDs;
1155: PetscBool flg;
1156: PetscErrorCode ierr;
1161: PetscObjectGetComm((PetscObject)dm, &comm);
1163: /* Set up tree */
1164: DMPlexGetReferenceTree(dm,&refTree);
1165: DMPlexSetReferenceTree(dmParallel,refTree);
1166: DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1167: if (origParentSection) {
1168: PetscInt pStart, pEnd;
1169: PetscInt *newParents, *newChildIDs, *globParents;
1170: PetscInt *remoteOffsetsParents, newParentSize;
1171: PetscSF parentSF;
1173: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1174: PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1175: PetscSectionSetChart(newParentSection,pStart,pEnd);
1176: PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1177: PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1178: PetscFree(remoteOffsetsParents);
1179: PetscSectionGetStorageSize(newParentSection,&newParentSize);
1180: PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1181: if (original) {
1182: PetscInt numParents;
1184: PetscSectionGetStorageSize(origParentSection,&numParents);
1185: PetscMalloc1(numParents,&globParents);
1186: ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1187: }
1188: else {
1189: globParents = origParents;
1190: }
1191: PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);
1192: PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);
1193: if (original) {
1194: PetscFree(globParents);
1195: }
1196: PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1197: PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1198: ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1199: #if defined(PETSC_USE_DEBUG)
1200: {
1201: PetscInt p;
1202: PetscBool valid = PETSC_TRUE;
1203: for (p = 0; p < newParentSize; ++p) {
1204: if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1205: }
1206: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1207: }
1208: #endif
1209: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1210: if (flg) {
1211: PetscPrintf(comm, "Serial Parent Section: \n");
1212: PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm));
1213: PetscPrintf(comm, "Parallel Parent Section: \n");
1214: PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm));
1215: PetscSFView(parentSF, NULL);
1216: }
1217: DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1218: PetscSectionDestroy(&newParentSection);
1219: PetscFree2(newParents,newChildIDs);
1220: PetscSFDestroy(&parentSF);
1221: }
1222: pmesh->useAnchors = mesh->useAnchors;
1223: return(0);
1224: }
1226: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1227: {
1228: PetscMPIInt rank, size;
1229: MPI_Comm comm;
1230: PetscErrorCode ierr;
1236: /* Create point SF for parallel mesh */
1237: PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1238: PetscObjectGetComm((PetscObject)dm, &comm);
1239: MPI_Comm_rank(comm, &rank);
1240: MPI_Comm_size(comm, &size);
1241: {
1242: const PetscInt *leaves;
1243: PetscSFNode *remotePoints, *rowners, *lowners;
1244: PetscInt numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1245: PetscInt pStart, pEnd;
1247: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1248: PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1249: PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1250: for (p=0; p<numRoots; p++) {
1251: rowners[p].rank = -1;
1252: rowners[p].index = -1;
1253: }
1254: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1255: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1256: for (p = 0; p < numLeaves; ++p) {
1257: if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1258: lowners[p].rank = rank;
1259: lowners[p].index = leaves ? leaves[p] : p;
1260: } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
1261: lowners[p].rank = -2;
1262: lowners[p].index = -2;
1263: }
1264: }
1265: for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1266: rowners[p].rank = -3;
1267: rowners[p].index = -3;
1268: }
1269: PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1270: PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1271: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1272: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1273: for (p = 0; p < numLeaves; ++p) {
1274: if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1275: if (lowners[p].rank != rank) ++numGhostPoints;
1276: }
1277: PetscMalloc1(numGhostPoints, &ghostPoints);
1278: PetscMalloc1(numGhostPoints, &remotePoints);
1279: for (p = 0, gp = 0; p < numLeaves; ++p) {
1280: if (lowners[p].rank != rank) {
1281: ghostPoints[gp] = leaves ? leaves[p] : p;
1282: remotePoints[gp].rank = lowners[p].rank;
1283: remotePoints[gp].index = lowners[p].index;
1284: ++gp;
1285: }
1286: }
1287: PetscFree2(rowners,lowners);
1288: PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1289: PetscSFSetFromOptions((dmParallel)->sf);
1290: }
1291: {
1292: PetscBool useCone, useClosure, useAnchors;
1294: DMGetBasicAdjacency(dm, &useCone, &useClosure);
1295: DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1296: DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1297: DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1298: }
1299: PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1300: return(0);
1301: }
1303: /*@
1304: DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?
1306: Input Parameters:
1307: + dm - The DMPlex object
1308: - flg - Balance the partition?
1310: Level: intermediate
1312: .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1313: @*/
1314: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1315: {
1316: DM_Plex *mesh = (DM_Plex *)dm->data;
1319: mesh->partitionBalance = flg;
1320: return(0);
1321: }
1323: /*@
1324: DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?
1326: Input Parameter:
1327: . dm - The DMPlex object
1329: Output Parameter:
1330: . flg - Balance the partition?
1332: Level: intermediate
1334: .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1335: @*/
1336: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1337: {
1338: DM_Plex *mesh = (DM_Plex *)dm->data;
1343: *flg = mesh->partitionBalance;
1344: return(0);
1345: }
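/* Minimal sketch (ExampleBalancedDistribute is illustrative): request a balanced shared-point
   partition before distributing with zero overlap; the getter is only used to confirm the flag. */
static PetscErrorCode ExampleBalancedDistribute(DM dm, DM *dmDist)
{
  PetscBool      balance;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexSetPartitionBalance(dm, PETSC_TRUE);CHKERRQ(ierr);
  ierr = DMPlexGetPartitionBalance(dm, &balance);CHKERRQ(ierr);
  if (!balance) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Partition balance flag was not stored");
  ierr = DMPlexDistribute(dm, 0, NULL, dmDist);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}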
1347: typedef struct {
1348: PetscInt vote, rank, index;
1349: } Petsc3Int;
1351: /* MaxLoc, but carry a third piece of information around */
1352: static void MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1353: {
1354: Petsc3Int *a = (Petsc3Int *)inout_;
1355: Petsc3Int *b = (Petsc3Int *)in_;
1356: PetscInt i, len = *len_;
1357: for (i = 0; i < len; i++) {
1358: if (a[i].vote < b[i].vote) {
1359: a[i].vote = b[i].vote;
1360: a[i].rank = b[i].rank;
1361: a[i].index = b[i].index;
1362: } else if (a[i].vote <= b[i].vote) {
1363: if (a[i].rank >= b[i].rank) {
1364: a[i].rank = b[i].rank;
1365: a[i].index = b[i].index;
1366: }
1367: }
1368: }
1369: }
1371: /*@C
1372: DMPlexCreatePointSF - Build a point SF from an SF describing a point migration
1374: Input Parameters:
1375: + dm - The source DMPlex object
1376: . migrationSF - The star forest that describes the parallel point remapping
1377: - ownership - Flag causing a vote to determine point ownership
1379: Output Parameter:
1380: . pointSF - The star forest describing the point overlap in the remapped DM
1382: Level: developer
1384: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1385: @*/
1386: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1387: {
1388: PetscMPIInt rank, size;
1389: PetscInt p, nroots, nleaves, idx, npointLeaves;
1390: PetscInt *pointLocal;
1391: const PetscInt *leaves;
1392: const PetscSFNode *roots;
1393: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1394: Vec shifts;
1395: const PetscInt numShifts = 13759;
1396: const PetscScalar *shift = NULL;
1397: const PetscBool shiftDebug = PETSC_FALSE;
1398: PetscBool balance;
1399: PetscErrorCode ierr;
1403: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1404: MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
1405: PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0);
1407: DMPlexGetPartitionBalance(dm, &balance);
1408: PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1409: PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1410: if (ownership) {
1411: MPI_Op op;
1412: MPI_Datatype datatype;
1413: Petsc3Int *rootVote = NULL, *leafVote = NULL;
1414: /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1415: if (balance) {
1416: PetscRandom r;
1418: PetscRandomCreate(PETSC_COMM_SELF, &r);
1419: PetscRandomSetInterval(r, 0, 2467*size);
1420: VecCreate(PETSC_COMM_SELF, &shifts);
1421: VecSetSizes(shifts, numShifts, numShifts);
1422: VecSetType(shifts, VECSTANDARD);
1423: VecSetRandom(shifts, r);
1424: PetscRandomDestroy(&r);
1425: VecGetArrayRead(shifts, &shift);
1426: }
1428: PetscMalloc1(nroots, &rootVote);
1429: PetscMalloc1(nleaves, &leafVote);
1430: /* Point ownership vote: Process with highest rank owns shared points */
1431: for (p = 0; p < nleaves; ++p) {
1432: if (shiftDebug) {
1433: PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);
1434: }
1435: /* Either put in a bid or we know we own it */
1436: leafVote[p].vote = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
1437: leafVote[p].rank = rank;
1438: leafVote[p].index = p;
1439: }
1440: for (p = 0; p < nroots; p++) {
1441: /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1442: rootVote[p].vote = -3;
1443: rootVote[p].rank = -3;
1444: rootVote[p].index = -3;
1445: }
1446: MPI_Type_contiguous(3, MPIU_INT, &datatype);
1447: MPI_Type_commit(&datatype);
1448: MPI_Op_create(&MaxLocCarry, 1, &op);
1449: PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op);
1450: PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op);
1451: MPI_Op_free(&op);
1452: MPI_Type_free(&datatype);
1453: for (p = 0; p < nroots; p++) {
1454: rootNodes[p].rank = rootVote[p].rank;
1455: rootNodes[p].index = rootVote[p].index;
1456: }
1457: PetscFree(leafVote);
1458: PetscFree(rootVote);
1459: } else {
1460: for (p = 0; p < nroots; p++) {
1461: rootNodes[p].index = -1;
1462: rootNodes[p].rank = rank;
1463: }
1464: for (p = 0; p < nleaves; p++) {
1465: /* Write new local id into old location */
1466: if (roots[p].rank == rank) {
1467: rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1468: }
1469: }
1470: }
1471: PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1472: PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1474: for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1475: if (leafNodes[p].rank != rank) npointLeaves++;
1476: }
1477: PetscMalloc1(npointLeaves, &pointLocal);
1478: PetscMalloc1(npointLeaves, &pointRemote);
1479: for (idx = 0, p = 0; p < nleaves; p++) {
1480: if (leafNodes[p].rank != rank) {
1481: pointLocal[idx] = p;
1482: pointRemote[idx] = leafNodes[p];
1483: idx++;
1484: }
1485: }
1486: if (shift) {
1487: VecRestoreArrayRead(shifts, &shift);
1488: VecDestroy(&shifts);
1489: }
1490: if (shiftDebug) {PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);}
1491: PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1492: PetscSFSetFromOptions(*pointSF);
1493: PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1494: PetscFree2(rootNodes, leafNodes);
1495: PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0);
1496: return(0);
1497: }
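/* Sketch (ExampleInstallPointSF is illustrative): after a mesh has been migrated over migrationSF
   into dmParallel, build its point SF with an ownership vote and install it on the DM.  This is an
   assumed calling pattern, not a prescription from the manual page. */
static PetscErrorCode ExampleInstallPointSF(DM dmParallel, PetscSF migrationSF)
{
  PetscSF        pointSF;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexCreatePointSF(dmParallel, migrationSF, PETSC_TRUE, &pointSF);CHKERRQ(ierr);
  ierr = DMSetPointSF(dmParallel, pointSF);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&pointSF);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}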
1499: /*@C
1500: DMPlexMigrate - Migrates internal DM data over the supplied star forest
1502: Collective on dm
1504: Input Parameters:
1505: + dm - The source DMPlex object
1506: - sf - The star forest communication context describing the migration pattern
1508: Output Parameter:
1509: . targetDM - The target DMPlex object
1511: Level: intermediate
1513: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1514: @*/
1515: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1516: {
1517: MPI_Comm comm;
1518: PetscInt dim, cdim, nroots;
1519: PetscSF sfPoint;
1520: ISLocalToGlobalMapping ltogMigration;
1521: ISLocalToGlobalMapping ltogOriginal = NULL;
1522: PetscBool flg;
1523: PetscErrorCode ierr;
1527: PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1528: PetscObjectGetComm((PetscObject) dm, &comm);
1529: DMGetDimension(dm, &dim);
1530: DMSetDimension(targetDM, dim);
1531: DMGetCoordinateDim(dm, &cdim);
1532: DMSetCoordinateDim(targetDM, cdim);
1534: /* Check for a one-to-all distribution pattern */
1535: DMGetPointSF(dm, &sfPoint);
1536: PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1537: if (nroots >= 0) {
1538: IS isOriginal;
1539: PetscInt n, size, nleaves;
1540: PetscInt *numbering_orig, *numbering_new;
1542: /* Get the original point numbering */
1543: DMPlexCreatePointNumbering(dm, &isOriginal);
1544: ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal);
1545: ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1546: ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1547: /* Convert to positive global numbers */
1548: for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1549: /* Derive the new local-to-global mapping from the old one */
1550: PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1551: PetscMalloc1(nleaves, &numbering_new);
1552: PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new);
1553: PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new);
1554: ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1555: ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1556: ISDestroy(&isOriginal);
1557: } else {
1558: /* One-to-all distribution pattern: We can derive LToG from SF */
1559: ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1560: }
1561: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1562: if (flg) {
1563: PetscPrintf(comm, "Point renumbering for DM migration:\n");
1564: ISLocalToGlobalMappingView(ltogMigration, NULL);
1565: }
1566: /* Migrate DM data to target DM */
1567: DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1568: DMPlexDistributeLabels(dm, sf, targetDM);
1569: DMPlexDistributeCoordinates(dm, sf, targetDM);
1570: DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);
1571: DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1572: ISLocalToGlobalMappingDestroy(&ltogOriginal);
1573: ISLocalToGlobalMappingDestroy(&ltogMigration);
1574: PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1575: return(0);
1576: }
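A minimal usage sketch (the helper name MigrateOverSF is hypothetical and this is an illustration, not the library's own driver): the target DM is created empty on the same communicator and then populated from the migration SF. The caller is still responsible for attaching a point SF afterwards, as DMPlexDistribute() does below.

    #include <petscdmplex.h>

    static PetscErrorCode MigrateOverSF(DM dm, PetscSF sfMigration, DM *dmTarget)
    {
      PetscErrorCode ierr;

      PetscFunctionBeginUser;
      ierr = DMPlexCreate(PetscObjectComm((PetscObject) dm), dmTarget);CHKERRQ(ierr);
      ierr = DMPlexMigrate(dm, sfMigration, *dmTarget);CHKERRQ(ierr);
      /* Shared-point information is not migrated automatically: attach a point SF,
         for example one built with DMPlexCreatePointSF(), before using *dmTarget in parallel */
      PetscFunctionReturn(0);
    }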
1578: /*@C
1579: DMPlexDistribute - Distributes the mesh and any associated sections.
1581: Collective on dm
1583: Input Parameters:
1584: + dm - The original DMPlex object
1585: - overlap - The overlap of partitions (0 is the default)
1587: Output Parameters:
1588: + sf - The PetscSF used for point distribution, or NULL if not needed
1589: - dmParallel - The distributed DMPlex object
1591: Note: If the mesh was not distributed, the output dmParallel will be NULL.
1593: The user can control the definition of adjacency for the mesh using DMSetAdjacency() and should choose the combination appropriate for the
1594: function representation on the mesh.
1596: Level: intermediate
1598: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexGetOverlap()
1599: @*/
1600: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1601: {
1602: MPI_Comm comm;
1603: PetscPartitioner partitioner;
1604: IS cellPart;
1605: PetscSection cellPartSection;
1606: DM dmCoord;
1607: DMLabel lblPartition, lblMigration;
1608: PetscSF sfMigration, sfStratified, sfPoint;
1609: PetscBool flg, balance;
1610: PetscMPIInt rank, size;
1611: PetscErrorCode ierr;
1619: if (sf) *sf = NULL;
1620: *dmParallel = NULL;
1621: PetscObjectGetComm((PetscObject)dm,&comm);
1622: MPI_Comm_rank(comm, &rank);
1623: MPI_Comm_size(comm, &size);
1624: if (size == 1) return(0);
1626: PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1627: /* Create cell partition */
1628: PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1629: PetscSectionCreate(comm, &cellPartSection);
1630: DMPlexGetPartitioner(dm, &partitioner);
1631: PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);
1632: PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0);
1633: {
1634: /* Convert partition to DMLabel */
1635: IS is;
1636: PetscHSetI ht;
1637: const PetscInt *points;
1638: PetscInt *iranks;
1639: PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks;
1641: DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);
1642: /* Preallocate strata */
1643: PetscHSetICreate(&ht);
1644: PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1645: for (proc = pStart; proc < pEnd; proc++) {
1646: PetscSectionGetDof(cellPartSection, proc, &npoints);
1647: if (npoints) {PetscHSetIAdd(ht, proc);}
1648: }
1649: PetscHSetIGetSize(ht, &nranks);
1650: PetscMalloc1(nranks, &iranks);
1651: PetscHSetIGetElems(ht, &poff, iranks);
1652: PetscHSetIDestroy(&ht);
1653: DMLabelAddStrata(lblPartition, nranks, iranks);
1654: PetscFree(iranks);
1655: /* Inline DMPlexPartitionLabelClosure() */
1656: ISGetIndices(cellPart, &points);
1657: PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1658: for (proc = pStart; proc < pEnd; proc++) {
1659: PetscSectionGetDof(cellPartSection, proc, &npoints);
1660: if (!npoints) continue;
1661: PetscSectionGetOffset(cellPartSection, proc, &poff);
1662: DMPlexClosurePoints_Private(dm, npoints, points+poff, &is);
1663: DMLabelSetStratumIS(lblPartition, proc, is);
1664: ISDestroy(&is);
1665: }
1666: ISRestoreIndices(cellPart, &points);
1667: }
1668: PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0);
1670: DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);
1671: DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);
1672: DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1673: DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1674: PetscSFDestroy(&sfMigration);
1675: sfMigration = sfStratified;
1676: PetscSFSetUp(sfMigration);
1677: PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1678: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1679: if (flg) {
1680: DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm));
1681: PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm));
1682: }
1684: /* Create non-overlapping parallel DM and migrate internal data */
1685: DMPlexCreate(comm, dmParallel);
1686: PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1687: DMPlexMigrate(dm, sfMigration, *dmParallel);
1689: /* Build the point SF without overlap */
1690: DMPlexGetPartitionBalance(dm, &balance);
1691: DMPlexSetPartitionBalance(*dmParallel, balance);
1692: DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1693: DMSetPointSF(*dmParallel, sfPoint);
1694: DMGetCoordinateDM(*dmParallel, &dmCoord);
1695: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1696: if (flg) {PetscSFView(sfPoint, NULL);}
1698: if (overlap > 0) {
1699: DM dmOverlap;
1700: PetscInt nroots, nleaves, noldleaves, l;
1701: const PetscInt *oldLeaves;
1702: PetscSFNode *newRemote, *permRemote;
1703: const PetscSFNode *oldRemote;
1704: PetscSF sfOverlap, sfOverlapPoint;
1706: /* Add the partition overlap to the distributed DM */
1707: DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1708: DMDestroy(dmParallel);
1709: *dmParallel = dmOverlap;
1710: if (flg) {
1711: PetscPrintf(comm, "Overlap Migration SF:\n");
1712: PetscSFView(sfOverlap, NULL);
1713: }
1715: /* Re-map the migration SF to establish the full migration pattern */
1716: PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote);
1717: PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1718: PetscMalloc1(nleaves, &newRemote);
1719: /* oldRemote: original root point mapping to original leaf point
1720: newRemote: original leaf point mapping to overlapped leaf point */
1721: if (oldLeaves) {
1722: /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1723: PetscMalloc1(noldleaves, &permRemote);
1724: for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1725: oldRemote = permRemote;
1726: }
1727: PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1728: PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1729: if (oldLeaves) {PetscFree(oldRemote);}
1730: PetscSFCreate(comm, &sfOverlapPoint);
1731: PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1732: PetscSFDestroy(&sfOverlap);
1733: PetscSFDestroy(&sfMigration);
1734: sfMigration = sfOverlapPoint;
1735: }
1736: /* Cleanup Partition */
1737: DMLabelDestroy(&lblPartition);
1738: DMLabelDestroy(&lblMigration);
1739: PetscSectionDestroy(&cellPartSection);
1740: ISDestroy(&cellPart);
1741: /* Copy BC */
1742: DMCopyBoundary(dm, *dmParallel);
1743: /* Create sfNatural */
1744: if (dm->useNatural) {
1745: PetscSection section;
1747: DMGetLocalSection(dm, &section);
1748: DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1749: DMSetUseNatural(*dmParallel, PETSC_TRUE);
1750: }
1751: /* Cleanup */
1752: if (sf) {*sf = sfMigration;}
1753: else {PetscSFDestroy(&sfMigration);}
1754: PetscSFDestroy(&sfPoint);
1755: PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1756: return(0);
1757: }
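A typical call pattern, given as a hedged sketch (ReplaceWithDistributed is an illustrative name, not PETSc API): distribute with zero overlap and, when a parallel mesh is actually produced, swap it in for the serial one.

    #include <petscdmplex.h>

    static PetscErrorCode ReplaceWithDistributed(DM *dm)
    {
      DM             dmDist = NULL;
      PetscErrorCode ierr;

      PetscFunctionBeginUser;
      ierr = DMPlexDistribute(*dm, 0, NULL, &dmDist);CHKERRQ(ierr);
      if (dmDist) {                 /* NULL on a single rank: keep the original mesh */
        ierr = DMDestroy(dm);CHKERRQ(ierr);
        *dm  = dmDist;
      }
      PetscFunctionReturn(0);
    }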
1759: /*@C
1760: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.
1762: Collective on dm
1764: Input Parameters:
1765: + dm - The non-overlapping distributed DMPlex object
1766: - overlap - The overlap of partitions (the same on all ranks)
1768: Output Parameters:
1769: + sf - The PetscSF used for point distribution
1770: - dmOverlap - The overlapping distributed DMPlex object, or NULL
1772: Notes:
1773: If the mesh is not distributed (run on a single rank), the output dmOverlap is set to NULL.
1775: The user can control the definition of adjacency for the mesh using DMSetAdjacency() and should choose the combination appropriate for the
1776: function representation on the mesh.
1778: Level: advanced
1780: .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexDistribute(), DMPlexCreateOverlapLabel(), DMPlexGetOverlap()
1781: @*/
1782: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1783: {
1784: MPI_Comm comm;
1785: PetscMPIInt size, rank;
1786: PetscSection rootSection, leafSection;
1787: IS rootrank, leafrank;
1788: DM dmCoord;
1789: DMLabel lblOverlap;
1790: PetscSF sfOverlap, sfStratified, sfPoint;
1791: PetscErrorCode ierr;
1799: if (sf) *sf = NULL;
1800: *dmOverlap = NULL;
1801: PetscObjectGetComm((PetscObject)dm,&comm);
1802: MPI_Comm_size(comm, &size);
1803: MPI_Comm_rank(comm, &rank);
1804: if (size == 1) return(0);
1806: PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1807: /* Compute point overlap with neighbouring processes on the distributed DM */
1808: PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1809: PetscSectionCreate(comm, &rootSection);
1810: PetscSectionCreate(comm, &leafSection);
1811: DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1812: DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1813: /* Convert overlap label to stratified migration SF */
1814: DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1815: DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1816: PetscSFDestroy(&sfOverlap);
1817: sfOverlap = sfStratified;
1818: PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1819: PetscSFSetFromOptions(sfOverlap);
1821: PetscSectionDestroy(&rootSection);
1822: PetscSectionDestroy(&leafSection);
1823: ISDestroy(&rootrank);
1824: ISDestroy(&leafrank);
1825: PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1827: /* Build the overlapping DM */
1828: DMPlexCreate(comm, dmOverlap);
1829: PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1830: DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1831: /* Store the overlap in the new DM */
1832: ((DM_Plex*)(*dmOverlap)->data)->overlap = overlap + ((DM_Plex*)dm->data)->overlap;
1833: /* Build the new point SF */
1834: DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1835: DMSetPointSF(*dmOverlap, sfPoint);
1836: DMGetCoordinateDM(*dmOverlap, &dmCoord);
1837: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1838: PetscSFDestroy(&sfPoint);
1839: /* Cleanup overlap partition */
1840: DMLabelDestroy(&lblOverlap);
1841: if (sf) *sf = sfOverlap;
1842: else {PetscSFDestroy(&sfOverlap);}
1843: PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1844: return(0);
1845: }
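A hedged usage sketch (AddCellOverlap is an illustrative name): grow an already distributed mesh by one level of overlap and report the accumulated overlap depth via DMPlexGetOverlap(), defined further down in this file.

    #include <petscdmplex.h>

    static PetscErrorCode AddCellOverlap(DM *dm)
    {
      DM             dmOver = NULL;
      PetscInt       depth;
      PetscErrorCode ierr;

      PetscFunctionBeginUser;
      ierr = DMPlexDistributeOverlap(*dm, 1, NULL, &dmOver);CHKERRQ(ierr);
      if (dmOver) {                 /* NULL when running on a single rank */
        ierr = DMDestroy(dm);CHKERRQ(ierr);
        *dm  = dmOver;
      }
      ierr = DMPlexGetOverlap(*dm, &depth);CHKERRQ(ierr);
      ierr = PetscPrintf(PetscObjectComm((PetscObject) *dm), "overlap depth: %D\n", depth);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }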
1847: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
1848: {
1849: DM_Plex *mesh = (DM_Plex*) dm->data;
1852: *overlap = mesh->overlap;
1853: return(0);
1854: }
1856: /*@
1857: DMPlexGetOverlap - Get the DMPlex partition overlap.
1859: Not collective
1861: Input Parameter:
1862: . dm - The DM
1864: Output Parameter:
1865: . overlap - The overlap of this DM
1867: Level: intermediate
1869: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap(), DMPlexCreateOverlapLabel()
1870: @*/
1871: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
1872: {
1873: PetscErrorCode ierr;
1877: PetscUseMethod(dm,"DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap));
1878: return(0);
1879: }
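DMPlexGetOverlap() above dispatches through PetscUseMethod(), looking up a function composed on the object under the name "DMPlexGetOverlap_C" (for a native DMPlex that is DMPlexGetOverlap_Plex, registered elsewhere in the library). The sketch below illustrates the compose/use pattern; MyGetOverlap and RegisterMyGetOverlap are hypothetical names, not PETSc API.

    #include <petscdmplex.h>

    /* Hypothetical callback: a wrapper type could forward to its inner DM here */
    static PetscErrorCode MyGetOverlap(DM dm, PetscInt *overlap)
    {
      PetscFunctionBeginUser;
      *overlap = 0;
      PetscFunctionReturn(0);
    }

    static PetscErrorCode RegisterMyGetOverlap(DM dm)
    {
      PetscErrorCode ierr;

      PetscFunctionBeginUser;
      /* Same name that PetscUseMethod() queries in DMPlexGetOverlap() */
      ierr = PetscObjectComposeFunction((PetscObject) dm, "DMPlexGetOverlap_C", MyGetOverlap);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }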
1882: /*@C
1883: DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1884: root process of the original's communicator.
1886: Collective on dm
1888: Input Parameter:
1889: . dm - the original DMPlex object
1891: Output Parameters:
1892: + sf - the PetscSF used for point distribution (optional)
1893: - gatherMesh - the gathered DM object, or NULL
1895: Level: intermediate
1897: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1898: @*/
1899: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
1900: {
1901: MPI_Comm comm;
1902: PetscMPIInt size;
1903: PetscPartitioner oldPart, gatherPart;
1909: *gatherMesh = NULL;
1910: if (sf) *sf = NULL;
1911: comm = PetscObjectComm((PetscObject)dm);
1912: MPI_Comm_size(comm,&size);
1913: if (size == 1) return(0);
1914: DMPlexGetPartitioner(dm,&oldPart);
1915: PetscObjectReference((PetscObject)oldPart);
1916: PetscPartitionerCreate(comm,&gatherPart);
1917: PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1918: DMPlexSetPartitioner(dm,gatherPart);
1919: DMPlexDistribute(dm,0,sf,gatherMesh);
1921: DMPlexSetPartitioner(dm,oldPart);
1922: PetscPartitionerDestroy(&gatherPart);
1923: PetscPartitionerDestroy(&oldPart);
1924: return(0);
1925: }
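A hedged usage sketch (GatherToRankZero and the option name -gathered_dm_view are illustrative): gather the whole mesh onto rank 0, for instance for serial output, then discard the gathered copy.

    #include <petscdmplex.h>

    static PetscErrorCode GatherToRankZero(DM dm)
    {
      DM             gatherDM = NULL;
      PetscErrorCode ierr;

      PetscFunctionBeginUser;
      ierr = DMPlexGetGatherDM(dm, NULL, &gatherDM);CHKERRQ(ierr);
      if (gatherDM) {               /* NULL when already running on one rank */
        ierr = DMViewFromOptions(gatherDM, NULL, "-gathered_dm_view");CHKERRQ(ierr);
        ierr = DMDestroy(&gatherDM);CHKERRQ(ierr);
      }
      PetscFunctionReturn(0);
    }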
1927: /*@C
1928: DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.
1930: Collective on dm
1932: Input Parameter:
1933: . dm - the original DMPlex object
1935: Output Parameters:
1936: + sf - the PetscSF used for point distribution (optional)
1937: - redundantMesh - the redundant DM object, or NULL
1939: Level: intermediate
1941: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1942: @*/
1943: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
1944: {
1945: MPI_Comm comm;
1946: PetscMPIInt size, rank;
1947: PetscInt pStart, pEnd, p;
1948: PetscInt numPoints = -1;
1949: PetscSF migrationSF, sfPoint, gatherSF;
1950: DM gatherDM, dmCoord;
1951: PetscSFNode *points;
1957: *redundantMesh = NULL;
1958: comm = PetscObjectComm((PetscObject)dm);
1959: MPI_Comm_size(comm,&size);
1960: if (size == 1) {
1961: PetscObjectReference((PetscObject) dm);
1962: *redundantMesh = dm;
1963: if (sf) *sf = NULL;
1964: return(0);
1965: }
1966: DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);
1967: if (!gatherDM) return(0);
1968: MPI_Comm_rank(comm,&rank);
1969: DMPlexGetChart(gatherDM,&pStart,&pEnd);
1970: numPoints = pEnd - pStart;
1971: MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1972: PetscMalloc1(numPoints,&points);
1973: PetscSFCreate(comm,&migrationSF);
1974: for (p = 0; p < numPoints; p++) {
1975: points[p].index = p;
1976: points[p].rank = 0;
1977: }
1978: PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1979: DMPlexCreate(comm, redundantMesh);
1980: PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1981: DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1982: DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1983: DMSetPointSF(*redundantMesh, sfPoint);
1984: DMGetCoordinateDM(*redundantMesh, &dmCoord);
1985: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1986: PetscSFDestroy(&sfPoint);
1987: if (sf) {
1988: PetscSF tsf;
1990: PetscSFCompose(gatherSF,migrationSF,&tsf);
1991: DMPlexStratifyMigrationSF(dm, tsf, sf);
1992: PetscSFDestroy(&tsf);
1993: }
1994: PetscSFDestroy(&migrationSF);
1995: PetscSFDestroy(&gatherSF);
1996: DMDestroy(&gatherDM);
1997: return(0);
1998: }
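A hedged usage sketch (MakeRedundantCopy is an illustrative name): build a full copy of the mesh on every rank, for example for algorithms that need the global connectivity locally.

    #include <petscdmplex.h>

    static PetscErrorCode MakeRedundantCopy(DM dm, DM *dmRedundant)
    {
      PetscSF        sfMigration = NULL;
      PetscErrorCode ierr;

      PetscFunctionBeginUser;
      ierr = DMPlexGetRedundantDM(dm, &sfMigration, dmRedundant);CHKERRQ(ierr);
      /* On one rank the input dm itself is returned (with an extra reference) and the SF is NULL */
      if (sfMigration) {ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);}
      PetscFunctionReturn(0);
    }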