Actual source code: plexdistribute.c
petsc-3.11.4 2019-09-28
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsc/private/dmlabelimpl.h>
4: /*@C
5: DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
7: Input Parameters:
8: + dm - The DM object
9: . user - The user callback, may be NULL (to clear the callback)
10: - ctx - context for callback evaluation, may be NULL
12: Level: advanced
14: Notes:
15: The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.
17: Any setting here overrides other configuration of DMPlex adjacency determination.
19: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
20: @*/
21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
22: {
23: DM_Plex *mesh = (DM_Plex *)dm->data;
27: mesh->useradjacency = user;
28: mesh->useradjacencyctx = ctx;
29: return(0);
30: }
32: /*@C
33: DMPlexGetAdjacencyUser - get the user-defined adjacency callback
35: Input Parameter:
36: . dm - The DM object
38: Output Parameters:
39: + user - The user callback
40: - ctx - context for callback evaluation
42: Level: advanced
44: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
45: @*/
46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
47: {
48: DM_Plex *mesh = (DM_Plex *)dm->data;
52: if (user) *user = mesh->useradjacency;
53: if (ctx) *ctx = mesh->useradjacencyctx;
54: return(0);
55: }
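/* Illustrative sketch (not part of plexdistribute.c): a minimal user adjacency
   callback and its registration with DMPlexSetAdjacencyUser(). The names
   ExampleAdjacency_Self and ExampleRegisterAdjacency, and the trivial rule that
   each point is adjacent only to itself, are invented for this example. On entry
   *adjSize holds the capacity of adj[]; on exit it must hold the number written. */
static PetscErrorCode ExampleAdjacency_Self(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
{
  PetscFunctionBegin;
  if (*adjSize < 1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Adjacency array too small");
  adj[0]   = p; /* every point is adjacent to itself */
  *adjSize = 1; /* report the number of entries written */
  PetscFunctionReturn(0);
}

static PetscErrorCode ExampleRegisterAdjacency(DM dm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexSetAdjacencyUser(dm, ExampleAdjacency_Self, NULL);CHKERRQ(ierr); /* install the callback */
  ierr = DMPlexSetAdjacencyUser(dm, NULL, NULL);CHKERRQ(ierr);                  /* passing NULL clears it again */
  PetscFunctionReturn(0);
}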
57: /*@
58: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
60: Input Parameters:
61: + dm - The DM object
62: - useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
64: Level: intermediate
66: .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
67: @*/
68: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
69: {
70: DM_Plex *mesh = (DM_Plex *) dm->data;
74: mesh->useAnchors = useAnchors;
75: return(0);
76: }
78: /*@
79: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
81: Input Parameter:
82: . dm - The DM object
84: Output Parameter:
85: . useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
87: Level: intermediate
89: .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
90: @*/
91: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
92: {
93: DM_Plex *mesh = (DM_Plex *) dm->data;
98: *useAnchors = mesh->useAnchors;
99: return(0);
100: }
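/* Illustrative sketch (not part of plexdistribute.c): enabling anchor-aware adjacency
   on a mesh whose constraints were installed with DMPlexSetAnchors(). The function
   name ExampleUseAnchors is invented for this example. */
static PetscErrorCode ExampleUseAnchors(DM dm)
{
  PetscBool      useAnchors;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexSetAdjacencyUseAnchors(dm, PETSC_TRUE);CHKERRQ(ierr);  /* replace constrained points by their anchors */
  ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr); /* useAnchors is now PETSC_TRUE */
  PetscFunctionReturn(0);
}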
102: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103: {
104: const PetscInt *cone = NULL;
105: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
106: PetscErrorCode ierr;
109: DMPlexGetConeSize(dm, p, &coneSize);
110: DMPlexGetCone(dm, p, &cone);
111: for (c = 0; c <= coneSize; ++c) {
112: const PetscInt point = !c ? p : cone[c-1];
113: const PetscInt *support = NULL;
114: PetscInt supportSize, s, q;
116: DMPlexGetSupportSize(dm, point, &supportSize);
117: DMPlexGetSupport(dm, point, &support);
118: for (s = 0; s < supportSize; ++s) {
119: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
120: if (support[s] == adj[q]) break;
121: }
122: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
123: }
124: }
125: *adjSize = numAdj;
126: return(0);
127: }
129: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
130: {
131: const PetscInt *support = NULL;
132: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
133: PetscErrorCode ierr;
136: DMPlexGetSupportSize(dm, p, &supportSize);
137: DMPlexGetSupport(dm, p, &support);
138: for (s = 0; s <= supportSize; ++s) {
139: const PetscInt point = !s ? p : support[s-1];
140: const PetscInt *cone = NULL;
141: PetscInt coneSize, c, q;
143: DMPlexGetConeSize(dm, point, &coneSize);
144: DMPlexGetCone(dm, point, &cone);
145: for (c = 0; c < coneSize; ++c) {
146: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
147: if (cone[c] == adj[q]) break;
148: }
149: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
150: }
151: }
152: *adjSize = numAdj;
153: return(0);
154: }
156: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
157: {
158: PetscInt *star = NULL;
159: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
163: DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
164: for (s = 0; s < starSize*2; s += 2) {
165: const PetscInt *closure = NULL;
166: PetscInt closureSize, c, q;
168: DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
169: for (c = 0; c < closureSize*2; c += 2) {
170: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
171: if (closure[c] == adj[q]) break;
172: }
173: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
174: }
175: DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
176: }
177: DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
178: *adjSize = numAdj;
179: return(0);
180: }
182: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
183: {
184: static PetscInt asiz = 0;
185: PetscInt maxAnchors = 1;
186: PetscInt aStart = -1, aEnd = -1;
187: PetscInt maxAdjSize;
188: PetscSection aSec = NULL;
189: IS aIS = NULL;
190: const PetscInt *anchors;
191: DM_Plex *mesh = (DM_Plex *)dm->data;
192: PetscErrorCode ierr;
195: if (useAnchors) {
196: DMPlexGetAnchors(dm,&aSec,&aIS);
197: if (aSec) {
198: PetscSectionGetMaxDof(aSec,&maxAnchors);
199: maxAnchors = PetscMax(1,maxAnchors);
200: PetscSectionGetChart(aSec,&aStart,&aEnd);
201: ISGetIndices(aIS,&anchors);
202: }
203: }
204: if (!*adj) {
205: PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;
207: DMPlexGetChart(dm, &pStart,&pEnd);
208: DMPlexGetDepth(dm, &depth);
209: DMPlexGetMaxSizes(dm, &maxC, &maxS);
210: coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
211: supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
212: asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
213: asiz *= maxAnchors;
214: asiz = PetscMin(asiz,pEnd-pStart);
215: PetscMalloc1(asiz,adj);
216: }
217: if (*adjSize < 0) *adjSize = asiz;
218: maxAdjSize = *adjSize;
219: if (mesh->useradjacency) {
220: mesh->useradjacency(dm, p, adjSize, *adj, mesh->useradjacencyctx);
221: } else if (useTransitiveClosure) {
222: DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
223: } else if (useCone) {
224: DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
225: } else {
226: DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
227: }
228: if (useAnchors && aSec) {
229: PetscInt origSize = *adjSize;
230: PetscInt numAdj = origSize;
231: PetscInt i = 0, j;
232: PetscInt *orig = *adj;
234: while (i < origSize) {
235: PetscInt p = orig[i];
236: PetscInt aDof = 0;
238: if (p >= aStart && p < aEnd) {
239: PetscSectionGetDof(aSec,p,&aDof);
240: }
241: if (aDof) {
242: PetscInt aOff;
243: PetscInt s, q;
245: for (j = i + 1; j < numAdj; j++) {
246: orig[j - 1] = orig[j];
247: }
248: origSize--;
249: numAdj--;
250: PetscSectionGetOffset(aSec,p,&aOff);
251: for (s = 0; s < aDof; ++s) {
252: for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
253: if (anchors[aOff+s] == orig[q]) break;
254: }
255: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
256: }
257: }
258: else {
259: i++;
260: }
261: }
262: *adjSize = numAdj;
263: ISRestoreIndices(aIS,&anchors);
264: }
265: return(0);
266: }
268: /*@
269: DMPlexGetAdjacency - Return all points adjacent to the given point
271: Input Parameters:
272: + dm - The DM object
273: . p - The point
274: . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
275: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize
277: Output Parameters:
278: + adjSize - The number of adjacent points
279: - adj - The adjacent points
281: Level: advanced
283: Notes:
284: The user must PetscFree the adj array if it was not passed in.
286: .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
287: @*/
288: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
289: {
290: PetscBool useCone, useClosure, useAnchors;
297: DMGetBasicAdjacency(dm, &useCone, &useClosure);
298: DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
299: DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);
300: return(0);
301: }
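/* Illustrative sketch (not part of plexdistribute.c): querying the adjacency of one
   point, letting DMPlexGetAdjacency() allocate the array, then freeing it. The
   function name ExampleViewAdjacency is invented for this example. */
static PetscErrorCode ExampleViewAdjacency(DM dm, PetscInt p)
{
  PetscInt      *adj = NULL, adjSize = PETSC_DETERMINE, a;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr); /* adj is allocated because we passed NULL */
  for (a = 0; a < adjSize; ++a) {
    ierr = PetscPrintf(PETSC_COMM_SELF, "point %D is adjacent to %D\n", p, adj[a]);CHKERRQ(ierr);
  }
  ierr = PetscFree(adj);CHKERRQ(ierr); /* the caller owns the array it did not pass in */
  PetscFunctionReturn(0);
}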
303: /*@
304: DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity
306: Collective on DM
308: Input Parameters:
309: + dm - The DM
310: . sfPoint - The PetscSF which encodes point connectivity
. rootRankSection - The number of leaves for a given root point
. rootRanks - The rank of each edge into the root point
. leafRankSection - The number of processes sharing a given leaf point
- leafRanks - The rank of each process sharing a leaf point
312: Output Parameters:
313: + processRanks - A list of process neighbors, or NULL
314: - sfProcess - An SF encoding the two-sided process connectivity, or NULL
316: Level: developer
318: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
319: @*/
320: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
321: {
322: const PetscSFNode *remotePoints;
323: PetscInt *localPointsNew;
324: PetscSFNode *remotePointsNew;
325: const PetscInt *nranks;
326: PetscInt *ranksNew;
327: PetscBT neighbors;
328: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
329: PetscMPIInt size, proc, rank;
330: PetscErrorCode ierr;
337: MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
338: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
339: PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
340: PetscBTCreate(size, &neighbors);
341: PetscBTMemzero(size, neighbors);
342: /* Compute root-to-leaf process connectivity */
343: PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
344: ISGetIndices(rootRanks, &nranks);
345: for (p = pStart; p < pEnd; ++p) {
346: PetscInt ndof, noff, n;
348: PetscSectionGetDof(rootRankSection, p, &ndof);
349: PetscSectionGetOffset(rootRankSection, p, &noff);
350: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
351: }
352: ISRestoreIndices(rootRanks, &nranks);
353: /* Compute leaf-to-neighbor process connectivity */
354: PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
355: ISGetIndices(leafRanks, &nranks);
356: for (p = pStart; p < pEnd; ++p) {
357: PetscInt ndof, noff, n;
359: PetscSectionGetDof(leafRankSection, p, &ndof);
360: PetscSectionGetOffset(leafRankSection, p, &noff);
361: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
362: }
363: ISRestoreIndices(leafRanks, &nranks);
364: /* Compute leaf-to-root process connectivity */
365: for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
366: /* Calculate edges */
367: PetscBTClear(neighbors, rank);
368: for(proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
369: PetscMalloc1(numNeighbors, &ranksNew);
370: PetscMalloc1(numNeighbors, &localPointsNew);
371: PetscMalloc1(numNeighbors, &remotePointsNew);
372: for(proc = 0, n = 0; proc < size; ++proc) {
373: if (PetscBTLookup(neighbors, proc)) {
374: ranksNew[n] = proc;
375: localPointsNew[n] = proc;
376: remotePointsNew[n].index = rank;
377: remotePointsNew[n].rank = proc;
378: ++n;
379: }
380: }
381: PetscBTDestroy(&neighbors);
382: if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
383: else {PetscFree(ranksNew);}
384: if (sfProcess) {
385: PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
386: PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
387: PetscSFSetFromOptions(*sfProcess);
388: PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
389: }
390: return(0);
391: }
393: /*@
394: DMPlexDistributeOwnership - Compute owner information for shared points. This basically makes the one-sided point SF information two-sided.
396: Collective on DM
398: Input Parameter:
399: . dm - The DM
401: Output Parameters:
402: + rootSection - The number of leaves for a given root point
403: . rootrank - The rank of each edge into the root point
404: . leafSection - The number of processes sharing a given leaf point
405: - leafrank - The rank of each process sharing a leaf point
407: Level: developer
409: .seealso: DMPlexCreateOverlap()
410: @*/
411: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
412: {
413: MPI_Comm comm;
414: PetscSF sfPoint;
415: const PetscInt *rootdegree;
416: PetscInt *myrank, *remoterank;
417: PetscInt pStart, pEnd, p, nedges;
418: PetscMPIInt rank;
419: PetscErrorCode ierr;
422: PetscObjectGetComm((PetscObject) dm, &comm);
423: MPI_Comm_rank(comm, &rank);
424: DMPlexGetChart(dm, &pStart, &pEnd);
425: DMGetPointSF(dm, &sfPoint);
426: /* Compute number of leaves for each root */
427: PetscObjectSetName((PetscObject) rootSection, "Root Section");
428: PetscSectionSetChart(rootSection, pStart, pEnd);
429: PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
430: PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
431: for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
432: PetscSectionSetUp(rootSection);
433: /* Gather rank of each leaf to root */
434: PetscSectionGetStorageSize(rootSection, &nedges);
435: PetscMalloc1(pEnd-pStart, &myrank);
436: PetscMalloc1(nedges, &remoterank);
437: for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
438: PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
439: PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
440: PetscFree(myrank);
441: ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
442: /* Distribute remote ranks to leaves */
443: PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
444: DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
445: return(0);
446: }
448: /*@C
449: DMPlexCreateOverlap - Compute a DMLabel indicating which overlap points should be sent to other processes, as point/rank pairings.
451: Collective on DM
453: Input Parameters:
454: + dm - The DM
455: . levels - Number of overlap levels
456: . rootSection - The number of leaves for a given root point
457: . rootrank - The rank of each edge into the root point
458: . leafSection - The number of processes sharing a given leaf point
459: - leafrank - The rank of each process sharing a leaf point
461: Output Parameter:
462: . ovLabel - DMLabel containing remote overlap contributions as point/rank pairings
464: Level: developer
466: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
467: @*/
468: PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
469: {
470: MPI_Comm comm;
471: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
472: PetscSF sfPoint;
473: const PetscSFNode *remote;
474: const PetscInt *local;
475: const PetscInt *nrank, *rrank;
476: PetscInt *adj = NULL;
477: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
478: PetscMPIInt rank, size;
479: PetscBool flg;
480: PetscErrorCode ierr;
483: PetscObjectGetComm((PetscObject) dm, &comm);
484: MPI_Comm_size(comm, &size);
485: MPI_Comm_rank(comm, &rank);
486: DMGetPointSF(dm, &sfPoint);
487: DMPlexGetChart(dm, &pStart, &pEnd);
488: PetscSectionGetChart(leafSection, &sStart, &sEnd);
489: PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
490: DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);
491: /* Handle leaves: shared with the root point */
492: for (l = 0; l < nleaves; ++l) {
493: PetscInt adjSize = PETSC_DETERMINE, a;
495: DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);
496: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
497: }
498: ISGetIndices(rootrank, &rrank);
499: ISGetIndices(leafrank, &nrank);
500: /* Handle roots */
501: for (p = pStart; p < pEnd; ++p) {
502: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
504: if ((p >= sStart) && (p < sEnd)) {
505: /* Some leaves share a root with other leaves on different processes */
506: PetscSectionGetDof(leafSection, p, &neighbors);
507: if (neighbors) {
508: PetscSectionGetOffset(leafSection, p, &noff);
509: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
510: for (n = 0; n < neighbors; ++n) {
511: const PetscInt remoteRank = nrank[noff+n];
513: if (remoteRank == rank) continue;
514: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
515: }
516: }
517: }
518: /* Roots are shared with leaves */
519: PetscSectionGetDof(rootSection, p, &neighbors);
520: if (!neighbors) continue;
521: PetscSectionGetOffset(rootSection, p, &noff);
522: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
523: for (n = 0; n < neighbors; ++n) {
524: const PetscInt remoteRank = rrank[noff+n];
526: if (remoteRank == rank) continue;
527: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
528: }
529: }
530: PetscFree(adj);
531: ISRestoreIndices(rootrank, &rrank);
532: ISRestoreIndices(leafrank, &nrank);
533: /* Add additional overlap levels */
534: for (l = 1; l < levels; l++) {
535: /* Propagate point donations over SF to capture remote connections */
536: DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
537: /* Add next level of point donations to the label */
538: DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
539: }
540: /* We require the closure in the overlap */
541: DMPlexPartitionLabelClosure(dm, ovAdjByRank);
542: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
543: if (flg) {
544: PetscViewer viewer;
545: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);
546: DMLabelView(ovAdjByRank, viewer);
547: }
548: /* Invert sender to receiver label */
549: DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
550: DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);
551: /* Add owned points, except for shared local points */
552: for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
553: for (l = 0; l < nleaves; ++l) {
554: DMLabelClearValue(*ovLabel, local[l], rank);
555: DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
556: }
557: /* Clean up */
558: DMLabelDestroy(&ovAdjByRank);
559: return(0);
560: }
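/* Illustrative sketch (not part of plexdistribute.c): the typical calling sequence
   for building a one-level overlap label, mirroring how DMPlexDistributeOverlap()
   drives DMPlexDistributeOwnership() and DMPlexCreateOverlap(). The function name
   ExampleBuildOverlapLabel is invented for this example. */
static PetscErrorCode ExampleBuildOverlapLabel(DM dm, DMLabel *ovLabel)
{
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  ierr = DMPlexCreateOverlap(dm, 1, rootSection, rootrank, leafSection, leafrank, ovLabel);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}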
562: /*@C
563: DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF
565: Collective on DM
567: Input Parameters:
568: + dm - The DM
569: - overlapSF - The SF mapping ghost points in overlap to owner points on other processes
571: Output Parameter:
572: . migrationSF - An SF that maps original points in old locations to points in new locations
574: Level: developer
576: .seealso: DMPlexCreateOverlap(), DMPlexDistribute()
577: @*/
578: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
579: {
580: MPI_Comm comm;
581: PetscMPIInt rank, size;
582: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
583: PetscInt *pointDepths, *remoteDepths, *ilocal;
584: PetscInt *depthRecv, *depthShift, *depthIdx;
585: PetscSFNode *iremote;
586: PetscSF pointSF;
587: const PetscInt *sharedLocal;
588: const PetscSFNode *overlapRemote, *sharedRemote;
589: PetscErrorCode ierr;
593: PetscObjectGetComm((PetscObject)dm, &comm);
594: MPI_Comm_rank(comm, &rank);
595: MPI_Comm_size(comm, &size);
596: DMGetDimension(dm, &dim);
598: /* Before building the migration SF we need to know the new stratum offsets */
599: PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
600: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
601: for (d=0; d<dim+1; d++) {
602: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
603: for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
604: }
605: for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
606: PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
607: PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);
609: /* Count received points in each stratum and compute the internal strata shift */
610: PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
611: for (d=0; d<dim+1; d++) depthRecv[d]=0;
612: for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
613: depthShift[dim] = 0;
614: for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
615: for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
616: for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
617: for (d=0; d<dim+1; d++) {
618: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
619: depthIdx[d] = pStart + depthShift[d];
620: }
622: /* Build an SF that describes the full overlap migration */
623: DMPlexGetChart(dm, &pStart, &pEnd);
624: newLeaves = pEnd - pStart + nleaves;
625: PetscMalloc1(newLeaves, &ilocal);
626: PetscMalloc1(newLeaves, &iremote);
627: /* First map local points to themselves */
628: for (d=0; d<dim+1; d++) {
629: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
630: for (p=pStart; p<pEnd; p++) {
631: point = p + depthShift[d];
632: ilocal[point] = point;
633: iremote[point].index = p;
634: iremote[point].rank = rank;
635: depthIdx[d]++;
636: }
637: }
639: /* Add in the remote roots for currently shared points */
640: DMGetPointSF(dm, &pointSF);
641: PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
642: for (d=0; d<dim+1; d++) {
643: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
644: for (p=0; p<numSharedPoints; p++) {
645: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
646: point = sharedLocal[p] + depthShift[d];
647: iremote[point].index = sharedRemote[p].index;
648: iremote[point].rank = sharedRemote[p].rank;
649: }
650: }
651: }
653: /* Now add the incoming overlap points */
654: for (p=0; p<nleaves; p++) {
655: point = depthIdx[remoteDepths[p]];
656: ilocal[point] = point;
657: iremote[point].index = overlapRemote[p].index;
658: iremote[point].rank = overlapRemote[p].rank;
659: depthIdx[remoteDepths[p]]++;
660: }
661: PetscFree2(pointDepths,remoteDepths);
663: PetscSFCreate(comm, migrationSF);
664: PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
665: PetscSFSetFromOptions(*migrationSF);
666: DMPlexGetChart(dm, &pStart, &pEnd);
667: PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);
669: PetscFree3(depthRecv, depthShift, depthIdx);
670: return(0);
671: }
673: /*@
674: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.
676: Input Parameters:
677: + dm - The DM
678: - sf - A star forest with non-ordered leaves, usually defining a DM point migration
680: Output Parameter:
681: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
683: Level: developer
685: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
686: @*/
687: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
688: {
689: MPI_Comm comm;
690: PetscMPIInt rank, size;
691: PetscInt d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
692: PetscInt *pointDepths, *remoteDepths, *ilocal;
693: PetscInt *depthRecv, *depthShift, *depthIdx;
694: PetscInt hybEnd[4];
695: const PetscSFNode *iremote;
696: PetscErrorCode ierr;
700: PetscObjectGetComm((PetscObject) dm, &comm);
701: MPI_Comm_rank(comm, &rank);
702: MPI_Comm_size(comm, &size);
703: DMPlexGetDepth(dm, &ldepth);
704: MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
705: if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);
707: /* Before building the migration SF we need to know the new stratum offsets */
708: PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
709: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
710: DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[PetscMax(depth-1,0)],&hybEnd[1],&hybEnd[0]);
711: for (d = 0; d < depth+1; ++d) {
712: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
713: for (p = pStart; p < pEnd; ++p) {
714: if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */
715: pointDepths[p] = 2 * d;
716: } else {
717: pointDepths[p] = 2 * d + 1;
718: }
719: }
720: }
721: for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
722: PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);
723: PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);
724: /* Count received points in each stratum and compute the internal strata shift */
725: PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);
726: for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0;
727: for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
728: depthShift[2*depth+1] = 0;
729: for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1];
730: for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth];
731: depthShift[0] += depthRecv[1];
732: for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1];
733: for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0];
734: for (d = 2 * depth-1; d > 2; --d) {
735: PetscInt e;
737: for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d];
738: }
739: for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;}
740: /* Derive a new local permutation based on stratified indices */
741: PetscMalloc1(nleaves, &ilocal);
742: for (p = 0; p < nleaves; ++p) {
743: const PetscInt dep = remoteDepths[p];
745: ilocal[p] = depthShift[dep] + depthIdx[dep];
746: depthIdx[dep]++;
747: }
748: PetscSFCreate(comm, migrationSF);
749: PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
750: PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
751: PetscFree2(pointDepths,remoteDepths);
752: PetscFree3(depthRecv, depthShift, depthIdx);
753: return(0);
754: }
756: /*@
757: DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
759: Collective on DM
761: Input Parameters:
762: + dm - The DMPlex object
763: . pointSF - The PetscSF describing the communication pattern
764: . originalSection - The PetscSection for existing data layout
765: - originalVec - The existing data
767: Output Parameters:
768: + newSection - The PetscSection describing the new data layout
769: - newVec - The new data
771: Level: developer
773: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
774: @*/
775: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
776: {
777: PetscSF fieldSF;
778: PetscInt *remoteOffsets, fieldSize;
779: PetscScalar *originalValues, *newValues;
783: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
784: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
786: PetscSectionGetStorageSize(newSection, &fieldSize);
787: VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
788: VecSetType(newVec,dm->vectype);
790: VecGetArray(originalVec, &originalValues);
791: VecGetArray(newVec, &newValues);
792: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
793: PetscFree(remoteOffsets);
794: PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
795: PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
796: PetscSFDestroy(&fieldSF);
797: VecRestoreArray(newVec, &newValues);
798: VecRestoreArray(originalVec, &originalValues);
799: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
800: return(0);
801: }
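/* Illustrative sketch (not part of plexdistribute.c): pushing a section-layout Vec
   through a migration SF, following the same pattern DMPlexDistributeCoordinates()
   uses below. The function name ExampleDistributeVec is invented; the caller creates
   newSection with PetscSectionCreate(), and newVec is created here (its sizes and
   type are set inside DMPlexDistributeField()). */
static PetscErrorCode ExampleDistributeVec(DM dm, PetscSF migrationSF, PetscSection section, Vec vec, PetscSection newSection, Vec *newVec)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecCreate(PETSC_COMM_SELF, newVec);CHKERRQ(ierr);
  ierr = DMPlexDistributeField(dm, migrationSF, section, vec, newSection, *newVec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}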
803: /*@
804: DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
806: Collective on DM
808: Input Parameters:
809: + dm - The DMPlex object
810: . pointSF - The PetscSF describing the communication pattern
811: . originalSection - The PetscSection for existing data layout
812: - originalIS - The existing data
814: Output Parameters:
815: + newSection - The PetscSection describing the new data layout
816: - newIS - The new data
818: Level: developer
820: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
821: @*/
822: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
823: {
824: PetscSF fieldSF;
825: PetscInt *newValues, *remoteOffsets, fieldSize;
826: const PetscInt *originalValues;
827: PetscErrorCode ierr;
830: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
831: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
833: PetscSectionGetStorageSize(newSection, &fieldSize);
834: PetscMalloc1(fieldSize, &newValues);
836: ISGetIndices(originalIS, &originalValues);
837: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
838: PetscFree(remoteOffsets);
839: PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
840: PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
841: PetscSFDestroy(&fieldSF);
842: ISRestoreIndices(originalIS, &originalValues);
843: ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
844: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
845: return(0);
846: }
848: /*@
849: DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
851: Collective on DM
853: Input Parameters:
854: + dm - The DMPlex object
855: . pointSF - The PetscSF describing the communication pattern
856: . originalSection - The PetscSection for existing data layout
857: . datatype - The type of data
858: - originalData - The existing data
860: Output Parameters:
861: + newSection - The PetscSection describing the new data layout
862: - newData - The new data
864: Level: developer
866: .seealso: DMPlexDistribute(), DMPlexDistributeField()
867: @*/
868: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
869: {
870: PetscSF fieldSF;
871: PetscInt *remoteOffsets, fieldSize;
872: PetscMPIInt dataSize;
876: PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
877: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
879: PetscSectionGetStorageSize(newSection, &fieldSize);
880: MPI_Type_size(datatype, &dataSize);
881: PetscMalloc(fieldSize * dataSize, newData);
883: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
884: PetscFree(remoteOffsets);
885: PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
886: PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
887: PetscSFDestroy(&fieldSF);
888: PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
889: return(0);
890: }
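/* Illustrative sketch (not part of plexdistribute.c): distributing a raw PetscInt
   array laid out by origSection. The function name ExampleDistributeInts is invented;
   newSection is created by the caller and *newData must later be freed with PetscFree(). */
static PetscErrorCode ExampleDistributeInts(DM dm, PetscSF migrationSF, PetscSection origSection, PetscInt *origData, PetscSection newSection, PetscInt **newData)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origData, newSection, (void **) newData);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}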
892: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
893: {
894: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
895: MPI_Comm comm;
896: PetscSF coneSF;
897: PetscSection originalConeSection, newConeSection;
898: PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
899: PetscBool flg;
900: PetscErrorCode ierr;
906: PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
907: /* Distribute cone section */
908: PetscObjectGetComm((PetscObject)dm, &comm);
909: DMPlexGetConeSection(dm, &originalConeSection);
910: DMPlexGetConeSection(dmParallel, &newConeSection);
911: PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
912: DMSetUp(dmParallel);
913: {
914: PetscInt pStart, pEnd, p;
916: PetscSectionGetChart(newConeSection, &pStart, &pEnd);
917: for (p = pStart; p < pEnd; ++p) {
918: PetscInt coneSize;
919: PetscSectionGetDof(newConeSection, p, &coneSize);
920: pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
921: }
922: }
923: /* Communicate and renumber cones */
924: PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
925: PetscFree(remoteOffsets);
926: DMPlexGetCones(dm, &cones);
927: if (original) {
928: PetscInt numCones;
930: PetscSectionGetStorageSize(originalConeSection,&numCones);
931: PetscMalloc1(numCones,&globCones);
932: ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
933: } else {
934: globCones = cones;
935: }
936: DMPlexGetCones(dmParallel, &newCones);
937: PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);
938: PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);
939: if (original) {
940: PetscFree(globCones);
941: }
942: PetscSectionGetStorageSize(newConeSection, &newConesSize);
943: ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
944: #if defined(PETSC_USE_DEBUG)
945: {
946: PetscInt p;
947: PetscBool valid = PETSC_TRUE;
948: for (p = 0; p < newConesSize; ++p) {
949: if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);}
950: }
951: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
952: }
953: #endif
954: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
955: if (flg) {
956: PetscPrintf(comm, "Serial Cone Section:\n");
957: PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);
958: PetscPrintf(comm, "Parallel Cone Section:\n");
959: PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);
960: PetscSFView(coneSF, NULL);
961: }
962: DMPlexGetConeOrientations(dm, &cones);
963: DMPlexGetConeOrientations(dmParallel, &newCones);
964: PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
965: PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
966: PetscSFDestroy(&coneSF);
967: PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
968: /* Create supports and stratify DMPlex */
969: {
970: PetscInt pStart, pEnd;
972: PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
973: PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
974: }
975: DMPlexSymmetrize(dmParallel);
976: DMPlexStratify(dmParallel);
977: {
978: PetscBool useCone, useClosure, useAnchors;
980: DMGetBasicAdjacency(dm, &useCone, &useClosure);
981: DMSetBasicAdjacency(dmParallel, useCone, useClosure);
982: DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
983: DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
984: }
985: return(0);
986: }
988: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
989: {
990: MPI_Comm comm;
991: PetscSection originalCoordSection, newCoordSection;
992: Vec originalCoordinates, newCoordinates;
993: PetscInt bs;
994: PetscBool isper;
995: const char *name;
996: const PetscReal *maxCell, *L;
997: const DMBoundaryType *bd;
998: PetscErrorCode ierr;
1004: PetscObjectGetComm((PetscObject)dm, &comm);
1005: DMGetCoordinateSection(dm, &originalCoordSection);
1006: DMGetCoordinateSection(dmParallel, &newCoordSection);
1007: DMGetCoordinatesLocal(dm, &originalCoordinates);
1008: if (originalCoordinates) {
1009: VecCreate(PETSC_COMM_SELF, &newCoordinates);
1010: PetscObjectGetName((PetscObject) originalCoordinates, &name);
1011: PetscObjectSetName((PetscObject) newCoordinates, name);
1013: DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1014: DMSetCoordinatesLocal(dmParallel, newCoordinates);
1015: VecGetBlockSize(originalCoordinates, &bs);
1016: VecSetBlockSize(newCoordinates, bs);
1017: VecDestroy(&newCoordinates);
1018: }
1019: DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);
1020: DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);
1021: return(0);
1022: }
1024: /* Here we are assuming that process 0 always has everything */
1025: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1026: {
1027: DM_Plex *mesh = (DM_Plex*) dm->data;
1028: MPI_Comm comm;
1029: DMLabel depthLabel;
1030: PetscMPIInt rank;
1031: PetscInt depth, d, numLabels, numLocalLabels, l;
1032: PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1033: PetscObjectState depthState = -1;
1034: PetscErrorCode ierr;
1040: PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1041: PetscObjectGetComm((PetscObject)dm, &comm);
1042: MPI_Comm_rank(comm, &rank);
1044: /* If the user has changed the depth label, communicate it instead */
1045: DMPlexGetDepth(dm, &depth);
1046: DMPlexGetDepthLabel(dm, &depthLabel);
1047: if (depthLabel) {PetscObjectStateGet((PetscObject) depthLabel, &depthState);}
1048: lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1049: MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1050: if (sendDepth) {
1051: DMRemoveLabel(dmParallel, "depth", &depthLabel);
1052: DMLabelDestroy(&depthLabel);
1053: }
1054: /* Everyone must have either the same number of labels, or none */
1055: DMGetNumLabels(dm, &numLocalLabels);
1056: numLabels = numLocalLabels;
1057: MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1058: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1059: for (l = numLabels-1; l >= 0; --l) {
1060: DMLabel label = NULL, labelNew = NULL;
1061: PetscBool isDepth, lisOutput = PETSC_TRUE, isOutput;
1062: const char *name = NULL;
1064: if (hasLabels) {
1065: DMGetLabelByNum(dm, l, &label);
1066: /* Skip "depth" because it is recreated */
1067: PetscObjectGetName((PetscObject) label, &name);
1068: PetscStrcmp(name, "depth", &isDepth);
1069: }
1070: MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);
1071: if (isDepth && !sendDepth) continue;
1072: DMLabelDistribute(label, migrationSF, &labelNew);
1073: if (isDepth) {
1074: /* Put in any missing strata which can occur if users are managing the depth label themselves */
1075: PetscInt gdepth;
1077: MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1078: if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1079: for (d = 0; d <= gdepth; ++d) {
1080: PetscBool has;
1082: DMLabelHasStratum(labelNew, d, &has);
1083: if (!has) {DMLabelAddStratum(labelNew, d);}
1084: }
1085: }
1086: DMAddLabel(dmParallel, labelNew);
1087: /* Put the output flag in the new label */
1088: if (hasLabels) {DMGetLabelOutput(dm, name, &lisOutput);}
1089: MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);
1090: PetscObjectGetName((PetscObject) labelNew, &name);
1091: DMSetLabelOutput(dmParallel, name, isOutput);
1092: }
1093: PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1094: return(0);
1095: }
1097: static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1098: {
1099: DM_Plex *mesh = (DM_Plex*) dm->data;
1100: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1101: PetscBool *isHybrid, *isHybridParallel;
1102: PetscInt dim, depth, d;
1103: PetscInt pStart, pEnd, pStartP, pEndP;
1104: PetscErrorCode ierr;
1110: DMGetDimension(dm, &dim);
1111: DMPlexGetDepth(dm, &depth);
1112: DMPlexGetChart(dm,&pStart,&pEnd);
1113: DMPlexGetChart(dmParallel,&pStartP,&pEndP);
1114: PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);
1115: for (d = 0; d <= depth; d++) {
1116: PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d];
1118: if (hybridMax >= 0) {
1119: PetscInt sStart, sEnd, p;
1121: DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);
1122: for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE;
1123: }
1124: }
1125: PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1126: PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1127: for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
1128: for (d = 0; d <= depth; d++) {
1129: PetscInt sStart, sEnd, p, dd;
1131: DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);
1132: dd = (depth == 1 && d == 1) ? dim : d;
1133: for (p = sStart; p < sEnd; p++) {
1134: if (isHybridParallel[p-pStartP]) {
1135: pmesh->hybridPointMax[dd] = p;
1136: break;
1137: }
1138: }
1139: }
1140: PetscFree2(isHybrid,isHybridParallel);
1141: return(0);
1142: }
1144: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1145: {
1146: DM_Plex *mesh = (DM_Plex*) dm->data;
1147: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1148: MPI_Comm comm;
1149: DM refTree;
1150: PetscSection origParentSection, newParentSection;
1151: PetscInt *origParents, *origChildIDs;
1152: PetscBool flg;
1153: PetscErrorCode ierr;
1158: PetscObjectGetComm((PetscObject)dm, &comm);
1160: /* Set up tree */
1161: DMPlexGetReferenceTree(dm,&refTree);
1162: DMPlexSetReferenceTree(dmParallel,refTree);
1163: DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1164: if (origParentSection) {
1165: PetscInt pStart, pEnd;
1166: PetscInt *newParents, *newChildIDs, *globParents;
1167: PetscInt *remoteOffsetsParents, newParentSize;
1168: PetscSF parentSF;
1170: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1171: PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1172: PetscSectionSetChart(newParentSection,pStart,pEnd);
1173: PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1174: PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1175: PetscFree(remoteOffsetsParents);
1176: PetscSectionGetStorageSize(newParentSection,&newParentSize);
1177: PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1178: if (original) {
1179: PetscInt numParents;
1181: PetscSectionGetStorageSize(origParentSection,&numParents);
1182: PetscMalloc1(numParents,&globParents);
1183: ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1184: }
1185: else {
1186: globParents = origParents;
1187: }
1188: PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);
1189: PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);
1190: if (original) {
1191: PetscFree(globParents);
1192: }
1193: PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1194: PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1195: ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1196: #if defined(PETSC_USE_DEBUG)
1197: {
1198: PetscInt p;
1199: PetscBool valid = PETSC_TRUE;
1200: for (p = 0; p < newParentSize; ++p) {
1201: if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1202: }
1203: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1204: }
1205: #endif
1206: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1207: if (flg) {
1208: PetscPrintf(comm, "Serial Parent Section: \n");
1209: PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);
1210: PetscPrintf(comm, "Parallel Parent Section: \n");
1211: PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);
1212: PetscSFView(parentSF, NULL);
1213: }
1214: DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1215: PetscSectionDestroy(&newParentSection);
1216: PetscFree2(newParents,newChildIDs);
1217: PetscSFDestroy(&parentSF);
1218: }
1219: pmesh->useAnchors = mesh->useAnchors;
1220: return(0);
1221: }
1223: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1224: {
1225: PetscMPIInt rank, size;
1226: MPI_Comm comm;
1227: PetscErrorCode ierr;
1233: /* Create point SF for parallel mesh */
1234: PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1235: PetscObjectGetComm((PetscObject)dm, &comm);
1236: MPI_Comm_rank(comm, &rank);
1237: MPI_Comm_size(comm, &size);
1238: {
1239: const PetscInt *leaves;
1240: PetscSFNode *remotePoints, *rowners, *lowners;
1241: PetscInt numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1242: PetscInt pStart, pEnd;
1244: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1245: PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1246: PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1247: for (p=0; p<numRoots; p++) {
1248: rowners[p].rank = -1;
1249: rowners[p].index = -1;
1250: }
1251: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1252: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1253: for (p = 0; p < numLeaves; ++p) {
1254: if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1255: lowners[p].rank = rank;
1256: lowners[p].index = leaves ? leaves[p] : p;
1257: } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
1258: lowners[p].rank = -2;
1259: lowners[p].index = -2;
1260: }
1261: }
1262: for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1263: rowners[p].rank = -3;
1264: rowners[p].index = -3;
1265: }
1266: PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1267: PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1268: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1269: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1270: for (p = 0; p < numLeaves; ++p) {
1271: if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1272: if (lowners[p].rank != rank) ++numGhostPoints;
1273: }
1274: PetscMalloc1(numGhostPoints, &ghostPoints);
1275: PetscMalloc1(numGhostPoints, &remotePoints);
1276: for (p = 0, gp = 0; p < numLeaves; ++p) {
1277: if (lowners[p].rank != rank) {
1278: ghostPoints[gp] = leaves ? leaves[p] : p;
1279: remotePoints[gp].rank = lowners[p].rank;
1280: remotePoints[gp].index = lowners[p].index;
1281: ++gp;
1282: }
1283: }
1284: PetscFree2(rowners,lowners);
1285: PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1286: PetscSFSetFromOptions((dmParallel)->sf);
1287: }
1288: {
1289: PetscBool useCone, useClosure, useAnchors;
1291: DMGetBasicAdjacency(dm, &useCone, &useClosure);
1292: DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1293: DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1294: DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1295: }
1296: PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1297: return(0);
1298: }
1300: /*@
1301: DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?
1303: Input Parameters:
1304: + dm - The DMPlex object
1305: - flg - Balance the partition?
1307: Level: intermediate
1309: .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1310: @*/
1311: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1312: {
1313: DM_Plex *mesh = (DM_Plex *)dm->data;
1316: mesh->partitionBalance = flg;
1317: return(0);
1318: }
1320: /*@
1321: DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?
1323: Input Parameter:
1324: . dm - The DMPlex object
1326: Output Parameter:
1327: . flg - Whether the partition is balanced
1329: Level: intermediate
1331: .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1332: @*/
1333: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1334: {
1335: DM_Plex *mesh = (DM_Plex *)dm->data;
1340: *flg = mesh->partitionBalance;
1341: return(0);
1342: }
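/* Illustrative sketch (not part of plexdistribute.c): requesting a balanced shared
   point partition before distributing. The function name ExampleBalancedDistribute
   is invented for this example. */
static PetscErrorCode ExampleBalancedDistribute(DM dm, DM *dmDist)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexSetPartitionBalance(dm, PETSC_TRUE);CHKERRQ(ierr); /* spread ownership of shared points across ranks */
  ierr = DMPlexDistribute(dm, 0, NULL, dmDist);CHKERRQ(ierr);     /* *dmDist is NULL if nothing was distributed */
  PetscFunctionReturn(0);
}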
1344: /*@C
1345: DMPlexCreatePointSF - Build a point SF from an SF describing a point migration
1347: Input Parameters:
1348: + dm - The source DMPlex object
1349: . migrationSF - The star forest that describes the parallel point remapping
1350: - ownership - Flag causing a vote to determine point ownership
1352: Output Parameter:
1353: . pointSF - The star forest describing the point overlap in the remapped DM
1355: Level: developer
1357: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1358: @*/
1359: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1360: {
1361: PetscMPIInt rank, size;
1362: PetscInt p, nroots, nleaves, idx, npointLeaves;
1363: PetscInt *pointLocal;
1364: const PetscInt *leaves;
1365: const PetscSFNode *roots;
1366: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1367: Vec shifts;
1368: const PetscInt numShifts = 13759;
1369: const PetscScalar *shift = NULL;
1370: const PetscBool shiftDebug = PETSC_FALSE;
1371: PetscBool balance;
1372: PetscErrorCode ierr;
1376: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1377: MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
1379: DMPlexGetPartitionBalance(dm, &balance);
1380: PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1381: PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1382: if (ownership) {
1383: /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1384: if (balance) {
1385: PetscRandom r;
1387: PetscRandomCreate(PETSC_COMM_SELF, &r);
1388: PetscRandomSetInterval(r, 0, 2467*size);
1389: VecCreate(PETSC_COMM_SELF, &shifts);
1390: VecSetSizes(shifts, numShifts, numShifts);
1391: VecSetType(shifts, VECSTANDARD);
1392: VecSetRandom(shifts, r);
1393: PetscRandomDestroy(&r);
1394: VecGetArrayRead(shifts, &shift);
1395: }
1397: /* Point ownership vote: Process with highest rank owns shared points */
1398: for (p = 0; p < nleaves; ++p) {
1399: if (shiftDebug) {
1400: PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);
1401: }
1402: /* Either put in a bid or we know we own it */
1403: leafNodes[p].rank = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
1404: leafNodes[p].index = p;
1405: }
1406: for (p = 0; p < nroots; p++) {
1407: /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1408: rootNodes[p].rank = -3;
1409: rootNodes[p].index = -3;
1410: }
1411: PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1412: PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1413: if (balance) {
1414: /* We've voted, now we need to get the rank. When we're balancing the partition, the "rank" in rootNodes is not
1415: * the rank but rather (rank + random)%size. So we do another reduction, voting the same way, but sending the
1416: * rank instead of the index. */
1417: PetscSFNode *rootRanks = NULL;
1418: PetscMalloc1(nroots, &rootRanks);
1419: for (p = 0; p < nroots; p++) {
1420: rootRanks[p].rank = -3;
1421: rootRanks[p].index = -3;
1422: }
1423: for (p = 0; p < nleaves; p++) leafNodes[p].index = rank;
1424: PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);
1425: PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);
1426: for (p = 0; p < nroots; p++) rootNodes[p].rank = rootRanks[p].index;
1427: PetscFree(rootRanks);
1428: }
1429: } else {
1430: for (p = 0; p < nroots; p++) {
1431: rootNodes[p].index = -1;
1432: rootNodes[p].rank = rank;
1433: };
1434: for (p = 0; p < nleaves; p++) {
1435: /* Write new local id into old location */
1436: if (roots[p].rank == rank) {
1437: rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1438: }
1439: }
1440: }
1441: PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1442: PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1444: for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1445: if (leafNodes[p].rank != rank) npointLeaves++;
1446: }
1447: PetscMalloc1(npointLeaves, &pointLocal);
1448: PetscMalloc1(npointLeaves, &pointRemote);
1449: for (idx = 0, p = 0; p < nleaves; p++) {
1450: if (leafNodes[p].rank != rank) {
1451: pointLocal[idx] = p;
1452: pointRemote[idx] = leafNodes[p];
1453: idx++;
1454: }
1455: }
1456: if (shift) {
1457: VecRestoreArrayRead(shifts, &shift);
1458: VecDestroy(&shifts);
1459: }
1460: if (shiftDebug) {PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);}
1461: PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1462: PetscSFSetFromOptions(*pointSF);
1463: PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1464: PetscFree2(rootNodes, leafNodes);
1465: return(0);
1466: }
1468: /*@C
1469: DMPlexMigrate - Migrates internal DM data over the supplied star forest
1470:
1471: Collective on DM and PetscSF
1473: Input Parameters:
1474: + dm - The source DMPlex object
1475: - sf - The star forest communication context describing the migration pattern
1477: Output Parameter:
1478: . targetDM - The target DMPlex object
1480: Level: intermediate
1482: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1483: @*/
1484: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1485: {
1486: MPI_Comm comm;
1487: PetscInt dim, cdim, nroots;
1488: PetscSF sfPoint;
1489: ISLocalToGlobalMapping ltogMigration;
1490: ISLocalToGlobalMapping ltogOriginal = NULL;
1491: PetscBool flg;
1492: PetscErrorCode ierr;
1496: PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1497: PetscObjectGetComm((PetscObject) dm, &comm);
1498: DMGetDimension(dm, &dim);
1499: DMSetDimension(targetDM, dim);
1500: DMGetCoordinateDim(dm, &cdim);
1501: DMSetCoordinateDim(targetDM, cdim);
1503: /* Check for a one-to-all distribution pattern */
1504: DMGetPointSF(dm, &sfPoint);
1505: PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1506: if (nroots >= 0) {
1507: IS isOriginal;
1508: PetscInt n, size, nleaves;
1509: PetscInt *numbering_orig, *numbering_new;
1511: /* Get the original point numbering */
1512: DMPlexCreatePointNumbering(dm, &isOriginal);
1513: ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal);
1514: ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1515: ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1516: /* Convert to positive global numbers */
1517: for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1518: /* Derive the new local-to-global mapping from the old one */
1519: PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1520: PetscMalloc1(nleaves, &numbering_new);
1521: PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1522: PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1523: ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, <ogMigration);
1524: ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1525: ISDestroy(&isOriginal);
1526: } else {
1527: /* One-to-all distribution pattern: We can derive LToG from SF */
1528: ISLocalToGlobalMappingCreateSF(sf, 0, <ogMigration);
1529: }
1530: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1531: if (flg) {
1532: PetscPrintf(comm, "Point renumbering for DM migration:\n");
1533: ISLocalToGlobalMappingView(ltogMigration, NULL);
1534: }
1535: /* Migrate DM data to target DM */
1536: DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1537: DMPlexDistributeLabels(dm, sf, targetDM);
1538: DMPlexDistributeCoordinates(dm, sf, targetDM);
1539: DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);
1540: DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1541: ISLocalToGlobalMappingDestroy(&ltogOriginal);
1542: ISLocalToGlobalMappingDestroy(&ltogMigration);
1543: PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1544: return(0);
1545: }
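Illustrative usage (a sketch, not part of plexdistribute.c; the helper name is hypothetical): assuming a migration star forest has already been built, for instance with DMPlexPartitionLabelCreateSF(), the target DM is created on the same communicator and then filled by DMPlexMigrate().

#include <petscdmplex.h>

/* Hypothetical helper: migrate 'dm' over a given migration SF into a freshly
   created DMPlex on the same communicator. */
static PetscErrorCode MigrateMeshExample(DM dm, PetscSF sfMigration, DM *dmTarget)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMPlexCreate(PetscObjectComm((PetscObject) dm), dmTarget);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *dmTarget, "Migrated Mesh");CHKERRQ(ierr);
  ierr = DMPlexMigrate(dm, sfMigration, *dmTarget);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

A full distribution would additionally construct and attach a new point SF afterwards, as DMPlexDistribute() below does with DMPlexCreatePointSF() and DMSetPointSF().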
1547: PETSC_INTERN PetscErrorCode DMPlexPartitionLabelClosure_Private(DM,DMLabel,PetscInt,PetscInt,const PetscInt[],IS*);
1549: /*@C
1550: DMPlexDistribute - Distributes the mesh and any associated sections.
1552: Collective on DM
1554: Input Parameters:
1555: + dm - The original DMPlex object
1556: - overlap - The overlap of partitions, 0 is the default
1558: Output Parameters:
1559: + sf - The PetscSF used for point distribution, or NULL if not needed
1560: - dmParallel - The distributed DMPlex object
1562: Note: If the mesh was not distributed, the output dmParallel will be NULL.
1564: The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1565: representation on the mesh.
1567: Level: intermediate
1569: .keywords: mesh, elements
1570: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMSetAdjacency()
1571: @*/
1572: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1573: {
1574: MPI_Comm comm;
1575: PetscPartitioner partitioner;
1576: IS cellPart;
1577: PetscSection cellPartSection;
1578: DM dmCoord;
1579: DMLabel lblPartition, lblMigration;
1580: PetscSF sfMigration, sfStratified, sfPoint;
1581: PetscBool flg, balance;
1582: PetscMPIInt rank, size;
1583: PetscErrorCode ierr;
1591: if (sf) *sf = NULL;
1592: *dmParallel = NULL;
1593: PetscObjectGetComm((PetscObject)dm,&comm);
1594: MPI_Comm_rank(comm, &rank);
1595: MPI_Comm_size(comm, &size);
1596: if (size == 1) return(0);
1598: PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1599: /* Create cell partition */
1600: PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1601: PetscSectionCreate(comm, &cellPartSection);
1602: DMPlexGetPartitioner(dm, &partitioner);
1603: PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);
1604: {
1605: /* Convert partition to DMLabel */
1606: IS is;
1607: PetscHSetI ht;
1608: PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks, *iranks;
1609: const PetscInt *points;
1611: DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);
1612: /* Preallocate strata */
1613: PetscHSetICreate(&ht);
1614: PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1615: for (proc = pStart; proc < pEnd; proc++) {
1616: PetscSectionGetDof(cellPartSection, proc, &npoints);
1617: if (npoints) {PetscHSetIAdd(ht, proc);}
1618: }
1619: PetscHSetIGetSize(ht, &nranks);
1620: PetscMalloc1(nranks, &iranks);
1621: PetscHSetIGetElems(ht, &poff, iranks);
1622: PetscHSetIDestroy(&ht);
1623: DMLabelAddStrata(lblPartition, nranks, iranks);
1624: PetscFree(iranks);
1625: /* Inline DMPlexPartitionLabelClosure() */
1626: ISGetIndices(cellPart, &points);
1627: PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1628: for (proc = pStart; proc < pEnd; proc++) {
1629: PetscSectionGetDof(cellPartSection, proc, &npoints);
1630: if (!npoints) continue;
1631: PetscSectionGetOffset(cellPartSection, proc, &poff);
1632: DMPlexPartitionLabelClosure_Private(dm, lblPartition, proc, npoints, points+poff, &is);
1633: DMLabelSetStratumIS(lblPartition, proc, is);
1634: ISDestroy(&is);
1635: }
1636: ISRestoreIndices(cellPart, &points);
1637: }
1638: DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);
1639: DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);
1640: DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1641: /* Stratify the SF in case we are migrating an already parallel plex */
1642: DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1643: PetscSFDestroy(&sfMigration);
1644: sfMigration = sfStratified;
1645: PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1646: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1647: if (flg) {
1648: DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);
1649: PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);
1650: }
1652: /* Create non-overlapping parallel DM and migrate internal data */
1653: DMPlexCreate(comm, dmParallel);
1654: PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1655: DMPlexMigrate(dm, sfMigration, *dmParallel);
1657: /* Build the point SF without overlap */
1658: DMPlexGetPartitionBalance(dm, &balance);
1659: DMPlexSetPartitionBalance(*dmParallel, balance);
1660: DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1661: DMSetPointSF(*dmParallel, sfPoint);
1662: DMGetCoordinateDM(*dmParallel, &dmCoord);
1663: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1664: if (flg) {PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);}
1666: if (overlap > 0) {
1667: DM dmOverlap;
1668: PetscInt nroots, nleaves;
1669: PetscSFNode *newRemote;
1670: const PetscSFNode *oldRemote;
1671: PetscSF sfOverlap, sfOverlapPoint;
1672: /* Add the partition overlap to the distributed DM */
1673: DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1674: DMDestroy(dmParallel);
1675: *dmParallel = dmOverlap;
1676: if (flg) {
1677: PetscPrintf(comm, "Overlap Migration SF:\n");
1678: PetscSFView(sfOverlap, NULL);
1679: }
1681: /* Re-map the migration SF to establish the full migration pattern */
1682: PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);
1683: PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1684: PetscMalloc1(nleaves, &newRemote);
1685: PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1686: PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1687: PetscSFCreate(comm, &sfOverlapPoint);
1688: PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1689: PetscSFDestroy(&sfOverlap);
1690: PetscSFDestroy(&sfMigration);
1691: sfMigration = sfOverlapPoint;
1692: }
1693: /* Cleanup Partition */
1694: DMLabelDestroy(&lblPartition);
1695: DMLabelDestroy(&lblMigration);
1696: PetscSectionDestroy(&cellPartSection);
1697: ISDestroy(&cellPart);
1698: /* Copy BC */
1699: DMCopyBoundary(dm, *dmParallel);
1700: /* Create sfNatural */
1701: if (dm->useNatural) {
1702: PetscSection section;
1704: DMGetSection(dm, &section);
1705: DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1706: DMSetUseNatural(*dmParallel, PETSC_TRUE);
1707: }
1708: /* Cleanup */
1709: if (sf) {*sf = sfMigration;}
1710: else {PetscSFDestroy(&sfMigration);}
1711: PetscSFDestroy(&sfPoint);
1712: PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1713: return(0);
1714: }
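Illustrative usage (a sketch; the helper name is hypothetical): the common pattern is to distribute with zero overlap and replace the original mesh whenever a parallel mesh is returned. Adjacency can be configured beforehand with DMSetAdjacency() if the default cone/closure combination does not suit the discretization.

#include <petscdmplex.h>
#include <petscsf.h>

/* Hypothetical helper: replace *dm by its distributed counterpart. */
static PetscErrorCode DistributeMeshExample(DM *dm)
{
  DM             dmDist = NULL;
  PetscSF        sfDist = NULL;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMPlexDistribute(*dm, 0, &sfDist, &dmDist);CHKERRQ(ierr);
  if (dmDist) {                      /* NULL when running on a single process */
    ierr = DMDestroy(dm);CHKERRQ(ierr);
    *dm  = dmDist;
  }
  ierr = PetscSFDestroy(&sfDist);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

The returned SF describes where each original point went and can be kept to migrate user data; here it is simply destroyed.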
1716: /*@C
1717: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.
1719: Collective on DM
1721: Input Parameters:
1722: + dm - The non-overlapping distributed DMPlex object
1723: - overlap - The overlap of partitions
1725: Output Parameters:
1726: + sf - The PetscSF used for point distribution
1727: - dmOverlap - The overlapping distributed DMPlex object, or NULL
1729: Note: If the mesh was not distributed, the output dmOverlap will be NULL.
1731: The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1732: representation on the mesh.
1734: Level: intermediate
1736: .keywords: mesh, elements
1737: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMSetAdjacency()
1738: @*/
1739: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1740: {
1741: MPI_Comm comm;
1742: PetscMPIInt size, rank;
1743: PetscSection rootSection, leafSection;
1744: IS rootrank, leafrank;
1745: DM dmCoord;
1746: DMLabel lblOverlap;
1747: PetscSF sfOverlap, sfStratified, sfPoint;
1748: PetscErrorCode ierr;
1755: if (sf) *sf = NULL;
1756: *dmOverlap = NULL;
1757: PetscObjectGetComm((PetscObject)dm,&comm);
1758: MPI_Comm_size(comm, &size);
1759: MPI_Comm_rank(comm, &rank);
1760: if (size == 1) return(0);
1762: PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1763: /* Compute point overlap with neighbouring processes on the distributed DM */
1764: PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);
1765: PetscSectionCreate(comm, &rootSection);
1766: PetscSectionCreate(comm, &leafSection);
1767: DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1768: DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1769: /* Convert overlap label to stratified migration SF */
1770: DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1771: DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1772: PetscSFDestroy(&sfOverlap);
1773: sfOverlap = sfStratified;
1774: PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1775: PetscSFSetFromOptions(sfOverlap);
1777: PetscSectionDestroy(&rootSection);
1778: PetscSectionDestroy(&leafSection);
1779: ISDestroy(&rootrank);
1780: ISDestroy(&leafrank);
1781: PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);
1783: /* Build the overlapping DM */
1784: DMPlexCreate(comm, dmOverlap);
1785: PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1786: DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1787: /* Build the new point SF */
1788: DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1789: DMSetPointSF(*dmOverlap, sfPoint);
1790: DMGetCoordinateDM(*dmOverlap, &dmCoord);
1791: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1792: PetscSFDestroy(&sfPoint);
1793: /* Cleanup overlap partition */
1794: DMLabelDestroy(&lblOverlap);
1795: if (sf) *sf = sfOverlap;
1796: else {PetscSFDestroy(&sfOverlap);}
1797: PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1798: return(0);
1799: }
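Illustrative usage (a sketch; the helper name is hypothetical): overlap is added to an already distributed, non-overlapping mesh, and the overlapping mesh replaces the original when one is produced. Passing NULL for the SF output is permitted since it is only written when requested.

#include <petscdmplex.h>

/* Hypothetical helper: add 'overlap' levels of cell overlap to a mesh that is
   already distributed without overlap. */
static PetscErrorCode AddOverlapExample(DM *dm, PetscInt overlap)
{
  DM             dmOverlap = NULL;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMPlexDistributeOverlap(*dm, overlap, NULL, &dmOverlap);CHKERRQ(ierr);
  if (dmOverlap) {                   /* NULL when running on a single process */
    ierr = DMDestroy(dm);CHKERRQ(ierr);
    *dm  = dmOverlap;
  }
  PetscFunctionReturn(0);
}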
1801: /*@C
1802: DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1803: root process of the original's communicator.
1804:
1805: Collective on DM
1807: Input Parameter:
1808: . dm - the original DMPlex object
1810: Output Parameters:
1811: + sf - the PetscSF used for point distribution (optional)
1812: - gatherMesh - the gathered DM object, or NULL
1814: Level: intermediate
1816: .keywords: mesh
1817: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1818: @*/
1819: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
1820: {
1821: MPI_Comm comm;
1822: PetscMPIInt size;
1823: PetscPartitioner oldPart, gatherPart;
1829: *gatherMesh = NULL;
1830: if (sf) *sf = NULL;
1831: comm = PetscObjectComm((PetscObject)dm);
1832: MPI_Comm_size(comm,&size);
1833: if (size == 1) return(0);
1834: DMPlexGetPartitioner(dm,&oldPart);
1835: PetscObjectReference((PetscObject)oldPart);
1836: PetscPartitionerCreate(comm,&gatherPart);
1837: PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1838: DMPlexSetPartitioner(dm,gatherPart);
1839: DMPlexDistribute(dm,0,sf,gatherMesh);
1841: DMPlexSetPartitioner(dm,oldPart);
1842: PetscPartitionerDestroy(&gatherPart);
1843: PetscPartitionerDestroy(&oldPart);
1844: return(0);
1845: }
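Illustrative usage (a sketch; the helper name and viewer argument are assumptions): gather the mesh onto the root rank, for example to inspect it there.

#include <petscdmplex.h>

/* Hypothetical helper: gather the mesh onto the root rank and view it there. */
static PetscErrorCode ViewGatheredMeshExample(DM dm, PetscViewer viewer)
{
  DM             gatherDM = NULL;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMPlexGetGatherDM(dm, NULL, &gatherDM);CHKERRQ(ierr);
  if (gatherDM) {                    /* NULL when running on a single process */
    ierr = DMView(gatherDM, viewer);CHKERRQ(ierr);
    ierr = DMDestroy(&gatherDM);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}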
1847: /*@C
1848: DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.
1849:
1850: Collective on DM
1852: Input Parameter:
1853: . dm - the original DMPlex object
1855: Output Parameters:
1856: + sf - the PetscSF used for point distribution (optional)
1857: - redundantMesh - the redundant DM object, or NULL
1859: Level: intermediate
1861: .keywords: mesh
1862: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1863: @*/
1864: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
1865: {
1866: MPI_Comm comm;
1867: PetscMPIInt size, rank;
1868: PetscInt pStart, pEnd, p;
1869: PetscInt numPoints = -1;
1870: PetscSF migrationSF, sfPoint, gatherSF;
1871: DM gatherDM, dmCoord;
1872: PetscSFNode *points;
1878: *redundantMesh = NULL;
1879: comm = PetscObjectComm((PetscObject)dm);
1880: MPI_Comm_size(comm,&size);
1881: if (size == 1) {
1882: PetscObjectReference((PetscObject) dm);
1883: *redundantMesh = dm;
1884: if (sf) *sf = NULL;
1885: return(0);
1886: }
1887: DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);
1888: if (!gatherDM) return(0);
1889: MPI_Comm_rank(comm,&rank);
1890: DMPlexGetChart(gatherDM,&pStart,&pEnd);
1891: numPoints = pEnd - pStart;
1892: MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1893: PetscMalloc1(numPoints,&points);
1894: PetscSFCreate(comm,&migrationSF);
1895: for (p = 0; p < numPoints; p++) {
1896: points[p].index = p;
1897: points[p].rank = 0;
1898: }
1899: PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1900: DMPlexCreate(comm, redundantMesh);
1901: PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1902: DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1903: DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1904: DMSetPointSF(*redundantMesh, sfPoint);
1905: DMGetCoordinateDM(*redundantMesh, &dmCoord);
1906: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1907: PetscSFDestroy(&sfPoint);
1908: if (sf) {
1909: PetscSF tsf;
1911: PetscSFCompose(gatherSF,migrationSF,&tsf);
1912: DMPlexStratifyMigrationSF(dm, tsf, sf);
1913: PetscSFDestroy(&tsf);
1914: }
1915: PetscSFDestroy(&migrationSF);
1916: PetscSFDestroy(&gatherSF);
1917: DMDestroy(&gatherDM);
1918: return(0);
1919: }
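Illustrative usage (a sketch; the helper name is hypothetical): obtain a mesh replicated on every rank. On a single process the input mesh itself is returned with an added reference, so the caller destroys the result in either case.

#include <petscdmplex.h>

/* Hypothetical helper: obtain a mesh that is fully replicated on every rank. */
static PetscErrorCode GetReplicatedMeshExample(DM dm, DM *dmRedundant)
{
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  ierr = DMPlexGetRedundantDM(dm, NULL, dmRedundant);CHKERRQ(ierr);
  /* *dmRedundant is the replicated mesh; the caller destroys it when done. */
  PetscFunctionReturn(0);
}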