Actual source code: plexdistribute.c
petsc-3.7.7 2017-09-25
1: #include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/
2: #include <petsc/private/dmlabelimpl.h> /*I "petscdmlabel.h" I*/
6: /*@
7: DMPlexSetAdjacencyUseCone - Define adjacency in the mesh using either the cone or the support first
9: Input Parameters:
10: + dm - The DM object
11: - useCone - Flag to use the cone first
13: Level: intermediate
15: Notes:
16: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
17: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
18: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
20: .seealso: DMPlexGetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
21: @*/
22: PetscErrorCode DMPlexSetAdjacencyUseCone(DM dm, PetscBool useCone)
23: {
24: DM_Plex *mesh = (DM_Plex *) dm->data;
28: mesh->useCone = useCone;
29: return(0);
30: }
34: /*@
35: DMPlexGetAdjacencyUseCone - Query whether adjacency in the mesh uses the cone or the support first
37: Input Parameter:
38: . dm - The DM object
40: Output Parameter:
41: . useCone - Flag to use the cone first
43: Level: intermediate
45: Notes:
46: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
47: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
48: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
50: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
51: @*/
52: PetscErrorCode DMPlexGetAdjacencyUseCone(DM dm, PetscBool *useCone)
53: {
54: DM_Plex *mesh = (DM_Plex *) dm->data;
59: *useCone = mesh->useCone;
60: return(0);
61: }
65: /*@
66: DMPlexSetAdjacencyUseClosure - Define adjacency in the mesh using the transitive closure
68: Input Parameters:
69: + dm - The DM object
70: - useClosure - Flag to use the closure
72: Level: intermediate
74: Notes:
75: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
76: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
77: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
79: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
80: @*/
81: PetscErrorCode DMPlexSetAdjacencyUseClosure(DM dm, PetscBool useClosure)
82: {
83: DM_Plex *mesh = (DM_Plex *) dm->data;
87: mesh->useClosure = useClosure;
88: return(0);
89: }
93: /*@
94: DMPlexGetAdjacencyUseClosure - Query whether adjacency in the mesh uses the transitive closure
96: Input Parameter:
97: . dm - The DM object
99: Output Parameter:
100: . useClosure - Flag to use the closure
102: Level: intermediate
104: Notes:
105: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
106: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
107: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
109: .seealso: DMPlexSetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
110: @*/
111: PetscErrorCode DMPlexGetAdjacencyUseClosure(DM dm, PetscBool *useClosure)
112: {
113: DM_Plex *mesh = (DM_Plex *) dm->data;
118: *useClosure = mesh->useClosure;
119: return(0);
120: }
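/* Editor's note: an illustrative usage sketch, not part of plexdistribute.c. It shows how the
   useCone/useClosure flags combine to select the FEM-style adjacency described in the Notes
   above; the helper name is hypothetical. */
static PetscErrorCode ExampleSelectFEMAdjacency(DM dm)
{
  PetscBool      useCone, useClosure;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* FEM: q \in closure(star(p))  <=>  useCone = PETSC_FALSE, useClosure = PETSC_TRUE */
  ierr = DMPlexSetAdjacencyUseCone(dm, PETSC_FALSE);CHKERRQ(ierr);
  ierr = DMPlexSetAdjacencyUseClosure(dm, PETSC_TRUE);CHKERRQ(ierr);
  /* Query the flags back, e.g. to confirm the setting before distribution */
  ierr = DMPlexGetAdjacencyUseCone(dm, &useCone);CHKERRQ(ierr);
  ierr = DMPlexGetAdjacencyUseClosure(dm, &useClosure);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}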
124: /*@
125: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
127: Input Parameters:
128: + dm - The DM object
129: - useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
131: Level: intermediate
133: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
134: @*/
135: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
136: {
137: DM_Plex *mesh = (DM_Plex *) dm->data;
141: mesh->useAnchors = useAnchors;
142: return(0);
143: }
147: /*@
148: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
150: Input Parameter:
151: . dm - The DM object
153: Output Parameter:
154: . useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
156: Level: intermediate
158: .seealso: DMPlexSetAdjacencyUseAnchors(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
159: @*/
160: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
161: {
162: DM_Plex *mesh = (DM_Plex *) dm->data;
167: *useAnchors = mesh->useAnchors;
168: return(0);
169: }
173: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
174: {
175: const PetscInt *cone = NULL;
176: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
177: PetscErrorCode ierr;
180: DMPlexGetConeSize(dm, p, &coneSize);
181: DMPlexGetCone(dm, p, &cone);
182: for (c = 0; c <= coneSize; ++c) {
183: const PetscInt point = !c ? p : cone[c-1];
184: const PetscInt *support = NULL;
185: PetscInt supportSize, s, q;
187: DMPlexGetSupportSize(dm, point, &supportSize);
188: DMPlexGetSupport(dm, point, &support);
189: for (s = 0; s < supportSize; ++s) {
190: for (q = 0; q < numAdj || (adj[numAdj++] = support[s],0); ++q) {
191: if (support[s] == adj[q]) break;
192: }
193: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
194: }
195: }
196: *adjSize = numAdj;
197: return(0);
198: }
202: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
203: {
204: const PetscInt *support = NULL;
205: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
206: PetscErrorCode ierr;
209: DMPlexGetSupportSize(dm, p, &supportSize);
210: DMPlexGetSupport(dm, p, &support);
211: for (s = 0; s <= supportSize; ++s) {
212: const PetscInt point = !s ? p : support[s-1];
213: const PetscInt *cone = NULL;
214: PetscInt coneSize, c, q;
216: DMPlexGetConeSize(dm, point, &coneSize);
217: DMPlexGetCone(dm, point, &cone);
218: for (c = 0; c < coneSize; ++c) {
219: for (q = 0; q < numAdj || (adj[numAdj++] = cone[c],0); ++q) {
220: if (cone[c] == adj[q]) break;
221: }
222: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
223: }
224: }
225: *adjSize = numAdj;
226: return(0);
227: }
231: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
232: {
233: PetscInt *star = NULL;
234: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
238: DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
239: for (s = 0; s < starSize*2; s += 2) {
240: const PetscInt *closure = NULL;
241: PetscInt closureSize, c, q;
243: DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
244: for (c = 0; c < closureSize*2; c += 2) {
245: for (q = 0; q < numAdj || (adj[numAdj++] = closure[c],0); ++q) {
246: if (closure[c] == adj[q]) break;
247: }
248: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
249: }
250: DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
251: }
252: DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
253: *adjSize = numAdj;
254: return(0);
255: }
259: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
260: {
261: static PetscInt asiz = 0;
262: PetscInt maxAnchors = 1;
263: PetscInt aStart = -1, aEnd = -1;
264: PetscInt maxAdjSize;
265: PetscSection aSec = NULL;
266: IS aIS = NULL;
267: const PetscInt *anchors;
268: PetscErrorCode ierr;
271: if (useAnchors) {
272: DMPlexGetAnchors(dm,&aSec,&aIS);
273: if (aSec) {
274: PetscSectionGetMaxDof(aSec,&maxAnchors);
275: maxAnchors = PetscMax(1,maxAnchors);
276: PetscSectionGetChart(aSec,&aStart,&aEnd);
277: ISGetIndices(aIS,&anchors);
278: }
279: }
280: if (!*adj) {
281: PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;
283: DMPlexGetChart(dm, &pStart,&pEnd);
284: DMPlexGetDepth(dm, &depth);
285: DMPlexGetMaxSizes(dm, &maxC, &maxS);
286: coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
287: supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
288: asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
289: asiz *= maxAnchors;
290: asiz = PetscMin(asiz,pEnd-pStart);
291: PetscMalloc1(asiz,adj);
292: }
293: if (*adjSize < 0) *adjSize = asiz;
294: maxAdjSize = *adjSize;
295: if (useTransitiveClosure) {
296: DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
297: } else if (useCone) {
298: DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
299: } else {
300: DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
301: }
302: if (useAnchors && aSec) {
303: PetscInt origSize = *adjSize;
304: PetscInt numAdj = origSize;
305: PetscInt i = 0, j;
306: PetscInt *orig = *adj;
308: while (i < origSize) {
309: PetscInt p = orig[i];
310: PetscInt aDof = 0;
312: if (p >= aStart && p < aEnd) {
313: PetscSectionGetDof(aSec,p,&aDof);
314: }
315: if (aDof) {
316: PetscInt aOff;
317: PetscInt s, q;
319: for (j = i + 1; j < numAdj; j++) {
320: orig[j - 1] = orig[j];
321: }
322: origSize--;
323: numAdj--;
324: PetscSectionGetOffset(aSec,p,&aOff);
325: for (s = 0; s < aDof; ++s) {
326: for (q = 0; q < numAdj || (orig[numAdj++] = anchors[aOff+s],0); ++q) {
327: if (anchors[aOff+s] == orig[q]) break;
328: }
329: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
330: }
331: }
332: else {
333: i++;
334: }
335: }
336: *adjSize = numAdj;
337: ISRestoreIndices(aIS,&anchors);
338: }
339: return(0);
340: }
344: /*@
345: DMPlexGetAdjacency - Return all points adjacent to the given point
347: Input Parameters:
348: + dm - The DM object
349: . p - The point
350: . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
351: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize
353: Output Parameters:
354: + adjSize - The number of adjacent points
355: - adj - The adjacent points
357: Level: advanced
359: Notes: The user must call PetscFree() on the adj array if it was not passed in.
361: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
362: @*/
363: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
364: {
365: DM_Plex *mesh = (DM_Plex *) dm->data;
372: DMPlexGetAdjacency_Internal(dm, p, mesh->useCone, mesh->useClosure, mesh->useAnchors, adjSize, adj);
373: return(0);
374: }
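/* Editor's note: illustrative sketch, not part of plexdistribute.c. It shows the allocation
   convention documented above: passing adj = NULL with adjSize = PETSC_DETERMINE lets
   DMPlexGetAdjacency() allocate the array, which the caller must then PetscFree(). */
static PetscErrorCode ExamplePrintAdjacency(DM dm, PetscInt p)
{
  PetscInt       adjSize = PETSC_DETERMINE, *adj = NULL, a;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
  for (a = 0; a < adjSize; ++a) {
    ierr = PetscPrintf(PETSC_COMM_SELF, "point %D is adjacent to %D\n", p, adj[a]);CHKERRQ(ierr);
  }
  ierr = PetscFree(adj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}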
378: /*@
379: DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity
381: Collective on DM
383: Input Parameters:
384: + dm - The DM
385: . sfPoint - The PetscSF which encodes point connectivity
. rootRankSection - The number of leaves for a given root point
. rootRanks - The rank of each edge into the root point
. leafRankSection - The number of processes sharing a given leaf point
- leafRanks - The rank of each process sharing a leaf point
387: Output Parameters:
388: + processRanks - A list of process neighbors, or NULL
389: - sfProcess - An SF encoding the two-sided process connectivity, or NULL
391: Level: developer
393: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
394: @*/
395: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
396: {
397: const PetscSFNode *remotePoints;
398: PetscInt *localPointsNew;
399: PetscSFNode *remotePointsNew;
400: const PetscInt *nranks;
401: PetscInt *ranksNew;
402: PetscBT neighbors;
403: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
404: PetscMPIInt numProcs, proc, rank;
405: PetscErrorCode ierr;
412: MPI_Comm_size(PetscObjectComm((PetscObject) dm), &numProcs);
413: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
414: PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
415: PetscBTCreate(numProcs, &neighbors);
416: PetscBTMemzero(numProcs, neighbors);
417: /* Compute root-to-leaf process connectivity */
418: PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
419: ISGetIndices(rootRanks, &nranks);
420: for (p = pStart; p < pEnd; ++p) {
421: PetscInt ndof, noff, n;
423: PetscSectionGetDof(rootRankSection, p, &ndof);
424: PetscSectionGetOffset(rootRankSection, p, &noff);
425: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
426: }
427: ISRestoreIndices(rootRanks, &nranks);
428: /* Compute leaf-to-neighbor process connectivity */
429: PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
430: ISGetIndices(leafRanks, &nranks);
431: for (p = pStart; p < pEnd; ++p) {
432: PetscInt ndof, noff, n;
434: PetscSectionGetDof(leafRankSection, p, &ndof);
435: PetscSectionGetOffset(leafRankSection, p, &noff);
436: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
437: }
438: ISRestoreIndices(leafRanks, &nranks);
439: /* Compute leaf-to-root process connectivity */
440: for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
441: /* Calculate edges */
442: PetscBTClear(neighbors, rank);
443: for(proc = 0, numNeighbors = 0; proc < numProcs; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
444: PetscMalloc1(numNeighbors, &ranksNew);
445: PetscMalloc1(numNeighbors, &localPointsNew);
446: PetscMalloc1(numNeighbors, &remotePointsNew);
447: for(proc = 0, n = 0; proc < numProcs; ++proc) {
448: if (PetscBTLookup(neighbors, proc)) {
449: ranksNew[n] = proc;
450: localPointsNew[n] = proc;
451: remotePointsNew[n].index = rank;
452: remotePointsNew[n].rank = proc;
453: ++n;
454: }
455: }
456: PetscBTDestroy(&neighbors);
457: if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
458: else {PetscFree(ranksNew);}
459: if (sfProcess) {
460: PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
461: PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
462: PetscSFSetFromOptions(*sfProcess);
463: PetscSFSetGraph(*sfProcess, numProcs, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
464: }
465: return(0);
466: }
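/* Editor's note: illustrative sketch, not part of plexdistribute.c. It combines
   DMPlexDistributeOwnership() (below) with DMPlexCreateTwoSidedProcessSF() to obtain the list of
   neighboring ranks for the current point SF. The helper name and the assumption that the caller
   creates the two PetscSections are illustrative. */
static PetscErrorCode ExampleGetProcessNeighbors(DM dm, IS *processRanks)
{
  PetscSF        sfPoint;
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  /* Only the neighbor ranks are requested here; the process SF output is left NULL */
  ierr = DMPlexCreateTwoSidedProcessSF(dm, sfPoint, rootSection, rootrank, leafSection, leafrank, processRanks, NULL);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}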
470: /*@
471: DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided ownership information for a one-sided SF.
473: Collective on DM
475: Input Parameter:
476: . dm - The DM
478: Output Parameters:
479: + rootSection - The number of leaves for a given root point
480: . rootrank - The rank of each edge into the root point
481: . leafSection - The number of processes sharing a given leaf point
482: - leafrank - The rank of each process sharing a leaf point
484: Level: developer
486: .seealso: DMPlexCreateOverlap()
487: @*/
488: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
489: {
490: MPI_Comm comm;
491: PetscSF sfPoint;
492: const PetscInt *rootdegree;
493: PetscInt *myrank, *remoterank;
494: PetscInt pStart, pEnd, p, nedges;
495: PetscMPIInt rank;
496: PetscErrorCode ierr;
499: PetscObjectGetComm((PetscObject) dm, &comm);
500: MPI_Comm_rank(comm, &rank);
501: DMPlexGetChart(dm, &pStart, &pEnd);
502: DMGetPointSF(dm, &sfPoint);
503: /* Compute number of leaves for each root */
504: PetscObjectSetName((PetscObject) rootSection, "Root Section");
505: PetscSectionSetChart(rootSection, pStart, pEnd);
506: PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
507: PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
508: for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
509: PetscSectionSetUp(rootSection);
510: /* Gather rank of each leaf to root */
511: PetscSectionGetStorageSize(rootSection, &nedges);
512: PetscMalloc1(pEnd-pStart, &myrank);
513: PetscMalloc1(nedges, &remoterank);
514: for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
515: PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
516: PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
517: PetscFree(myrank);
518: ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
519: /* Distribute remote ranks to leaves */
520: PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
521: DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
522: return(0);
523: }
527: /*@C
528: DMPlexCreateOverlap - Compute the label marking the points that must be sent to other processes to build the requested overlap
530: Collective on DM
532: Input Parameters:
533: + dm - The DM
534: . levels - Number of overlap levels
535: . rootSection - The number of leaves for a given root point
536: . rootrank - The rank of each edge into the root point
537: . leafSection - The number of processes sharing a given leaf point
538: - leafrank - The rank of each process sharing a leaf point
540: Output Parameter:
541: . ovLabel - DMLabel containing remote overlap contributions as point/rank pairings
543: Level: developer
545: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
546: @*/
547: PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
548: {
549: MPI_Comm comm;
550: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
551: PetscSF sfPoint, sfProc;
552: const PetscSFNode *remote;
553: const PetscInt *local;
554: const PetscInt *nrank, *rrank;
555: PetscInt *adj = NULL;
556: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
557: PetscMPIInt rank, numProcs;
558: PetscBool useCone, useClosure, flg;
559: PetscErrorCode ierr;
562: PetscObjectGetComm((PetscObject) dm, &comm);
563: MPI_Comm_size(comm, &numProcs);
564: MPI_Comm_rank(comm, &rank);
565: DMGetPointSF(dm, &sfPoint);
566: DMPlexGetChart(dm, &pStart, &pEnd);
567: PetscSectionGetChart(leafSection, &sStart, &sEnd);
568: PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
569: DMLabelCreate("Overlap adjacency", &ovAdjByRank);
570: /* Handle leaves: shared with the root point */
571: for (l = 0; l < nleaves; ++l) {
572: PetscInt adjSize = PETSC_DETERMINE, a;
574: DMPlexGetAdjacency(dm, local[l], &adjSize, &adj);
575: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
576: }
577: ISGetIndices(rootrank, &rrank);
578: ISGetIndices(leafrank, &nrank);
579: /* Handle roots */
580: for (p = pStart; p < pEnd; ++p) {
581: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
583: if ((p >= sStart) && (p < sEnd)) {
584: /* Some leaves share a root with other leaves on different processes */
585: PetscSectionGetDof(leafSection, p, &neighbors);
586: if (neighbors) {
587: PetscSectionGetOffset(leafSection, p, &noff);
588: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
589: for (n = 0; n < neighbors; ++n) {
590: const PetscInt remoteRank = nrank[noff+n];
592: if (remoteRank == rank) continue;
593: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
594: }
595: }
596: }
597: /* Roots are shared with leaves */
598: PetscSectionGetDof(rootSection, p, &neighbors);
599: if (!neighbors) continue;
600: PetscSectionGetOffset(rootSection, p, &noff);
601: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
602: for (n = 0; n < neighbors; ++n) {
603: const PetscInt remoteRank = rrank[noff+n];
605: if (remoteRank == rank) continue;
606: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
607: }
608: }
609: PetscFree(adj);
610: ISRestoreIndices(rootrank, &rrank);
611: ISRestoreIndices(leafrank, &nrank);
612: /* Add additional overlap levels */
613: for (l = 1; l < levels; l++) {
614: /* Propagate point donations over SF to capture remote connections */
615: DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
616: /* Add next level of point donations to the label */
617: DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
618: }
619: /* We require the closure in the overlap */
620: DMPlexGetAdjacencyUseCone(dm, &useCone);
621: DMPlexGetAdjacencyUseClosure(dm, &useClosure);
622: if (useCone || !useClosure) {
623: DMPlexPartitionLabelClosure(dm, ovAdjByRank);
624: }
625: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
626: if (flg) {
627: DMLabelView(ovAdjByRank, PETSC_VIEWER_STDOUT_WORLD);
628: }
629: /* Make global process SF and invert sender to receiver label */
630: {
631: /* Build a global process SF */
632: PetscSFNode *remoteProc;
633: PetscMalloc1(numProcs, &remoteProc);
634: for (p = 0; p < numProcs; ++p) {
635: remoteProc[p].rank = p;
636: remoteProc[p].index = rank;
637: }
638: PetscSFCreate(comm, &sfProc);
639: PetscObjectSetName((PetscObject) sfProc, "Process SF");
640: PetscSFSetGraph(sfProc, numProcs, numProcs, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
641: }
642: DMLabelCreate("Overlap label", ovLabel);
643: DMPlexPartitionLabelInvert(dm, ovAdjByRank, sfProc, *ovLabel);
644: /* Add owned points, except for shared local points */
645: for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
646: for (l = 0; l < nleaves; ++l) {
647: DMLabelClearValue(*ovLabel, local[l], rank);
648: DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
649: }
650: /* Clean up */
651: DMLabelDestroy(&ovAdjByRank);
652: PetscSFDestroy(&sfProc);
653: return(0);
654: }
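/* Editor's note: illustrative sketch, not part of plexdistribute.c. It shows the ownership/overlap
   pipeline suggested by the .seealso links above: DMPlexDistributeOwnership() fills the rank
   sections, and DMPlexCreateOverlap() turns them into a label of points to send to each
   neighboring rank. The helper name is hypothetical. */
static PetscErrorCode ExampleCreateOverlapLabel(DM dm, PetscInt levels, DMLabel *ovLabel)
{
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  ierr = DMPlexCreateOverlap(dm, levels, rootSection, rootrank, leafSection, leafrank, ovLabel);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}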
658: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
659: {
660: MPI_Comm comm;
661: PetscMPIInt rank, numProcs;
662: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
663: PetscInt *pointDepths, *remoteDepths, *ilocal;
664: PetscInt *depthRecv, *depthShift, *depthIdx;
665: PetscSFNode *iremote;
666: PetscSF pointSF;
667: const PetscInt *sharedLocal;
668: const PetscSFNode *overlapRemote, *sharedRemote;
669: PetscErrorCode ierr;
673: PetscObjectGetComm((PetscObject)dm, &comm);
674: MPI_Comm_rank(comm, &rank);
675: MPI_Comm_size(comm, &numProcs);
676: DMGetDimension(dm, &dim);
678: /* Before building the migration SF we need to know the new stratum offsets */
679: PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
680: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
681: for (d=0; d<dim+1; d++) {
682: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
683: for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
684: }
685: for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
686: PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
687: PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);
689: /* Count received points in each stratum and compute the internal strata shift */
690: PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
691: for (d=0; d<dim+1; d++) depthRecv[d]=0;
692: for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
693: depthShift[dim] = 0;
694: for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
695: for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
696: for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
697: for (d=0; d<dim+1; d++) {
698: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
699: depthIdx[d] = pStart + depthShift[d];
700: }
702: /* Build an SF that describes the full overlap point migration */
703: DMPlexGetChart(dm, &pStart, &pEnd);
704: newLeaves = pEnd - pStart + nleaves;
705: PetscMalloc1(newLeaves, &ilocal);
706: PetscMalloc1(newLeaves, &iremote);
707: /* First map local points to themselves */
708: for (d=0; d<dim+1; d++) {
709: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
710: for (p=pStart; p<pEnd; p++) {
711: point = p + depthShift[d];
712: ilocal[point] = point;
713: iremote[point].index = p;
714: iremote[point].rank = rank;
715: depthIdx[d]++;
716: }
717: }
719: /* Add in the remote roots for currently shared points */
720: DMGetPointSF(dm, &pointSF);
721: PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
722: for (d=0; d<dim+1; d++) {
723: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
724: for (p=0; p<numSharedPoints; p++) {
725: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
726: point = sharedLocal[p] + depthShift[d];
727: iremote[point].index = sharedRemote[p].index;
728: iremote[point].rank = sharedRemote[p].rank;
729: }
730: }
731: }
733: /* Now add the incoming overlap points */
734: for (p=0; p<nleaves; p++) {
735: point = depthIdx[remoteDepths[p]];
736: ilocal[point] = point;
737: iremote[point].index = overlapRemote[p].index;
738: iremote[point].rank = overlapRemote[p].rank;
739: depthIdx[remoteDepths[p]]++;
740: }
741: PetscFree2(pointDepths,remoteDepths);
743: PetscSFCreate(comm, migrationSF);
744: PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
745: PetscSFSetFromOptions(*migrationSF);
746: DMPlexGetChart(dm, &pStart, &pEnd);
747: PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);
749: PetscFree3(depthRecv, depthShift, depthIdx);
750: return(0);
751: }
755: /*@
756: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF so that the resulting DM is stratified by point depth
758: Input Parameters:
759: + dm - The DM
760: - sf - A star forest with non-ordered leaves, usually defining a DM point migration
762: Output Parameter:
763: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
765: Level: developer
767: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
768: @*/
769: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
770: {
771: MPI_Comm comm;
772: PetscMPIInt rank, numProcs;
773: PetscInt d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
774: PetscInt *pointDepths, *remoteDepths, *ilocal;
775: PetscInt *depthRecv, *depthShift, *depthIdx;
776: const PetscSFNode *iremote;
777: PetscErrorCode ierr;
781: PetscObjectGetComm((PetscObject) dm, &comm);
782: MPI_Comm_rank(comm, &rank);
783: MPI_Comm_size(comm, &numProcs);
784: DMPlexGetDepth(dm, &ldepth);
785: MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
786: if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);
788: /* Before building the migration SF we need to know the new stratum offsets */
789: PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
790: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
791: for (d = 0; d < depth+1; ++d) {
792: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
793: for (p = pStart; p < pEnd; ++p) pointDepths[p] = d;
794: }
795: for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
796: PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);
797: PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);
798: /* Count received points in each stratum and compute the internal strata shift */
799: PetscMalloc3(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx);
800: for (d = 0; d < depth+1; ++d) depthRecv[d] = 0;
801: for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
802: depthShift[depth] = 0;
803: for (d = 0; d < depth; ++d) depthShift[d] = depthRecv[depth];
804: for (d = 1; d < depth; ++d) depthShift[d] += depthRecv[0];
805: for (d = depth-2; d > 0; --d) depthShift[d] += depthRecv[d+1];
806: for (d = 0; d < depth+1; ++d) {depthIdx[d] = 0;}
807: /* Derive a new local permutation based on stratified indices */
808: PetscMalloc1(nleaves, &ilocal);
809: for (p = 0; p < nleaves; ++p) {
810: const PetscInt dep = remoteDepths[p];
812: ilocal[p] = depthShift[dep] + depthIdx[dep];
813: depthIdx[dep]++;
814: }
815: PetscSFCreate(comm, migrationSF);
816: PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
817: PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
818: PetscFree2(pointDepths,remoteDepths);
819: PetscFree3(depthRecv, depthShift, depthIdx);
820: return(0);
821: }
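/* Editor's note: illustrative sketch, not part of plexdistribute.c. It mirrors the pattern used in
   DMPlexDistribute() further below: a freshly created migration SF is replaced by its stratified
   version so that the receiving DM keeps its points ordered by depth. */
static PetscErrorCode ExampleStratifyMigration(DM dm, PetscSF *sfMigration)
{
  PetscSF        sfStratified;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexStratifyMigrationSF(dm, *sfMigration, &sfStratified);CHKERRQ(ierr);
  ierr = PetscSFDestroy(sfMigration);CHKERRQ(ierr);
  *sfMigration = sfStratified;
  PetscFunctionReturn(0);
}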
825: /*@
826: DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
828: Collective on DM
830: Input Parameters:
831: + dm - The DMPlex object
832: . pointSF - The PetscSF describing the communication pattern
833: . originalSection - The PetscSection for existing data layout
834: - originalVec - The existing data
836: Output Parameters:
837: + newSection - The PetscSection describing the new data layout
838: - newVec - The new data
840: Level: developer
842: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
843: @*/
844: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
845: {
846: PetscSF fieldSF;
847: PetscInt *remoteOffsets, fieldSize;
848: PetscScalar *originalValues, *newValues;
852: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
853: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
855: PetscSectionGetStorageSize(newSection, &fieldSize);
856: VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
857: VecSetType(newVec,dm->vectype);
859: VecGetArray(originalVec, &originalValues);
860: VecGetArray(newVec, &newValues);
861: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
862: PetscFree(remoteOffsets);
863: PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
864: PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
865: PetscSFDestroy(&fieldSF);
866: VecRestoreArray(newVec, &newValues);
867: VecRestoreArray(originalVec, &originalValues);
868: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
869: return(0);
870: }
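/* Editor's note: illustrative sketch, not part of plexdistribute.c. It follows the pattern of
   DMPlexDistributeCoordinates() below: the caller creates an empty PetscSection and Vec, and
   DMPlexDistributeField() fills both according to the migration SF. The helper name and the use
   of PETSC_COMM_SELF for the local field vector are illustrative assumptions. */
static PetscErrorCode ExampleDistributeVec(DM dm, PetscSF migrationSF, PetscSection origSection, Vec origVec, PetscSection *newSection, Vec *newVec)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), newSection);CHKERRQ(ierr);
  ierr = VecCreate(PETSC_COMM_SELF, newVec);CHKERRQ(ierr);
  /* DMPlexDistributeField() sets the sizes and type of *newVec itself */
  ierr = DMPlexDistributeField(dm, migrationSF, origSection, origVec, *newSection, *newVec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}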
874: /*@
875: DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
877: Collective on DM
879: Input Parameters:
880: + dm - The DMPlex object
881: . pointSF - The PetscSF describing the communication pattern
882: . originalSection - The PetscSection for existing data layout
883: - originalIS - The existing data
885: Output Parameters:
886: + newSection - The PetscSection describing the new data layout
887: - newIS - The new data
889: Level: developer
891: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
892: @*/
893: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
894: {
895: PetscSF fieldSF;
896: PetscInt *newValues, *remoteOffsets, fieldSize;
897: const PetscInt *originalValues;
898: PetscErrorCode ierr;
901: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
902: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
904: PetscSectionGetStorageSize(newSection, &fieldSize);
905: PetscMalloc1(fieldSize, &newValues);
907: ISGetIndices(originalIS, &originalValues);
908: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
909: PetscFree(remoteOffsets);
910: PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
911: PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
912: PetscSFDestroy(&fieldSF);
913: ISRestoreIndices(originalIS, &originalValues);
914: ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
915: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
916: return(0);
917: }
921: /*@
922: DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
924: Collective on DM
926: Input Parameters:
927: + dm - The DMPlex object
928: . pointSF - The PetscSF describing the communication pattern
929: . originalSection - The PetscSection for existing data layout
930: . datatype - The type of data
931: - originalData - The existing data
933: Output Parameters:
934: + newSection - The PetscSection describing the new data layout
935: - newData - The new data
937: Level: developer
939: .seealso: DMPlexDistribute(), DMPlexDistributeField()
940: @*/
941: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
942: {
943: PetscSF fieldSF;
944: PetscInt *remoteOffsets, fieldSize;
945: PetscMPIInt dataSize;
949: PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
950: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
952: PetscSectionGetStorageSize(newSection, &fieldSize);
953: MPI_Type_size(datatype, &dataSize);
954: PetscMalloc(fieldSize * dataSize, newData);
956: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
957: PetscFree(remoteOffsets);
958: PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
959: PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
960: PetscSFDestroy(&fieldSF);
961: PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
962: return(0);
963: }
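/* Editor's note: illustrative sketch, not part of plexdistribute.c. DMPlexDistributeData() is the
   datatype-generic variant of the routines above; here it moves an integer array laid out by
   origSection over the migration SF. The caller owns and must PetscFree() the returned buffer.
   The helper name is hypothetical. */
static PetscErrorCode ExampleDistributeIntData(DM dm, PetscSF migrationSF, PetscSection origSection, PetscInt *origData, PetscSection *newSection, PetscInt **newData)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), newSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, (void *) origData, *newSection, (void **) newData);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}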
967: PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
968: {
969: DM_Plex *mesh = (DM_Plex*) dm->data;
970: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
971: MPI_Comm comm;
972: PetscSF coneSF;
973: PetscSection originalConeSection, newConeSection;
974: PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
975: PetscBool flg;
976: PetscErrorCode ierr;
981: PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
983: /* Distribute cone section */
984: PetscObjectGetComm((PetscObject)dm, &comm);
985: DMPlexGetConeSection(dm, &originalConeSection);
986: DMPlexGetConeSection(dmParallel, &newConeSection);
987: PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
988: DMSetUp(dmParallel);
989: {
990: PetscInt pStart, pEnd, p;
992: PetscSectionGetChart(newConeSection, &pStart, &pEnd);
993: for (p = pStart; p < pEnd; ++p) {
994: PetscInt coneSize;
995: PetscSectionGetDof(newConeSection, p, &coneSize);
996: pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
997: }
998: }
999: /* Communicate and renumber cones */
1000: PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
1001: PetscFree(remoteOffsets);
1002: DMPlexGetCones(dm, &cones);
1003: if (original) {
1004: PetscInt numCones;
1006: PetscSectionGetStorageSize(originalConeSection,&numCones); PetscMalloc1(numCones,&globCones);
1007: ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
1008: }
1009: else {
1010: globCones = cones;
1011: }
1012: DMPlexGetCones(dmParallel, &newCones);
1013: PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);
1014: PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);
1015: if (original) {
1016: PetscFree(globCones);
1017: }
1018: PetscSectionGetStorageSize(newConeSection, &newConesSize);
1019: ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
1020: #if PETSC_USE_DEBUG
1021: {
1022: PetscInt p;
1023: PetscBool valid = PETSC_TRUE;
1024: for (p = 0; p < newConesSize; ++p) {
1025: if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1026: }
1027: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1028: }
1029: #endif
1030: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
1031: if (flg) {
1032: PetscPrintf(comm, "Serial Cone Section:\n");
1033: PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);
1034: PetscPrintf(comm, "Parallel Cone Section:\n");
1035: PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);
1036: PetscSFView(coneSF, NULL);
1037: }
1038: DMPlexGetConeOrientations(dm, &cones);
1039: DMPlexGetConeOrientations(dmParallel, &newCones);
1040: PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
1041: PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
1042: PetscSFDestroy(&coneSF);
1043: PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
1044: /* Create supports and stratify sieve */
1045: {
1046: PetscInt pStart, pEnd;
1048: PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
1049: PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
1050: }
1051: DMPlexSymmetrize(dmParallel);
1052: DMPlexStratify(dmParallel);
1053: pmesh->useCone = mesh->useCone;
1054: pmesh->useClosure = mesh->useClosure;
1055: return(0);
1056: }
1060: PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1061: {
1062: MPI_Comm comm;
1063: PetscSection originalCoordSection, newCoordSection;
1064: Vec originalCoordinates, newCoordinates;
1065: PetscInt bs;
1066: const char *name;
1067: const PetscReal *maxCell, *L;
1068: const DMBoundaryType *bd;
1069: PetscErrorCode ierr;
1075: PetscObjectGetComm((PetscObject)dm, &comm);
1076: DMGetCoordinateSection(dm, &originalCoordSection);
1077: DMGetCoordinateSection(dmParallel, &newCoordSection);
1078: DMGetCoordinatesLocal(dm, &originalCoordinates);
1079: if (originalCoordinates) {
1080: VecCreate(PETSC_COMM_SELF, &newCoordinates);
1081: PetscObjectGetName((PetscObject) originalCoordinates, &name);
1082: PetscObjectSetName((PetscObject) newCoordinates, name);
1084: DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1085: DMSetCoordinatesLocal(dmParallel, newCoordinates);
1086: VecGetBlockSize(originalCoordinates, &bs);
1087: VecSetBlockSize(newCoordinates, bs);
1088: VecDestroy(&newCoordinates);
1089: }
1090: DMGetPeriodicity(dm, &maxCell, &L, &bd);
1091: if (L) {DMSetPeriodicity(dmParallel, maxCell, L, bd);}
1092: return(0);
1093: }
1097: /* Here we are assuming that process 0 always has everything */
1098: PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1099: {
1100: DM_Plex *mesh = (DM_Plex*) dm->data;
1101: MPI_Comm comm;
1102: DMLabel depthLabel;
1103: PetscMPIInt rank;
1104: PetscInt depth, d, numLabels, numLocalLabels, l;
1105: PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1106: PetscObjectState depthState = -1;
1107: PetscErrorCode ierr;
1112: PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1113: PetscObjectGetComm((PetscObject)dm, &comm);
1114: MPI_Comm_rank(comm, &rank);
1116: /* If the user has changed the depth label, communicate it instead */
1117: DMPlexGetDepth(dm, &depth);
1118: DMPlexGetDepthLabel(dm, &depthLabel);
1119: if (depthLabel) {DMLabelGetState(depthLabel, &depthState);}
1120: lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1121: MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1122: if (sendDepth) {
1123: DMRemoveLabel(dmParallel, "depth", &depthLabel);
1124: DMLabelDestroy(&depthLabel);
1125: }
1126: /* Everyone must have either the same number of labels, or none */
1127: DMGetNumLabels(dm, &numLocalLabels);
1128: numLabels = numLocalLabels;
1129: MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1130: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1131: for (l = numLabels-1; l >= 0; --l) {
1132: DMLabel label = NULL, labelNew = NULL;
1133: PetscBool isdepth;
1135: if (hasLabels) {
1136: DMGetLabelByNum(dm, l, &label);
1137: /* Skip "depth" because it is recreated */
1138: PetscStrcmp(label->name, "depth", &isdepth);
1139: }
1140: MPI_Bcast(&isdepth, 1, MPIU_BOOL, 0, comm);
1141: if (isdepth && !sendDepth) continue;
1142: DMLabelDistribute(label, migrationSF, &labelNew);
1143: if (isdepth) {
1144: /* Put in any missing strata which can occur if users are managing the depth label themselves */
1145: PetscInt gdepth;
1147: MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1148: if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1149: for (d = 0; d <= gdepth; ++d) {
1150: PetscBool has;
1152: DMLabelHasStratum(labelNew, d, &has);
1153: if (!has) DMLabelAddStratum(labelNew, d);
1154: }
1155: }
1156: DMAddLabel(dmParallel, labelNew);
1157: }
1158: PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1159: return(0);
1160: }
1164: PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1165: {
1166: DM_Plex *mesh = (DM_Plex*) dm->data;
1167: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1168: MPI_Comm comm;
1169: const PetscInt *gpoints;
1170: PetscInt dim, depth, n, d;
1171: PetscErrorCode ierr;
1177: PetscObjectGetComm((PetscObject)dm, &comm);
1178: DMGetDimension(dm, &dim);
1180: /* Setup hybrid structure */
1181: for (d = 0; d <= dim; ++d) {pmesh->hybridPointMax[d] = mesh->hybridPointMax[d];}
1182: MPI_Bcast(pmesh->hybridPointMax, dim+1, MPIU_INT, 0, comm);
1183: ISLocalToGlobalMappingGetSize(renumbering, &n);
1184: ISLocalToGlobalMappingGetIndices(renumbering, &gpoints);
1185: DMPlexGetDepth(dm, &depth);
1186: for (d = 0; d <= dim; ++d) {
1187: PetscInt pmax = pmesh->hybridPointMax[d], newmax = 0, pEnd, stratum[2], p;
1189: if (pmax < 0) continue;
1190: DMPlexGetDepthStratum(dm, d > depth ? depth : d, &stratum[0], &stratum[1]);
1191: DMPlexGetDepthStratum(dmParallel, d, NULL, &pEnd);
1192: MPI_Bcast(stratum, 2, MPIU_INT, 0, comm);
1193: for (p = 0; p < n; ++p) {
1194: const PetscInt point = gpoints[p];
1196: if ((point >= stratum[0]) && (point < stratum[1]) && (point >= pmax)) ++newmax;
1197: }
1198: if (newmax > 0) pmesh->hybridPointMax[d] = pEnd - newmax;
1199: else pmesh->hybridPointMax[d] = -1;
1200: }
1201: ISLocalToGlobalMappingRestoreIndices(renumbering, &gpoints);
1202: return(0);
1203: }
1207: PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1208: {
1209: DM_Plex *mesh = (DM_Plex*) dm->data;
1210: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1211: MPI_Comm comm;
1212: DM refTree;
1213: PetscSection origParentSection, newParentSection;
1214: PetscInt *origParents, *origChildIDs;
1215: PetscBool flg;
1216: PetscErrorCode ierr;
1221: PetscObjectGetComm((PetscObject)dm, &comm);
1223: /* Set up tree */
1224: DMPlexGetReferenceTree(dm,&refTree);
1225: DMPlexSetReferenceTree(dmParallel,refTree);
1226: DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1227: if (origParentSection) {
1228: PetscInt pStart, pEnd;
1229: PetscInt *newParents, *newChildIDs, *globParents;
1230: PetscInt *remoteOffsetsParents, newParentSize;
1231: PetscSF parentSF;
1233: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1234: PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1235: PetscSectionSetChart(newParentSection,pStart,pEnd);
1236: PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1237: PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1238: PetscFree(remoteOffsetsParents);
1239: PetscSectionGetStorageSize(newParentSection,&newParentSize);
1240: PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1241: if (original) {
1242: PetscInt numParents;
1244: PetscSectionGetStorageSize(origParentSection,&numParents);
1245: PetscMalloc1(numParents,&globParents);
1246: ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1247: }
1248: else {
1249: globParents = origParents;
1250: }
1251: PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);
1252: PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);
1253: if (original) {
1254: PetscFree(globParents);
1255: }
1256: PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1257: PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1258: ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1259: #if PETSC_USE_DEBUG
1260: {
1261: PetscInt p;
1262: PetscBool valid = PETSC_TRUE;
1263: for (p = 0; p < newParentSize; ++p) {
1264: if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1265: }
1266: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1267: }
1268: #endif
1269: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1270: if (flg) {
1271: PetscPrintf(comm, "Serial Parent Section: \n");
1272: PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);
1273: PetscPrintf(comm, "Parallel Parent Section: \n");
1274: PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);
1275: PetscSFView(parentSF, NULL);
1276: }
1277: DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1278: PetscSectionDestroy(&newParentSection);
1279: PetscFree2(newParents,newChildIDs);
1280: PetscSFDestroy(&parentSF);
1281: }
1282: pmesh->useAnchors = mesh->useAnchors;
1283: return(0);
1284: }
1288: PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1289: {
1290: DM_Plex *mesh = (DM_Plex*) dm->data;
1291: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1292: PetscMPIInt rank, numProcs;
1293: MPI_Comm comm;
1294: PetscErrorCode ierr;
1300: /* Create point SF for parallel mesh */
1301: PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1302: PetscObjectGetComm((PetscObject)dm, &comm);
1303: MPI_Comm_rank(comm, &rank);
1304: MPI_Comm_size(comm, &numProcs);
1305: {
1306: const PetscInt *leaves;
1307: PetscSFNode *remotePoints, *rowners, *lowners;
1308: PetscInt numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1309: PetscInt pStart, pEnd;
1311: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1312: PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1313: PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1314: for (p=0; p<numRoots; p++) {
1315: rowners[p].rank = -1;
1316: rowners[p].index = -1;
1317: }
1318: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1319: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1320: for (p = 0; p < numLeaves; ++p) {
1321: if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1322: lowners[p].rank = rank;
1323: lowners[p].index = leaves ? leaves[p] : p;
1324: } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
1325: lowners[p].rank = -2;
1326: lowners[p].index = -2;
1327: }
1328: }
1329: for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use it */
1330: rowners[p].rank = -3;
1331: rowners[p].index = -3;
1332: }
1333: PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1334: PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1335: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1336: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1337: for (p = 0; p < numLeaves; ++p) {
1338: if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1339: if (lowners[p].rank != rank) ++numGhostPoints;
1340: }
1341: PetscMalloc1(numGhostPoints, &ghostPoints);
1342: PetscMalloc1(numGhostPoints, &remotePoints);
1343: for (p = 0, gp = 0; p < numLeaves; ++p) {
1344: if (lowners[p].rank != rank) {
1345: ghostPoints[gp] = leaves ? leaves[p] : p;
1346: remotePoints[gp].rank = lowners[p].rank;
1347: remotePoints[gp].index = lowners[p].index;
1348: ++gp;
1349: }
1350: }
1351: PetscFree2(rowners,lowners);
1352: PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1353: PetscSFSetFromOptions((dmParallel)->sf);
1354: }
1355: pmesh->useCone = mesh->useCone;
1356: pmesh->useClosure = mesh->useClosure;
1357: PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1358: return(0);
1359: }
1363: /*@C
1364: DMPlexCreatePointSF - Build a point SF from an SF describing a point migration
1366: Input Parameters:
1367: + dm - The source DMPlex object
1368: . migrationSF - The star forest that describes the parallel point remapping
1369: - ownership - Flag causing a vote to determine point ownership
1371: Output Parameter:
1372: . pointSF - The star forest describing the point overlap in the remapped DM
1374: Level: developer
1376: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1377: @*/
1378: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1379: {
1380: PetscMPIInt rank;
1381: PetscInt p, nroots, nleaves, idx, npointLeaves;
1382: PetscInt *pointLocal;
1383: const PetscInt *leaves;
1384: const PetscSFNode *roots;
1385: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1386: PetscErrorCode ierr;
1390: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1392: PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1393: PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1394: if (ownership) {
1395: /* Point ownership vote: Process with highest rank owns shared points */
1396: for (p = 0; p < nleaves; ++p) {
1397: /* Either put in a bid or we know we own it */
1398: leafNodes[p].rank = rank;
1399: leafNodes[p].index = p;
1400: }
1401: for (p = 0; p < nroots; p++) {
1402: /* Root must not participate in the reduction, flag so that MAXLOC does not use it */
1403: rootNodes[p].rank = -3;
1404: rootNodes[p].index = -3;
1405: }
1406: PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1407: PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1408: } else {
1409: for (p = 0; p < nroots; p++) {
1410: rootNodes[p].index = -1;
1411: rootNodes[p].rank = rank;
1412: };
1413: for (p = 0; p < nleaves; p++) {
1414: /* Write new local id into old location */
1415: if (roots[p].rank == rank) {
1416: rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1417: }
1418: }
1419: }
1420: PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1421: PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1423: for (npointLeaves = 0, p = 0; p < nleaves; p++) {if (leafNodes[p].rank != rank) npointLeaves++;}
1424: PetscMalloc1(npointLeaves, &pointLocal);
1425: PetscMalloc1(npointLeaves, &pointRemote);
1426: for (idx = 0, p = 0; p < nleaves; p++) {
1427: if (leafNodes[p].rank != rank) {
1428: pointLocal[idx] = p;
1429: pointRemote[idx] = leafNodes[p];
1430: idx++;
1431: }
1432: }
1433: PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1434: PetscSFSetFromOptions(*pointSF);
1435: PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1436: PetscFree2(rootNodes, leafNodes);
1437: return(0);
1438: }
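/* Editor's note: illustrative sketch, not part of plexdistribute.c. It mirrors the use of
   DMPlexCreatePointSF() in DMPlexDistribute() below: after migrating the mesh, the point SF of the
   new DM (and of its coordinate DM) is derived from the migration SF with an ownership vote. */
static PetscErrorCode ExampleSetPointSF(DM dmParallel, PetscSF migrationSF)
{
  DM             dmCoord;
  PetscSF        sfPoint;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexCreatePointSF(dmParallel, migrationSF, PETSC_TRUE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(dmParallel, sfPoint);CHKERRQ(ierr);
  ierr = DMGetCoordinateDM(dmParallel, &dmCoord);CHKERRQ(ierr);
  if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);}
  /* DMSetPointSF() takes its own reference, so the local one can be released */
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}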
1442: /*@C
1443: DMPlexMigrate - Migrates internal DM data over the supplied star forest
1445: Input Parameters:
1446: + dm - The source DMPlex object
1447: - sf - The star forest communication context describing the migration pattern
1449: Output Parameter:
1450: . targetDM - The target DMPlex object
1452: Level: intermediate
1454: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1455: @*/
1456: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1457: {
1458: MPI_Comm comm;
1459: PetscInt dim, nroots;
1460: PetscSF sfPoint;
1461: ISLocalToGlobalMapping ltogMigration;
1462: ISLocalToGlobalMapping ltogOriginal = NULL;
1463: PetscBool flg;
1464: PetscErrorCode ierr;
1468: PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1469: PetscObjectGetComm((PetscObject) dm, &comm);
1470: DMGetDimension(dm, &dim);
1471: DMSetDimension(targetDM, dim);
1473: /* Check for a one-to-all distribution pattern */
1474: DMGetPointSF(dm, &sfPoint);
1475: PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1476: if (nroots >= 0) {
1477: IS isOriginal;
1478: PetscInt n, size, nleaves;
1479: PetscInt *numbering_orig, *numbering_new;
1480: /* Get the original point numbering */
1481: DMPlexCreatePointNumbering(dm, &isOriginal);
1482: ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal);
1483: ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1484: ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1485: /* Convert to positive global numbers */
1486: for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1487: /* Derive the new local-to-global mapping from the old one */
1488: PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1489: PetscMalloc1(nleaves, &numbering_new);
1490: PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1491: PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1492: ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, <ogMigration);
1493: ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1494: ISDestroy(&isOriginal);
1495: } else {
1496: /* One-to-all distribution pattern: We can derive LToG from SF */
1497: ISLocalToGlobalMappingCreateSF(sf, 0, <ogMigration);
1498: }
1499: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1500: if (flg) {
1501: PetscPrintf(comm, "Point renumbering for DM migration:\n");
1502: ISLocalToGlobalMappingView(ltogMigration, NULL);
1503: }
1504: /* Migrate DM data to target DM */
1505: DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1506: DMPlexDistributeCoordinates(dm, sf, targetDM);
1507: DMPlexDistributeLabels(dm, sf, targetDM);
1508: DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);
1509: DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1510: ISLocalToGlobalMappingDestroy(<ogOriginal);
1511: ISLocalToGlobalMappingDestroy(<ogMigration);
1512: PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1513: return(0);
1514: }
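/* Editor's note: illustrative sketch, not part of plexdistribute.c. It shows a minimal calling
   sequence for DMPlexMigrate(): the target DM is created empty on the same communicator and then
   populated from the source DM according to the migration SF, as DMPlexDistribute() does below. */
static PetscErrorCode ExampleMigrate(DM dm, PetscSF migrationSF, DM *dmTarget)
{
  MPI_Comm       comm;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = DMPlexCreate(comm, dmTarget);CHKERRQ(ierr);
  ierr = DMPlexMigrate(dm, migrationSF, *dmTarget);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}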
1518: /*@C
1519: DMPlexDistribute - Distributes the mesh and any associated sections.
1521: Collective on DM
1523: Input Parameters:
1524: + dm - The original DMPlex object
1525: - overlap - The overlap of partitions, 0 is the default
1527: Output Parameters:
1528: + sf - The PetscSF used for point distribution
1529: - dmParallel - The distributed DMPlex object, or NULL
1531: Note: If the mesh was not distributed, dmParallel is set to NULL.
1533: The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1534: DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1535: representation on the mesh.
1537: Level: intermediate
1539: .keywords: mesh, elements
1540: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1541: @*/
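/* Editor's note: illustrative usage sketch, not part of plexdistribute.c. It shows the common
   caller-side pattern for DMPlexDistribute(): request zero overlap, ignore the migration SF by
   passing NULL, and replace the serial DM only when a parallel mesh was actually produced. */
static PetscErrorCode ExampleDistributeMesh(DM *dm)
{
  DM             dmDist = NULL;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexDistribute(*dm, 0, NULL, &dmDist);CHKERRQ(ierr);
  if (dmDist) {                 /* NULL when running on a single process */
    ierr = DMDestroy(dm);CHKERRQ(ierr);
    *dm  = dmDist;
  }
  PetscFunctionReturn(0);
}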
1542: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1543: {
1544: MPI_Comm comm;
1545: PetscPartitioner partitioner;
1546: IS cellPart;
1547: PetscSection cellPartSection;
1548: DM dmCoord;
1549: DMLabel lblPartition, lblMigration;
1550: PetscSF sfProcess, sfMigration, sfStratified, sfPoint;
1551: PetscBool flg;
1552: PetscMPIInt rank, numProcs, p;
1553: PetscErrorCode ierr;
1560: PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1561: PetscObjectGetComm((PetscObject)dm,&comm);
1562: MPI_Comm_rank(comm, &rank);
1563: MPI_Comm_size(comm, &numProcs);
1565: *dmParallel = NULL;
1566: if (numProcs == 1) return(0);
1568: /* Create cell partition */
1569: PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);
1570: PetscSectionCreate(comm, &cellPartSection);
1571: DMPlexGetPartitioner(dm, &partitioner);
1572: PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);
1573: {
1574: /* Convert partition to DMLabel */
1575: PetscInt proc, pStart, pEnd, npoints, poffset;
1576: const PetscInt *points;
1577: DMLabelCreate("Point Partition", &lblPartition);
1578: ISGetIndices(cellPart, &points);
1579: PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1580: for (proc = pStart; proc < pEnd; proc++) {
1581: PetscSectionGetDof(cellPartSection, proc, &npoints);
1582: PetscSectionGetOffset(cellPartSection, proc, &poffset);
1583: for (p = poffset; p < poffset+npoints; p++) {
1584: DMLabelSetValue(lblPartition, points[p], proc);
1585: }
1586: }
1587: ISRestoreIndices(cellPart, &points);
1588: }
1589: DMPlexPartitionLabelClosure(dm, lblPartition);
1590: {
1591: /* Build a global process SF */
1592: PetscSFNode *remoteProc;
1593: PetscMalloc1(numProcs, &remoteProc);
1594: for (p = 0; p < numProcs; ++p) {
1595: remoteProc[p].rank = p;
1596: remoteProc[p].index = rank;
1597: }
1598: PetscSFCreate(comm, &sfProcess);
1599: PetscObjectSetName((PetscObject) sfProcess, "Process SF");
1600: PetscSFSetGraph(sfProcess, numProcs, numProcs, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
1601: }
1602: DMLabelCreate("Point migration", &lblMigration);
1603: DMPlexPartitionLabelInvert(dm, lblPartition, sfProcess, lblMigration);
1604: DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1605: /* Stratify the SF in case we are migrating an already parallel plex */
1606: DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1607: PetscSFDestroy(&sfMigration);
1608: sfMigration = sfStratified;
1609: PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);
1610: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1611: if (flg) {
1612: DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);
1613: PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);
1614: }
1616: /* Create non-overlapping parallel DM and migrate internal data */
1617: DMPlexCreate(comm, dmParallel);
1618: PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1619: DMPlexMigrate(dm, sfMigration, *dmParallel);
1621: /* Build the point SF without overlap */
1622: DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1623: DMSetPointSF(*dmParallel, sfPoint);
1624: DMGetCoordinateDM(*dmParallel, &dmCoord);
1625: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1626: if (flg) {PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);}
1628: if (overlap > 0) {
1629: DM dmOverlap;
1630: PetscInt nroots, nleaves;
1631: PetscSFNode *newRemote;
1632: const PetscSFNode *oldRemote;
1633: PetscSF sfOverlap, sfOverlapPoint;
1634: /* Add the partition overlap to the distributed DM */
1635: DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1636: DMDestroy(dmParallel);
1637: *dmParallel = dmOverlap;
1638: if (flg) {
1639: PetscPrintf(comm, "Overlap Migration SF:\n");
1640: PetscSFView(sfOverlap, NULL);
1641: }
1643: /* Re-map the migration SF to establish the full migration pattern */
1644: PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);
1645: PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1646: PetscMalloc1(nleaves, &newRemote);
1647: PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1648: PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1649: PetscSFCreate(comm, &sfOverlapPoint);
1650: PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1651: PetscSFDestroy(&sfOverlap);
1652: PetscSFDestroy(&sfMigration);
1653: sfMigration = sfOverlapPoint;
1654: }
1655: /* Cleanup Partition */
1656: PetscSFDestroy(&sfProcess);
1657: DMLabelDestroy(&lblPartition);
1658: DMLabelDestroy(&lblMigration);
1659: PetscSectionDestroy(&cellPartSection);
1660: ISDestroy(&cellPart);
1661: /* Copy BC */
1662: DMCopyBoundary(dm, *dmParallel);
1663: /* Create sfNatural */
1664: if (dm->useNatural) {
1665: PetscSection section;
1667: DMGetDefaultSection(dm, &section);
1668: DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1669: }
1670: /* Cleanup */
1671: if (sf) {*sf = sfMigration;}
1672: else {PetscSFDestroy(&sfMigration);}
1673: PetscSFDestroy(&sfPoint);
1674: PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1675: return(0);
1676: }
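/* Usage sketch (not part of the library source): distribute a serial plex with zero overlap
   and adopt the parallel mesh when one is produced.  FEM-style adjacency is selected first,
   as the Note in DMPlexDistribute() suggests.  The helper name ExampleDistributeInPlace is an
   assumption; error checking is omitted to match the rest of this listing. */
#if 0
static PetscErrorCode ExampleDistributeInPlace(DM *dm)
{
  DM      dmDist = NULL;
  PetscSF sfDist = NULL;

  /* Select FEM-style adjacency before distributing */
  DMPlexSetAdjacencyUseCone(*dm, PETSC_FALSE);
  DMPlexSetAdjacencyUseClosure(*dm, PETSC_TRUE);
  /* overlap = 0; dmDist remains NULL on a single process */
  DMPlexDistribute(*dm, 0, &sfDist, &dmDist);
  if (dmDist) {
    DMDestroy(dm);
    *dm = dmDist;
  }
  PetscSFDestroy(&sfDist);
  return(0);
}
#endif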
1680: /*@C
1681: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.
1683: Collective on DM
1685: Input Parameters:
1686: + dm - The non-overlapping distributed DMPlex object
1687: - overlap - The overlap of partitions, 0 is the default
1689: Output Parameters:
1690: + sf - The PetscSF used for point distribution
1691: - dmOverlap - The overlapping distributed DMPlex object, or NULL
1693: Note: If the mesh was not distributed, the return value is NULL.
1695: The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1696: DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1697: representation on the mesh.
1699: Level: intermediate
1701: .keywords: mesh, elements
1702: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1703: @*/
1704: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1705: {
1706: MPI_Comm comm;
1707: PetscMPIInt rank;
1708: PetscSection rootSection, leafSection;
1709: IS rootrank, leafrank;
1710: DM dmCoord;
1711: DMLabel lblOverlap;
1712: PetscSF sfOverlap, sfStratified, sfPoint;
1713: PetscErrorCode ierr;
1720: PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1721: PetscObjectGetComm((PetscObject)dm,&comm);
1722: MPI_Comm_rank(comm, &rank);
1724: /* Compute point overlap with neighbouring processes on the distributed DM */
1725: PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);
1726: PetscSectionCreate(comm, &rootSection);
1727: PetscSectionCreate(comm, &leafSection);
1728: DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1729: DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1730: /* Convert overlap label to stratified migration SF */
1731: DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1732: DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1733: PetscSFDestroy(&sfOverlap);
1734: sfOverlap = sfStratified;
1735: PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1736: PetscSFSetFromOptions(sfOverlap);
1738: PetscSectionDestroy(&rootSection);
1739: PetscSectionDestroy(&leafSection);
1740: ISDestroy(&rootrank);
1741: ISDestroy(&leafrank);
1742: PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);
1744: /* Build the overlapping DM */
1745: DMPlexCreate(comm, dmOverlap);
1746: PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1747: DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1748: /* Build the new point SF */
1749: DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1750: DMSetPointSF(*dmOverlap, sfPoint);
1751: DMGetCoordinateDM(*dmOverlap, &dmCoord);
1752: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1753: PetscSFDestroy(&sfPoint);
1754: /* Cleanup overlap partition */
1755: DMLabelDestroy(&lblOverlap);
1756: if (sf) *sf = sfOverlap;
1757: else {PetscSFDestroy(&sfOverlap);}
1758: PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1759: return(0);
1760: }
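/* Usage sketch (not part of the library source): grow one level of overlap on a mesh that was
   previously distributed without overlap, discarding the overlap SF afterwards.  Names are
   assumptions; error checking is omitted to match the rest of this listing. */
#if 0
static PetscErrorCode ExampleAddOverlap(DM *dmDist)
{
  DM      dmOver = NULL;
  PetscSF sfOver = NULL;

  DMPlexDistributeOverlap(*dmDist, 1, &sfOver, &dmOver);
  if (dmOver) {                  /* NULL if the mesh was not distributed */
    DMDestroy(dmDist);
    *dmDist = dmOver;
  }
  PetscSFDestroy(&sfOver);
  return(0);
}
#endif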
1764: /*@C
1765: DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1766: root process of the original's communicator.
1768: Input Parameter:
1769: . dm - the original DMPlex object
1771: Output Parameter:
1772: . gatherMesh - the gathered DM object, or NULL
1774: Level: intermediate
1776: .keywords: mesh
1777: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1778: @*/
1779: PetscErrorCode DMPlexGetGatherDM(DM dm, DM * gatherMesh)
1780: {
1781: MPI_Comm comm;
1782: PetscMPIInt size;
1783: PetscPartitioner oldPart, gatherPart;
1788: comm = PetscObjectComm((PetscObject)dm);
1789: MPI_Comm_size(comm,&size);
1790: *gatherMesh = NULL;
1791: if (size == 1) return(0);
1792: DMPlexGetPartitioner(dm,&oldPart);
1793: PetscObjectReference((PetscObject)oldPart);
1794: PetscPartitionerCreate(comm,&gatherPart);
1795: PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1796: DMPlexSetPartitioner(dm,gatherPart);
1797: DMPlexDistribute(dm,0,NULL,gatherMesh);
1798: DMPlexSetPartitioner(dm,oldPart);
1799: PetscPartitionerDestroy(&gatherPart);
1800: PetscPartitionerDestroy(&oldPart);
1801: return(0);
1802: }
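/* Usage sketch (not part of the library source): gather the mesh onto the root process, for
   example to view the whole mesh in one place, then discard the gathered copy.  The helper
   name ExampleViewGathered and the viewer argument are assumptions; error checking is omitted
   to match the rest of this listing. */
#if 0
static PetscErrorCode ExampleViewGathered(DM dm, PetscViewer viewer)
{
  DM gatherMesh = NULL;

  DMPlexGetGatherDM(dm, &gatherMesh);
  if (!gatherMesh) return(0);     /* single-process run: nothing was gathered */
  DMView(gatherMesh, viewer);
  DMDestroy(&gatherMesh);
  return(0);
}
#endif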
1806: /*@C
1807: DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.
1809: Input Parameter:
1810: . dm - the original DMPlex object
1812: Output Parameter:
1813: . redundantMesh - the redundant DM object, or NULL
1815: Level: intermediate
1817: .keywords: mesh
1818: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1819: @*/
1820: PetscErrorCode DMPlexGetRedundantDM(DM dm, DM * redundantMesh)
1821: {
1822: MPI_Comm comm;
1823: PetscMPIInt size, rank;
1824: PetscInt pStart, pEnd, p;
1825: PetscInt numPoints = -1;
1826: PetscSF migrationSF, sfPoint;
1827: DM gatherDM, dmCoord;
1828: PetscSFNode *points;
1833: comm = PetscObjectComm((PetscObject)dm);
1834: MPI_Comm_size(comm,&size);
1835: *redundantMesh = NULL;
1836: if (size == 1) return(0);
1837: DMPlexGetGatherDM(dm,&gatherDM);
1838: if (!gatherDM) return(0);
1839: MPI_Comm_rank(comm,&rank);
1840: DMPlexGetChart(gatherDM,&pStart,&pEnd);
1841: numPoints = pEnd - pStart;
1842: MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1843: PetscMalloc1(numPoints,&points);
1844: PetscSFCreate(comm,&migrationSF);
1845: for (p = 0; p < numPoints; p++) {
1846: points[p].index = p;
1847: points[p].rank = 0;
1848: }
1849: PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1850: DMPlexCreate(comm, redundantMesh);
1851: PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1852: DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1853: DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1854: DMSetPointSF(*redundantMesh, sfPoint);
1855: DMGetCoordinateDM(*redundantMesh, &dmCoord);
1856: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1857: PetscSFDestroy(&sfPoint);
1858: PetscSFDestroy(&migrationSF);
1859: DMDestroy(&gatherDM);
1860: return(0);
1861: }
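/* Usage sketch (not part of the library source): obtain a mesh that is fully replicated on
   every process, falling back to the original DM in a single-process run where no copy is
   made.  The helper name ExampleGetFullCopy is an assumption; error checking is omitted to
   match the rest of this listing. */
#if 0
static PetscErrorCode ExampleGetFullCopy(DM dm, DM *fullCopy)
{
  *fullCopy = NULL;
  DMPlexGetRedundantDM(dm, fullCopy);
  if (!*fullCopy) {               /* serial run: reuse the original mesh */
    PetscObjectReference((PetscObject) dm);
    *fullCopy = dm;
  }
  return(0);
}
#endif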