Actual source code: plexdistribute.c
petsc-3.6.1 2015-08-06
1: #include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/
5: /*@
6: DMPlexSetAdjacencyUseCone - Define adjacency in the mesh using either the cone or the support first
8: Input Parameters:
9: + dm - The DM object
10: - useCone - Flag to use the cone first
12: Level: intermediate
14: Notes:
15: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
16: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
17: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
19: .seealso: DMPlexGetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
20: @*/
21: PetscErrorCode DMPlexSetAdjacencyUseCone(DM dm, PetscBool useCone)
22: {
23: DM_Plex *mesh = (DM_Plex *) dm->data;
27: mesh->useCone = useCone;
28: return(0);
29: }
33: /*@
34: DMPlexGetAdjacencyUseCone - Query whether adjacency in the mesh uses the cone or the support first
36: Input Parameter:
37: . dm - The DM object
39: Output Parameter:
40: . useCone - Flag to use the cone first
42: Level: intermediate
44: Notes:
45: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
46: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
47: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
49: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
50: @*/
51: PetscErrorCode DMPlexGetAdjacencyUseCone(DM dm, PetscBool *useCone)
52: {
53: DM_Plex *mesh = (DM_Plex *) dm->data;
58: *useCone = mesh->useCone;
59: return(0);
60: }
64: /*@
65: DMPlexSetAdjacencyUseClosure - Define adjacency in the mesh using the transitive closure
67: Input Parameters:
68: + dm - The DM object
69: - useClosure - Flag to use the closure
71: Level: intermediate
73: Notes:
74: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
75: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
76: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
78: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
79: @*/
80: PetscErrorCode DMPlexSetAdjacencyUseClosure(DM dm, PetscBool useClosure)
81: {
82: DM_Plex *mesh = (DM_Plex *) dm->data;
86: mesh->useClosure = useClosure;
87: return(0);
88: }
92: /*@
93: DMPlexGetAdjacencyUseClosure - Query whether adjacency in the mesh uses the transitive closure
95: Input Parameter:
96: . dm - The DM object
98: Output Parameter:
99: . useClosure - Flag to use the closure
101: Level: intermediate
103: Notes:
104: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
105: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
106: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
108: .seealso: DMPlexSetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
109: @*/
110: PetscErrorCode DMPlexGetAdjacencyUseClosure(DM dm, PetscBool *useClosure)
111: {
112: DM_Plex *mesh = (DM_Plex *) dm->data;
117: *useClosure = mesh->useClosure;
118: return(0);
119: }
123: /*@
124: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
126: Input Parameters:
127: + dm - The DM object
128: - useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
130: Level: intermediate
132: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
133: @*/
134: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
135: {
136: DM_Plex *mesh = (DM_Plex *) dm->data;
140: mesh->useAnchors = useAnchors;
141: return(0);
142: }
146: /*@
147: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
149: Input Parameter:
150: . dm - The DM object
152: Output Parameter:
153: . useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
155: Level: intermediate
157: .seealso: DMPlexSetAdjacencyUseAnchors(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
158: @*/
159: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
160: {
161: DM_Plex *mesh = (DM_Plex *) dm->data;
166: *useAnchors = mesh->useAnchors;
167: return(0);
168: }
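/*
  Usage sketch (illustrative only, not part of the original source; the helper name is hypothetical):
  selecting the FVM adjacency pattern described in the Notes above, q \in support(cone(p)),
  before distribution or matrix preallocation.
*/
static PetscErrorCode ExampleConfigureFVMAdjacency(DM dm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexSetAdjacencyUseCone(dm, PETSC_TRUE);CHKERRQ(ierr);     /* take the cone first              */
  ierr = DMPlexSetAdjacencyUseClosure(dm, PETSC_FALSE);CHKERRQ(ierr); /* do not take the transitive closure */
  ierr = DMPlexSetAdjacencyUseAnchors(dm, PETSC_FALSE);CHKERRQ(ierr); /* keep constrained points as-is      */
  PetscFunctionReturn(0);
}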
172: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
173: {
174: const PetscInt *cone = NULL;
175: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
176: PetscErrorCode ierr;
179: DMPlexGetConeSize(dm, p, &coneSize);
180: DMPlexGetCone(dm, p, &cone);
181: for (c = 0; c <= coneSize; ++c) {
182: const PetscInt point = !c ? p : cone[c-1];
183: const PetscInt *support = NULL;
184: PetscInt supportSize, s, q;
186: DMPlexGetSupportSize(dm, point, &supportSize);
187: DMPlexGetSupport(dm, point, &support);
188: for (s = 0; s < supportSize; ++s) {
189: for (q = 0; q < numAdj || (adj[numAdj++] = support[s],0); ++q) {
190: if (support[s] == adj[q]) break;
191: }
192: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
193: }
194: }
195: *adjSize = numAdj;
196: return(0);
197: }
201: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
202: {
203: const PetscInt *support = NULL;
204: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
205: PetscErrorCode ierr;
208: DMPlexGetSupportSize(dm, p, &supportSize);
209: DMPlexGetSupport(dm, p, &support);
210: for (s = 0; s <= supportSize; ++s) {
211: const PetscInt point = !s ? p : support[s-1];
212: const PetscInt *cone = NULL;
213: PetscInt coneSize, c, q;
215: DMPlexGetConeSize(dm, point, &coneSize);
216: DMPlexGetCone(dm, point, &cone);
217: for (c = 0; c < coneSize; ++c) {
218: for (q = 0; q < numAdj || (adj[numAdj++] = cone[c],0); ++q) {
219: if (cone[c] == adj[q]) break;
220: }
221: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
222: }
223: }
224: *adjSize = numAdj;
225: return(0);
226: }
230: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
231: {
232: PetscInt *star = NULL;
233: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
237: DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
238: for (s = 0; s < starSize*2; s += 2) {
239: const PetscInt *closure = NULL;
240: PetscInt closureSize, c, q;
242: DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
243: for (c = 0; c < closureSize*2; c += 2) {
244: for (q = 0; q < numAdj || (adj[numAdj++] = closure[c],0); ++q) {
245: if (closure[c] == adj[q]) break;
246: }
247: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
248: }
249: DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
250: }
251: DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
252: *adjSize = numAdj;
253: return(0);
254: }
258: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
259: {
260: static PetscInt asiz = 0;
261: PetscInt maxAnchors = 1;
262: PetscInt aStart = -1, aEnd = -1;
263: PetscInt maxAdjSize;
264: PetscSection aSec = NULL;
265: IS aIS = NULL;
266: const PetscInt *anchors;
267: PetscErrorCode ierr;
270: if (useAnchors) {
271: DMPlexGetAnchors(dm,&aSec,&aIS);
272: if (aSec) {
273: PetscSectionGetMaxDof(aSec,&maxAnchors);
274: maxAnchors = PetscMax(1,maxAnchors);
275: PetscSectionGetChart(aSec,&aStart,&aEnd);
276: ISGetIndices(aIS,&anchors);
277: }
278: }
279: if (!*adj) {
280: PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;
282: DMPlexGetChart(dm, &pStart,&pEnd);
283: DMPlexGetDepth(dm, &depth);
284: DMPlexGetMaxSizes(dm, &maxC, &maxS);
285: coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
286: supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
287: asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
288: asiz *= maxAnchors;
289: asiz = PetscMin(asiz,pEnd-pStart);
290: PetscMalloc1(asiz,adj);
291: }
292: if (*adjSize < 0) *adjSize = asiz;
293: maxAdjSize = *adjSize;
294: if (useTransitiveClosure) {
295: DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
296: } else if (useCone) {
297: DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
298: } else {
299: DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
300: }
301: if (useAnchors && aSec) {
302: PetscInt origSize = *adjSize;
303: PetscInt numAdj = origSize;
304: PetscInt i = 0, j;
305: PetscInt *orig = *adj;
307: while (i < origSize) {
308: PetscInt p = orig[i];
309: PetscInt aDof = 0;
311: if (p >= aStart && p < aEnd) {
312: PetscSectionGetDof(aSec,p,&aDof);
313: }
314: if (aDof) {
315: PetscInt aOff;
316: PetscInt s, q;
318: for (j = i + 1; j < numAdj; j++) {
319: orig[j - 1] = orig[j];
320: }
321: origSize--;
322: numAdj--;
323: PetscSectionGetOffset(aSec,p,&aOff);
324: for (s = 0; s < aDof; ++s) {
325: for (q = 0; q < numAdj || (orig[numAdj++] = anchors[aOff+s],0); ++q) {
326: if (anchors[aOff+s] == orig[q]) break;
327: }
328: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
329: }
330: }
331: else {
332: i++;
333: }
334: }
335: *adjSize = numAdj;
336: ISRestoreIndices(aIS,&anchors);
337: }
338: return(0);
339: }
343: /*@
344: DMPlexGetAdjacency - Return all points adjacent to the given point
346: Input Parameters:
347: + dm - The DM object
348: . p - The point
349: . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
350: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize
352: Output Parameters:
353: + adjSize - The number of adjacent points
354: - adj - The adjacent points
356: Level: advanced
358: Notes: The user must PetscFree the adj array if it was not passed in.
360: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
361: @*/
362: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
363: {
364: DM_Plex *mesh = (DM_Plex *) dm->data;
371: DMPlexGetAdjacency_Internal(dm, p, mesh->useCone, mesh->useClosure, mesh->useAnchors, adjSize, adj);
372: return(0);
373: }
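/*
  Usage sketch (illustrative only; the helper name is hypothetical): letting DMPlexGetAdjacency()
  size and allocate the adjacency array by passing adjSize = PETSC_DETERMINE and adj = NULL,
  then freeing the array afterwards as the Notes above require.
*/
static PetscErrorCode ExamplePrintAdjacency(DM dm, PetscInt p)
{
  PetscInt       adjSize = PETSC_DETERMINE, *adj = NULL, a;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
  for (a = 0; a < adjSize; ++a) {
    ierr = PetscPrintf(PETSC_COMM_SELF, "point %D is adjacent to %D\n", p, adj[a]);CHKERRQ(ierr);
  }
  ierr = PetscFree(adj);CHKERRQ(ierr); /* array was allocated by DMPlexGetAdjacency() */
  PetscFunctionReturn(0);
}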
376: /*@
377: DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity
379: Collective on DM
381: Input Parameters:
382: + dm - The DM
383: . sfPoint - The PetscSF which encodes point connectivity
. rootRankSection - The number of leaves for a given root point
. rootRanks - The rank of each edge into the root point
. leafRankSection - The number of processes sharing a given leaf point
- leafRanks - The rank of each process sharing a leaf point
385: Output Parameters:
386: + processRanks - A list of process neighbors, or NULL
387: - sfProcess - An SF encoding the two-sided process connectivity, or NULL
389: Level: developer
391: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
392: @*/
393: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
394: {
395: const PetscSFNode *remotePoints;
396: PetscInt *localPointsNew;
397: PetscSFNode *remotePointsNew;
398: const PetscInt *nranks;
399: PetscInt *ranksNew;
400: PetscBT neighbors;
401: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
402: PetscMPIInt numProcs, proc, rank;
403: PetscErrorCode ierr;
410: MPI_Comm_size(PetscObjectComm((PetscObject) dm), &numProcs);
411: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
412: PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
413: PetscBTCreate(numProcs, &neighbors);
414: PetscBTMemzero(numProcs, neighbors);
415: /* Compute root-to-leaf process connectivity */
416: PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
417: ISGetIndices(rootRanks, &nranks);
418: for (p = pStart; p < pEnd; ++p) {
419: PetscInt ndof, noff, n;
421: PetscSectionGetDof(rootRankSection, p, &ndof);
422: PetscSectionGetOffset(rootRankSection, p, &noff);
423: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
424: }
425: ISRestoreIndices(rootRanks, &nranks);
426: /* Compute leaf-to-neighbor process connectivity */
427: PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
428: ISGetIndices(leafRanks, &nranks);
429: for (p = pStart; p < pEnd; ++p) {
430: PetscInt ndof, noff, n;
432: PetscSectionGetDof(leafRankSection, p, &ndof);
433: PetscSectionGetOffset(leafRankSection, p, &noff);
434: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
435: }
436: ISRestoreIndices(leafRanks, &nranks);
437: /* Compute leaf-to-root process connectivity */
438: for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
439: /* Calculate edges */
440: PetscBTClear(neighbors, rank);
441: for(proc = 0, numNeighbors = 0; proc < numProcs; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
442: PetscMalloc1(numNeighbors, &ranksNew);
443: PetscMalloc1(numNeighbors, &localPointsNew);
444: PetscMalloc1(numNeighbors, &remotePointsNew);
445: for(proc = 0, n = 0; proc < numProcs; ++proc) {
446: if (PetscBTLookup(neighbors, proc)) {
447: ranksNew[n] = proc;
448: localPointsNew[n] = proc;
449: remotePointsNew[n].index = rank;
450: remotePointsNew[n].rank = proc;
451: ++n;
452: }
453: }
454: PetscBTDestroy(&neighbors);
455: if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
456: else {PetscFree(ranksNew);}
457: if (sfProcess) {
458: PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
459: PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
460: PetscSFSetFromOptions(*sfProcess);
461: PetscSFSetGraph(*sfProcess, numProcs, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
462: }
463: return(0);
464: }
468: /*@
469: DMPlexDistributeOwnership - Compute owner information for shared points. This basically gathers two-sided (root and leaf) information for the point SF.
471: Collective on DM
473: Input Parameter:
474: . dm - The DM
476: Output Parameters:
477: + rootSection - The number of leaves for a given root point
478: . rootrank - The rank of each edge into the root point
479: . leafSection - The number of processes sharing a given leaf point
480: - leafrank - The rank of each process sharing a leaf point
482: Level: developer
484: .seealso: DMPlexCreateOverlap()
485: @*/
486: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
487: {
488: MPI_Comm comm;
489: PetscSF sfPoint;
490: const PetscInt *rootdegree;
491: PetscInt *myrank, *remoterank;
492: PetscInt pStart, pEnd, p, nedges;
493: PetscMPIInt rank;
494: PetscErrorCode ierr;
497: PetscObjectGetComm((PetscObject) dm, &comm);
498: MPI_Comm_rank(comm, &rank);
499: DMPlexGetChart(dm, &pStart, &pEnd);
500: DMGetPointSF(dm, &sfPoint);
501: /* Compute number of leaves for each root */
502: PetscObjectSetName((PetscObject) rootSection, "Root Section");
503: PetscSectionSetChart(rootSection, pStart, pEnd);
504: PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
505: PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
506: for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
507: PetscSectionSetUp(rootSection);
508: /* Gather rank of each leaf to root */
509: PetscSectionGetStorageSize(rootSection, &nedges);
510: PetscMalloc1(pEnd-pStart, &myrank);
511: PetscMalloc1(nedges, &remoterank);
512: for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
513: PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
514: PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
515: PetscFree(myrank);
516: ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
517: /* Distribute remote ranks to leaves */
518: PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
519: DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
520: return(0);
521: }
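/*
  Usage sketch (illustrative only; the helper name is hypothetical): the caller creates the two
  PetscSections, while the index sets are created by DMPlexDistributeOwnership(). This mirrors
  how DMPlexDistributeOverlap() below uses the routine.
*/
static PetscErrorCode ExampleComputeOwnership(DM dm)
{
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  /* ... use the ownership information ... */
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}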
525: /*@C
526: DMPlexCreateOverlap - Compute the label of remote overlap contributions, i.e. the point/rank pairings that must be sent to neighboring processes to build the requested overlap
528: Collective on DM
530: Input Parameters:
531: + dm - The DM
532: . levels - Number of overlap levels
533: . rootSection - The number of leaves for a given root point
534: . rootrank - The rank of each edge into the root point
535: . leafSection - The number of processes sharing a given leaf point
536: - leafrank - The rank of each process sharing a leaf point
538: Output Parameter:
539: . ovLabel - DMLabel containing remote overlap contributions as point/rank pairings
541: Level: developer
543: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
544: @*/
545: PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
546: {
547: MPI_Comm comm;
548: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
549: PetscSF sfPoint, sfProc;
550: const PetscSFNode *remote;
551: const PetscInt *local;
552: const PetscInt *nrank, *rrank;
553: PetscInt *adj = NULL;
554: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
555: PetscMPIInt rank, numProcs;
556: PetscBool useCone, useClosure, flg;
557: PetscErrorCode ierr;
560: PetscObjectGetComm((PetscObject) dm, &comm);
561: MPI_Comm_size(comm, &numProcs);
562: MPI_Comm_rank(comm, &rank);
563: DMGetPointSF(dm, &sfPoint);
564: DMPlexGetChart(dm, &pStart, &pEnd);
565: PetscSectionGetChart(leafSection, &sStart, &sEnd);
566: PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
567: DMLabelCreate("Overlap adjacency", &ovAdjByRank);
568: /* Handle leaves: shared with the root point */
569: for (l = 0; l < nleaves; ++l) {
570: PetscInt adjSize = PETSC_DETERMINE, a;
572: DMPlexGetAdjacency(dm, local[l], &adjSize, &adj);
573: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
574: }
575: ISGetIndices(rootrank, &rrank);
576: ISGetIndices(leafrank, &nrank);
577: /* Handle roots */
578: for (p = pStart; p < pEnd; ++p) {
579: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
581: if ((p >= sStart) && (p < sEnd)) {
582: /* Some leaves share a root with other leaves on different processes */
583: PetscSectionGetDof(leafSection, p, &neighbors);
584: if (neighbors) {
585: PetscSectionGetOffset(leafSection, p, &noff);
586: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
587: for (n = 0; n < neighbors; ++n) {
588: const PetscInt remoteRank = nrank[noff+n];
590: if (remoteRank == rank) continue;
591: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
592: }
593: }
594: }
595: /* Roots are shared with leaves */
596: PetscSectionGetDof(rootSection, p, &neighbors);
597: if (!neighbors) continue;
598: PetscSectionGetOffset(rootSection, p, &noff);
599: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
600: for (n = 0; n < neighbors; ++n) {
601: const PetscInt remoteRank = rrank[noff+n];
603: if (remoteRank == rank) continue;
604: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
605: }
606: }
607: PetscFree(adj);
608: ISRestoreIndices(rootrank, &rrank);
609: ISRestoreIndices(leafrank, &nrank);
610: /* Add additional overlap levels */
611: for (l = 1; l < levels; l++) {DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);}
612: /* We require the closure in the overlap */
613: DMPlexGetAdjacencyUseCone(dm, &useCone);
614: DMPlexGetAdjacencyUseClosure(dm, &useClosure);
615: if (useCone || !useClosure) {
616: DMPlexPartitionLabelClosure(dm, ovAdjByRank);
617: }
618: PetscOptionsHasName(((PetscObject) dm)->prefix, "-overlap_view", &flg);
619: if (flg) {
620: PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD, PETSC_TRUE);
621: DMLabelView(ovAdjByRank, PETSC_VIEWER_STDOUT_WORLD);
622: }
623: /* Make process SF and invert sender to receiver label */
624: DMPlexCreateTwoSidedProcessSF(dm, sfPoint, rootSection, rootrank, leafSection, leafrank, NULL, &sfProc);
625: DMLabelCreate("Overlap label", ovLabel);
626: DMPlexPartitionLabelInvert(dm, ovAdjByRank, sfProc, *ovLabel);
627: /* Add owned points, except for shared local points */
628: for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
629: for (l = 0; l < nleaves; ++l) {
630: DMLabelClearValue(*ovLabel, local[l], rank);
631: DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
632: }
633: /* Clean up */
634: DMLabelDestroy(&ovAdjByRank);
635: PetscSFDestroy(&sfProc);
636: return(0);
637: }
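/*
  Usage sketch (illustrative only; the helper name is hypothetical): feeding the ownership data
  computed by DMPlexDistributeOwnership() into DMPlexCreateOverlap() to obtain the label of
  points that must be sent to each neighboring rank, here with one level of overlap.
*/
static PetscErrorCode ExampleCreateOverlapLabel(DM dm, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank)
{
  DMLabel        ovLabel;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexCreateOverlap(dm, 1, rootSection, rootrank, leafSection, leafrank, &ovLabel);CHKERRQ(ierr);
  ierr = DMLabelView(ovLabel, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); /* inspect the point/rank pairings */
  ierr = DMLabelDestroy(&ovLabel);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}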
641: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
642: {
643: MPI_Comm comm;
644: PetscMPIInt rank, numProcs;
645: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
646: PetscInt *pointDepths, *remoteDepths, *ilocal;
647: PetscInt *depthRecv, *depthShift, *depthIdx;
648: PetscSFNode *iremote;
649: PetscSF pointSF;
650: const PetscInt *sharedLocal;
651: const PetscSFNode *overlapRemote, *sharedRemote;
652: PetscErrorCode ierr;
656: PetscObjectGetComm((PetscObject)dm, &comm);
657: MPI_Comm_rank(comm, &rank);
658: MPI_Comm_size(comm, &numProcs);
659: DMGetDimension(dm, &dim);
661: /* Before building the migration SF we need to know the new stratum offsets */
662: PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
663: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
664: for (d=0; d<dim+1; d++) {
665: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
666: for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
667: }
668: for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
669: PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
670: PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);
672: /* Count received points in each stratum and compute the internal strata shift */
673: PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
674: for (d=0; d<dim+1; d++) depthRecv[d]=0;
675: for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
676: depthShift[dim] = 0;
677: for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
678: for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
679: for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
680: for (d=0; d<dim+1; d++) {
681: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
682: depthIdx[d] = pStart + depthShift[d];
683: }
685: /* Form the overlap SF: build an SF that describes the full overlap migration */
686: DMPlexGetChart(dm, &pStart, &pEnd);
687: newLeaves = pEnd - pStart + nleaves;
688: PetscMalloc1(newLeaves, &ilocal);
689: PetscMalloc1(newLeaves, &iremote);
690: /* First map local points to themselves */
691: for (d=0; d<dim+1; d++) {
692: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
693: for (p=pStart; p<pEnd; p++) {
694: point = p + depthShift[d];
695: ilocal[point] = point;
696: iremote[point].index = p;
697: iremote[point].rank = rank;
698: depthIdx[d]++;
699: }
700: }
702: /* Add in the remote roots for currently shared points */
703: DMGetPointSF(dm, &pointSF);
704: PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
705: for (d=0; d<dim+1; d++) {
706: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
707: for (p=0; p<numSharedPoints; p++) {
708: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
709: point = sharedLocal[p] + depthShift[d];
710: iremote[point].index = sharedRemote[p].index;
711: iremote[point].rank = sharedRemote[p].rank;
712: }
713: }
714: }
716: /* Now add the incoming overlap points */
717: for (p=0; p<nleaves; p++) {
718: point = depthIdx[remoteDepths[p]];
719: ilocal[point] = point;
720: iremote[point].index = overlapRemote[p].index;
721: iremote[point].rank = overlapRemote[p].rank;
722: depthIdx[remoteDepths[p]]++;
723: }
724: PetscFree2(pointDepths,remoteDepths);
726: PetscSFCreate(comm, migrationSF);
727: PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
728: PetscSFSetFromOptions(*migrationSF);
729: DMPlexGetChart(dm, &pStart, &pEnd);
730: PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);
732: PetscFree3(depthRecv, depthShift, depthIdx);
733: return(0);
734: }
738: /*@
739: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF so that the resulting DM is stratified by point depth
741: Input Parameters:
742: + dm - The DM
743: - sf - A star forest with non-ordered leaves, usually defining a DM point migration
745: Output Parameter:
746: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
748: Level: developer
750: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
751: @*/
752: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
753: {
754: MPI_Comm comm;
755: PetscMPIInt rank, numProcs;
756: PetscInt d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
757: PetscInt *pointDepths, *remoteDepths, *ilocal;
758: PetscInt *depthRecv, *depthShift, *depthIdx;
759: const PetscSFNode *iremote;
760: PetscErrorCode ierr;
764: PetscObjectGetComm((PetscObject) dm, &comm);
765: MPI_Comm_rank(comm, &rank);
766: MPI_Comm_size(comm, &numProcs);
767: DMPlexGetDepth(dm, &ldepth);
768: MPI_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
769: if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);
771: /* Before building the migration SF we need to know the new stratum offsets */
772: PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
773: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
774: for (d = 0; d < depth+1; ++d) {
775: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
776: for (p = pStart; p < pEnd; ++p) pointDepths[p] = d;
777: }
778: for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
779: PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);
780: PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);
781: /* Count received points in each stratum and compute the internal strata shift */
782: PetscMalloc3(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx);
783: for (d = 0; d < depth+1; ++d) depthRecv[d] = 0;
784: for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
785: depthShift[depth] = 0;
786: for (d = 0; d < depth; ++d) depthShift[d] = depthRecv[depth];
787: for (d = 1; d < depth; ++d) depthShift[d] += depthRecv[0];
788: for (d = depth-2; d > 0; --d) depthShift[d] += depthRecv[d+1];
789: for (d = 0; d < depth+1; ++d) {depthIdx[d] = 0;}
790: /* Derive a new local permutation based on stratified indices */
791: PetscMalloc1(nleaves, &ilocal);
792: for (p = 0; p < nleaves; ++p) {
793: const PetscInt dep = remoteDepths[p];
795: ilocal[p] = depthShift[dep] + depthIdx[dep];
796: depthIdx[dep]++;
797: }
798: PetscSFCreate(comm, migrationSF);
799: PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
800: PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
801: PetscFree2(pointDepths,remoteDepths);
802: PetscFree3(depthRecv, depthShift, depthIdx);
803: return(0);
804: }
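/*
  Usage sketch (illustrative only; the helper name is hypothetical): replacing a raw migration SF
  with its stratified version, as DMPlexDistribute() below does before migrating the mesh.
*/
static PetscErrorCode ExampleStratifyMigration(DM dm, PetscSF *sfMigration)
{
  PetscSF        sfStratified;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexStratifyMigrationSF(dm, *sfMigration, &sfStratified);CHKERRQ(ierr);
  ierr = PetscSFDestroy(sfMigration);CHKERRQ(ierr); /* drop the unstratified SF */
  *sfMigration = sfStratified;
  PetscFunctionReturn(0);
}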
808: /*@
809: DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
811: Collective on DM
813: Input Parameters:
814: + dm - The DMPlex object
815: . pointSF - The PetscSF describing the communication pattern
816: . originalSection - The PetscSection for existing data layout
817: - originalVec - The existing data
819: Output Parameters:
820: + newSection - The PetscSection describing the new data layout
821: - newVec - The new data
823: Level: developer
825: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
826: @*/
827: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
828: {
829: PetscSF fieldSF;
830: PetscInt *remoteOffsets, fieldSize;
831: PetscScalar *originalValues, *newValues;
835: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
836: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
838: PetscSectionGetStorageSize(newSection, &fieldSize);
839: VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
840: VecSetType(newVec,dm->vectype);
842: VecGetArray(originalVec, &originalValues);
843: VecGetArray(newVec, &newValues);
844: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
845: PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
846: PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
847: PetscSFDestroy(&fieldSF);
848: VecRestoreArray(newVec, &newValues);
849: VecRestoreArray(originalVec, &originalValues);
850: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
851: return(0);
852: }
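/*
  Usage sketch (illustrative only; the helper name is hypothetical): distributing a Vec together
  with its PetscSection layout over a migration SF. The caller supplies an empty newSection and
  newVec, as DMPlexDistributeCoordinates() below does for the coordinate field.
*/
static PetscErrorCode ExampleDistributeVec(DM dm, PetscSF migrationSF, PetscSection origSection, Vec origVec)
{
  PetscSection   newSection;
  Vec            newVec;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);CHKERRQ(ierr);
  ierr = VecCreate(PetscObjectComm((PetscObject) dm), &newVec);CHKERRQ(ierr);
  ierr = DMPlexDistributeField(dm, migrationSF, origSection, origVec, newSection, newVec);CHKERRQ(ierr);
  /* ... use newSection and newVec ... */
  ierr = VecDestroy(&newVec);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&newSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}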
856: /*@
857: DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
859: Collective on DM
861: Input Parameters:
862: + dm - The DMPlex object
863: . pointSF - The PetscSF describing the communication pattern
864: . originalSection - The PetscSection for existing data layout
865: - originalIS - The existing data
867: Output Parameters:
868: + newSection - The PetscSection describing the new data layout
869: - newIS - The new data
871: Level: developer
873: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
874: @*/
875: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
876: {
877: PetscSF fieldSF;
878: PetscInt *newValues, *remoteOffsets, fieldSize;
879: const PetscInt *originalValues;
880: PetscErrorCode ierr;
883: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
884: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
886: PetscSectionGetStorageSize(newSection, &fieldSize);
887: PetscMalloc1(fieldSize, &newValues);
889: ISGetIndices(originalIS, &originalValues);
890: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
891: PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
892: PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
893: PetscSFDestroy(&fieldSF);
894: ISRestoreIndices(originalIS, &originalValues);
895: ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
896: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
897: return(0);
898: }
902: /*@
903: DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
905: Collective on DM
907: Input Parameters:
908: + dm - The DMPlex object
909: . pointSF - The PetscSF describing the communication pattern
910: . originalSection - The PetscSection for existing data layout
911: . datatype - The type of data
912: - originalData - The existing data
914: Output Parameters:
915: + newSection - The PetscSection describing the new data layout
916: - newData - The new data
918: Level: developer
920: .seealso: DMPlexDistribute(), DMPlexDistributeField()
921: @*/
922: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
923: {
924: PetscSF fieldSF;
925: PetscInt *remoteOffsets, fieldSize;
926: PetscMPIInt dataSize;
930: PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
931: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
933: PetscSectionGetStorageSize(newSection, &fieldSize);
934: MPI_Type_size(datatype, &dataSize);
935: PetscMalloc(fieldSize * dataSize, newData);
937: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
938: PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
939: PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
940: PetscSFDestroy(&fieldSF);
941: PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
942: return(0);
943: }
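/*
  Usage sketch (illustrative only; the helper name is hypothetical): distributing raw integer data
  attached to mesh points. The routine allocates newData, so the caller must PetscFree() it.
*/
static PetscErrorCode ExampleDistributeIntData(DM dm, PetscSF migrationSF, PetscSection origSection, PetscInt *origData)
{
  PetscSection   newSection;
  PetscInt      *newData;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origData, newSection, (void **) &newData);CHKERRQ(ierr);
  /* ... use newData laid out according to newSection ... */
  ierr = PetscFree(newData);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&newSection);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}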
947: PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
948: {
949: DM_Plex *mesh = (DM_Plex*) dm->data;
950: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
951: MPI_Comm comm;
952: PetscSF coneSF;
953: PetscSection originalConeSection, newConeSection;
954: PetscInt *remoteOffsets, *cones, *newCones, newConesSize;
955: PetscBool flg;
956: PetscErrorCode ierr;
961: PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
963: /* Distribute cone section */
964: PetscObjectGetComm((PetscObject)dm, &comm);
965: DMPlexGetConeSection(dm, &originalConeSection);
966: DMPlexGetConeSection(dmParallel, &newConeSection);
967: PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
968: DMSetUp(dmParallel);
969: {
970: PetscInt pStart, pEnd, p;
972: PetscSectionGetChart(newConeSection, &pStart, &pEnd);
973: for (p = pStart; p < pEnd; ++p) {
974: PetscInt coneSize;
975: PetscSectionGetDof(newConeSection, p, &coneSize);
976: pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
977: }
978: }
979: /* Communicate and renumber cones */
980: PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
981: DMPlexGetCones(dm, &cones);
982: DMPlexGetCones(dmParallel, &newCones);
983: PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
984: PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
985: PetscSectionGetStorageSize(newConeSection, &newConesSize);
986: ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
987: #if PETSC_USE_DEBUG
988: {
989: PetscInt p;
990: PetscBool valid = PETSC_TRUE;
991: for (p = 0; p < newConesSize; ++p) {
992: if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
993: }
994: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
995: }
996: #endif
997: PetscOptionsHasName(((PetscObject) dm)->prefix, "-cones_view", &flg);
998: if (flg) {
999: PetscPrintf(comm, "Serial Cone Section:\n");
1000: PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);
1001: PetscPrintf(comm, "Parallel Cone Section:\n");
1002: PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);
1003: PetscSFView(coneSF, NULL);
1004: }
1005: DMPlexGetConeOrientations(dm, &cones);
1006: DMPlexGetConeOrientations(dmParallel, &newCones);
1007: PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
1008: PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
1009: PetscSFDestroy(&coneSF);
1010: PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
1011: /* Create supports and stratify sieve */
1012: {
1013: PetscInt pStart, pEnd;
1015: PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
1016: PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
1017: }
1018: DMPlexSymmetrize(dmParallel);
1019: DMPlexStratify(dmParallel);
1020: pmesh->useCone = mesh->useCone;
1021: pmesh->useClosure = mesh->useClosure;
1022: return(0);
1023: }
1027: PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1028: {
1029: MPI_Comm comm;
1030: PetscSection originalCoordSection, newCoordSection;
1031: Vec originalCoordinates, newCoordinates;
1032: PetscInt bs;
1033: const char *name;
1034: const PetscReal *maxCell, *L;
1035: const DMBoundaryType *bd;
1036: PetscErrorCode ierr;
1042: PetscObjectGetComm((PetscObject)dm, &comm);
1043: DMGetCoordinateSection(dm, &originalCoordSection);
1044: DMGetCoordinateSection(dmParallel, &newCoordSection);
1045: DMGetCoordinatesLocal(dm, &originalCoordinates);
1046: if (originalCoordinates) {
1047: VecCreate(comm, &newCoordinates);
1048: PetscObjectGetName((PetscObject) originalCoordinates, &name);
1049: PetscObjectSetName((PetscObject) newCoordinates, name);
1051: DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1052: DMSetCoordinatesLocal(dmParallel, newCoordinates);
1053: VecGetBlockSize(originalCoordinates, &bs);
1054: VecSetBlockSize(newCoordinates, bs);
1055: VecDestroy(&newCoordinates);
1056: }
1057: DMGetPeriodicity(dm, &maxCell, &L, &bd);
1058: if (L) {DMSetPeriodicity(dmParallel, maxCell, L, bd);}
1059: return(0);
1060: }
1064: /* Here we are assuming that process 0 always has everything */
1065: PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1066: {
1067: MPI_Comm comm;
1068: PetscMPIInt rank;
1069: PetscInt numLabels, numLocalLabels, l;
1070: PetscBool hasLabels = PETSC_FALSE;
1076: PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1077: PetscObjectGetComm((PetscObject)dm, &comm);
1078: MPI_Comm_rank(comm, &rank);
1080: /* Everyone must have either the same number of labels, or none */
1081: DMPlexGetNumLabels(dm, &numLocalLabels);
1082: numLabels = numLocalLabels;
1083: MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1084: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1085: for (l = numLabels-1; l >= 0; --l) {
1086: DMLabel label = NULL, labelNew = NULL;
1087: PetscBool isdepth;
1089: if (hasLabels) {
1090: DMPlexGetLabelByNum(dm, l, &label);
1091: /* Skip "depth" because it is recreated */
1092: PetscStrcmp(label->name, "depth", &isdepth);
1093: }
1094: MPI_Bcast(&isdepth, 1, MPIU_BOOL, 0, comm);
1095: if (isdepth) continue;
1096: DMLabelDistribute(label, migrationSF, &labelNew);
1097: DMPlexAddLabel(dmParallel, labelNew);
1098: }
1099: PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1100: return(0);
1101: }
1105: PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1106: {
1107: DM_Plex *mesh = (DM_Plex*) dm->data;
1108: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1109: MPI_Comm comm;
1110: const PetscInt *gpoints;
1111: PetscInt dim, depth, n, d;
1112: PetscErrorCode ierr;
1118: PetscObjectGetComm((PetscObject)dm, &comm);
1119: DMGetDimension(dm, &dim);
1121: /* Setup hybrid structure */
1122: for (d = 0; d <= dim; ++d) {pmesh->hybridPointMax[d] = mesh->hybridPointMax[d];}
1123: MPI_Bcast(pmesh->hybridPointMax, dim+1, MPIU_INT, 0, comm);
1124: ISLocalToGlobalMappingGetSize(renumbering, &n);
1125: ISLocalToGlobalMappingGetIndices(renumbering, &gpoints);
1126: DMPlexGetDepth(dm, &depth);
1127: for (d = 0; d <= dim; ++d) {
1128: PetscInt pmax = pmesh->hybridPointMax[d], newmax = 0, pEnd, stratum[2], p;
1130: if (pmax < 0) continue;
1131: DMPlexGetDepthStratum(dm, d > depth ? depth : d, &stratum[0], &stratum[1]);
1132: DMPlexGetDepthStratum(dmParallel, d, NULL, &pEnd);
1133: MPI_Bcast(stratum, 2, MPIU_INT, 0, comm);
1134: for (p = 0; p < n; ++p) {
1135: const PetscInt point = gpoints[p];
1137: if ((point >= stratum[0]) && (point < stratum[1]) && (point >= pmax)) ++newmax;
1138: }
1139: if (newmax > 0) pmesh->hybridPointMax[d] = pEnd - newmax;
1140: else pmesh->hybridPointMax[d] = -1;
1141: }
1142: ISLocalToGlobalMappingRestoreIndices(renumbering, &gpoints);
1143: return(0);
1144: }
1148: PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1149: {
1150: DM_Plex *mesh = (DM_Plex*) dm->data;
1151: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1152: MPI_Comm comm;
1153: DM refTree;
1154: PetscSection origParentSection, newParentSection;
1155: PetscInt *origParents, *origChildIDs;
1156: PetscBool flg;
1157: PetscErrorCode ierr;
1162: PetscObjectGetComm((PetscObject)dm, &comm);
1164: /* Set up tree */
1165: DMPlexGetReferenceTree(dm,&refTree);
1166: DMPlexSetReferenceTree(dmParallel,refTree);
1167: DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1168: if (origParentSection) {
1169: PetscInt pStart, pEnd;
1170: PetscInt *newParents, *newChildIDs;
1171: PetscInt *remoteOffsetsParents, newParentSize;
1172: PetscSF parentSF;
1174: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1175: PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1176: PetscSectionSetChart(newParentSection,pStart,pEnd);
1177: PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1178: PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1179: PetscSectionGetStorageSize(newParentSection,&newParentSize);
1180: PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1181: PetscSFBcastBegin(parentSF, MPIU_INT, origParents, newParents);
1182: PetscSFBcastEnd(parentSF, MPIU_INT, origParents, newParents);
1183: PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1184: PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1185: ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1186: #if PETSC_USE_DEBUG
1187: {
1188: PetscInt p;
1189: PetscBool valid = PETSC_TRUE;
1190: for (p = 0; p < newParentSize; ++p) {
1191: if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1192: }
1193: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1194: }
1195: #endif
1196: PetscOptionsHasName(((PetscObject) dm)->prefix, "-parents_view", &flg);
1197: if (flg) {
1198: PetscPrintf(comm, "Serial Parent Section: \n");
1199: PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);
1200: PetscPrintf(comm, "Parallel Parent Section: \n");
1201: PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);
1202: PetscSFView(parentSF, NULL);
1203: }
1204: DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1205: PetscSectionDestroy(&newParentSection);
1206: PetscFree2(newParents,newChildIDs);
1207: PetscSFDestroy(&parentSF);
1208: }
1209: pmesh->useAnchors = mesh->useAnchors;
1210: return(0);
1211: }
1215: PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1216: {
1217: DM_Plex *mesh = (DM_Plex*) dm->data;
1218: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1219: PetscMPIInt rank, numProcs;
1220: MPI_Comm comm;
1221: PetscErrorCode ierr;
1227: /* Create point SF for parallel mesh */
1228: PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1229: PetscObjectGetComm((PetscObject)dm, &comm);
1230: MPI_Comm_rank(comm, &rank);
1231: MPI_Comm_size(comm, &numProcs);
1232: {
1233: const PetscInt *leaves;
1234: PetscSFNode *remotePoints, *rowners, *lowners;
1235: PetscInt numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1236: PetscInt pStart, pEnd;
1238: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1239: PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1240: PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1241: for (p=0; p<numRoots; p++) {
1242: rowners[p].rank = -1;
1243: rowners[p].index = -1;
1244: }
1245: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1246: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1247: for (p = 0; p < numLeaves; ++p) {
1248: if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1249: lowners[p].rank = rank;
1250: lowners[p].index = leaves ? leaves[p] : p;
1251: } else if (lowners[p].rank >= 0) { /* Point already claimed, so flag it so that MAXLOC does not listen to us */
1252: lowners[p].rank = -2;
1253: lowners[p].index = -2;
1254: }
1255: }
1256: for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, so flag it so that MAXLOC does not use it */
1257: rowners[p].rank = -3;
1258: rowners[p].index = -3;
1259: }
1260: PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1261: PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1262: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1263: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1264: for (p = 0; p < numLeaves; ++p) {
1265: if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1266: if (lowners[p].rank != rank) ++numGhostPoints;
1267: }
1268: PetscMalloc1(numGhostPoints, &ghostPoints);
1269: PetscMalloc1(numGhostPoints, &remotePoints);
1270: for (p = 0, gp = 0; p < numLeaves; ++p) {
1271: if (lowners[p].rank != rank) {
1272: ghostPoints[gp] = leaves ? leaves[p] : p;
1273: remotePoints[gp].rank = lowners[p].rank;
1274: remotePoints[gp].index = lowners[p].index;
1275: ++gp;
1276: }
1277: }
1278: PetscFree2(rowners,lowners);
1279: PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1280: PetscSFSetFromOptions((dmParallel)->sf);
1281: }
1282: pmesh->useCone = mesh->useCone;
1283: pmesh->useClosure = mesh->useClosure;
1284: PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1285: return(0);
1286: }
1290: /*@C
1291: DMPlexCreatePointSF - Build a point SF from an SF describing a point migration
1293: Input Parameters:
1294: + dm - The source DMPlex object
1295: . migrationSF - The star forest that describes the parallel point remapping
1296: - ownership - Flag causing a vote to determine point ownership
1298: Output Parameter:
1299: . pointSF - The star forest describing the point overlap in the remapped DM
1301: Level: developer
1303: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1304: @*/
1305: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1306: {
1307: PetscMPIInt rank;
1308: PetscInt p, nroots, nleaves, idx, npointLeaves;
1309: PetscInt *pointLocal;
1310: const PetscInt *leaves;
1311: const PetscSFNode *roots;
1312: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1313: PetscErrorCode ierr;
1317: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1319: PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1320: PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1321: if (ownership) {
1322: /* Point ownership vote: Process with highest rank owns shared points */
1323: for (p = 0; p < nleaves; ++p) {
1324: /* Either put in a bid or we know we own it */
1325: leafNodes[p].rank = rank;
1326: leafNodes[p].index = p;
1327: }
1328: for (p = 0; p < nroots; p++) {
1330: /* Root must not participate in the reduction, so flag it so that MAXLOC does not use it */
1330: rootNodes[p].rank = -3;
1331: rootNodes[p].index = -3;
1332: }
1333: PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1334: PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1335: } else {
1336: for (p = 0; p < nroots; p++) {
1337: rootNodes[p].index = -1;
1338: rootNodes[p].rank = rank;
1339: };
1340: for (p = 0; p < nleaves; p++) {
1341: /* Write new local id into old location */
1342: if (roots[p].rank == rank) {
1343: rootNodes[roots[p].index].index = leaves[p];
1344: }
1345: }
1346: }
1347: PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1348: PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1350: for (npointLeaves = 0, p = 0; p < nleaves; p++) {if (leafNodes[p].rank != rank) npointLeaves++;}
1351: PetscMalloc1(npointLeaves, &pointLocal);
1352: PetscMalloc1(npointLeaves, &pointRemote);
1353: for (idx = 0, p = 0; p < nleaves; p++) {
1354: if (leafNodes[p].rank != rank) {
1355: pointLocal[idx] = p;
1356: pointRemote[idx] = leafNodes[p];
1357: idx++;
1358: }
1359: }
1360: PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1361: PetscSFSetFromOptions(*pointSF);
1362: PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1363: PetscFree2(rootNodes, leafNodes);
1364: return(0);
1365: }
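/*
  Usage sketch (illustrative only; the helper name is hypothetical): deriving the point SF of a
  freshly migrated DM and attaching it, as DMPlexDistribute() does for the non-overlapping case;
  ownership = PETSC_TRUE triggers the ownership vote described above.
*/
static PetscErrorCode ExampleAttachPointSF(DM dmParallel, PetscSF migrationSF)
{
  PetscSF        sfPoint;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexCreatePointSF(dmParallel, migrationSF, PETSC_TRUE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(dmParallel, sfPoint);CHKERRQ(ierr); /* the DM takes its own reference */
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}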
1369: /*@C
1370: DMPlexMigrate - Migrates internal DM data over the supplied star forest
1372: Input Parameters:
1373: + dm - The source DMPlex object
1374: - sf - The star forest communication context describing the migration pattern
1376: Output Parameter:
1377: . targetDM - The target DMPlex object
1379: Level: intermediate
1381: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1382: @*/
1383: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1384: {
1385: MPI_Comm comm;
1386: PetscInt dim, nroots;
1387: PetscSF sfPoint;
1388: ISLocalToGlobalMapping ltogMigration;
1389: PetscBool flg;
1390: PetscErrorCode ierr;
1394: PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1395: PetscObjectGetComm((PetscObject) dm, &comm);
1396: DMGetDimension(dm, &dim);
1397: DMSetDimension(targetDM, dim);
1399: /* Check for a one-to-all distribution pattern */
1400: DMGetPointSF(dm, &sfPoint);
1401: PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1402: if (nroots >= 0) {
1403: IS isOriginal;
1404: ISLocalToGlobalMapping ltogOriginal;
1405: PetscInt n, size, nleaves, conesSize;
1406: PetscInt *numbering_orig, *numbering_new, *cones;
1407: PetscSection coneSection;
1408: /* Get the original point numbering */
1409: DMPlexCreatePointNumbering(dm, &isOriginal);
1410: ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal);
1411: ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1412: ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1413: /* Convert to positive global numbers */
1414: for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1415: /* Derive the new local-to-global mapping from the old one */
1416: PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1417: PetscMalloc1(nleaves, &numbering_new);
1418: PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1419: PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1420: ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, <ogMigration);
1421: ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1422: /* Convert cones to global numbering before migrating them */
1423: DMPlexGetConeSection(dm, &coneSection);
1424: PetscSectionGetStorageSize(coneSection, &conesSize);
1425: DMPlexGetCones(dm, &cones);
1426: ISLocalToGlobalMappingApplyBlock(ltogOriginal, conesSize, cones, cones);
1427: ISDestroy(&isOriginal);
1428: ISLocalToGlobalMappingDestroy(<ogOriginal);
1429: } else {
1430: /* One-to-all distribution pattern: We can derive LToG from SF */
1431: ISLocalToGlobalMappingCreateSF(sf, 0, <ogMigration);
1432: }
1433: PetscOptionsHasName(((PetscObject) dm)->prefix, "-partition_view", &flg);
1434: if (flg) {
1435: PetscPrintf(comm, "Point renumbering for DM migration:\n");
1436: ISLocalToGlobalMappingView(ltogMigration, NULL);
1437: }
1438: /* Migrate DM data to target DM */
1439: DMPlexDistributeCones(dm, sf, ltogMigration, targetDM);
1440: DMPlexDistributeCoordinates(dm, sf, targetDM);
1441: DMPlexDistributeLabels(dm, sf, targetDM);
1442: DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);
1443: DMPlexDistributeSetupTree(dm, sf, ltogMigration, targetDM);
1444: ISLocalToGlobalMappingDestroy(<ogMigration);
1445: PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1446: return(0);
1447: }
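/*
  Usage sketch (illustrative only; the helper name is hypothetical): migrating a mesh into a new
  DMPlex over a given migration SF and then deriving its point SF, following the pattern used by
  DMPlexDistribute() below.
*/
static PetscErrorCode ExampleMigrate(DM dm, PetscSF migrationSF, DM *dmParallel)
{
  PetscSF        sfPoint;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexCreate(PetscObjectComm((PetscObject) dm), dmParallel);CHKERRQ(ierr);
  ierr = DMPlexMigrate(dm, migrationSF, *dmParallel);CHKERRQ(ierr);
  ierr = DMPlexCreatePointSF(*dmParallel, migrationSF, PETSC_TRUE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(*dmParallel, sfPoint);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}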
1451: /*@C
1452: DMPlexDistribute - Distributes the mesh and any associated sections.
1454: Collective on DM
1456: Input Parameters:
1457: + dm - The original DMPlex object
1458: - overlap - The overlap of partitions, 0 is the default
1460: Output Parameters:
1461: + sf - The PetscSF used for point distribution
1462: - dmParallel - The distributed DMPlex object, or NULL
1464: Note: If the mesh was not distributed, dmParallel is set to NULL.
1466: The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1467: DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1468: representation on the mesh.
1470: Level: intermediate
1472: .keywords: mesh, elements
1473: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1474: @*/
1475: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1476: {
1477: MPI_Comm comm;
1478: PetscPartitioner partitioner;
1479: IS cellPart;
1480: PetscSection cellPartSection;
1481: DM dmCoord;
1482: DMLabel lblPartition, lblMigration;
1483: PetscSF sfProcess, sfMigration, sfStratified, sfPoint;
1484: PetscBool flg;
1485: PetscMPIInt rank, numProcs, p;
1486: PetscErrorCode ierr;
1493: PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1494: PetscObjectGetComm((PetscObject)dm,&comm);
1495: MPI_Comm_rank(comm, &rank);
1496: MPI_Comm_size(comm, &numProcs);
1498: *dmParallel = NULL;
1499: if (numProcs == 1) return(0);
1501: /* Create cell partition */
1502: PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);
1503: PetscSectionCreate(comm, &cellPartSection);
1504: DMPlexGetPartitioner(dm, &partitioner);
1505: PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);
1506: {
1507: /* Convert partition to DMLabel */
1508: PetscInt proc, pStart, pEnd, npoints, poffset;
1509: const PetscInt *points;
1510: DMLabelCreate("Point Partition", &lblPartition);
1511: ISGetIndices(cellPart, &points);
1512: PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1513: for (proc = pStart; proc < pEnd; proc++) {
1514: PetscSectionGetDof(cellPartSection, proc, &npoints);
1515: PetscSectionGetOffset(cellPartSection, proc, &poffset);
1516: for (p = poffset; p < poffset+npoints; p++) {
1517: DMLabelSetValue(lblPartition, points[p], proc);
1518: }
1519: }
1520: ISRestoreIndices(cellPart, &points);
1521: }
1522: DMPlexPartitionLabelClosure(dm, lblPartition);
1523: {
1524: /* Build a global process SF */
1525: PetscSFNode *remoteProc;
1526: PetscMalloc1(numProcs, &remoteProc);
1527: for (p = 0; p < numProcs; ++p) {
1528: remoteProc[p].rank = p;
1529: remoteProc[p].index = rank;
1530: }
1531: PetscSFCreate(comm, &sfProcess);
1532: PetscObjectSetName((PetscObject) sfProcess, "Process SF");
1533: PetscSFSetGraph(sfProcess, numProcs, numProcs, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
1534: }
1535: DMLabelCreate("Point migration", &lblMigration);
1536: DMPlexPartitionLabelInvert(dm, lblPartition, sfProcess, lblMigration);
1537: DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1538: /* Stratify the SF in case we are migrating an already parallel plex */
1539: DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1540: PetscSFDestroy(&sfMigration);
1541: sfMigration = sfStratified;
1542: PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);
1543: PetscOptionsHasName(((PetscObject) dm)->prefix, "-partition_view", &flg);
1544: if (flg) {
1545: PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD, PETSC_TRUE);
1546: DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);
1547: PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);
1548: }
1550: /* Create non-overlapping parallel DM and migrate internal data */
1551: DMPlexCreate(comm, dmParallel);
1552: PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1553: DMPlexMigrate(dm, sfMigration, *dmParallel);
1555: /* Build the point SF without overlap */
1556: DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1557: DMSetPointSF(*dmParallel, sfPoint);
1558: DMGetCoordinateDM(*dmParallel, &dmCoord);
1559: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1560: if (flg) {PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);}
1562: if (overlap > 0) {
1563: DM dmOverlap;
1564: PetscInt nroots, nleaves;
1565: PetscSFNode *newRemote;
1566: const PetscSFNode *oldRemote;
1567: PetscSF sfOverlap, sfOverlapPoint;
1568: /* Add the partition overlap to the distributed DM */
1569: DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1570: DMDestroy(dmParallel);
1571: *dmParallel = dmOverlap;
1572: if (flg) {
1573: PetscPrintf(comm, "Overlap Migration SF:\n");
1574: PetscSFView(sfOverlap, NULL);
1575: }
1577: /* Re-map the migration SF to establish the full migration pattern */
1578: PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);
1579: PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1580: PetscMalloc1(nleaves, &newRemote);
1581: PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1582: PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1583: PetscSFCreate(comm, &sfOverlapPoint);
1584: PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1585: PetscSFDestroy(&sfOverlap);
1586: PetscSFDestroy(&sfMigration);
1587: sfMigration = sfOverlapPoint;
1588: }
1589: /* Cleanup Partition */
1590: PetscSFDestroy(&sfProcess);
1591: DMLabelDestroy(&lblPartition);
1592: DMLabelDestroy(&lblMigration);
1593: PetscSectionDestroy(&cellPartSection);
1594: ISDestroy(&cellPart);
1595: /* Copy BC */
1596: DMPlexCopyBoundary(dm, *dmParallel);
1597: /* Cleanup */
1598: if (sf) {*sf = sfMigration;}
1599: else {PetscSFDestroy(&sfMigration);}
1600: PetscSFDestroy(&sfPoint);
1601: PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1602: return(0);
1603: }
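/*
  Usage sketch (illustrative only; the helper name is hypothetical): the common calling pattern,
  replacing the serial mesh with the distributed one when a redistribution actually took place.
*/
static PetscErrorCode ExampleDistribute(DM *dm)
{
  DM             dmDist = NULL;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexDistribute(*dm, 0, NULL, &dmDist);CHKERRQ(ierr);
  if (dmDist) { /* NULL on a single process */
    ierr = DMDestroy(dm);CHKERRQ(ierr);
    *dm  = dmDist;
  }
  PetscFunctionReturn(0);
}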
1607: /*@C
1608: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.
1610: Collective on DM
1612: Input Parameters:
1613: + dm - The non-overlapping distributed DMPlex object
1614: - overlap - The overlap of partitions, 0 is the default
1616: Output Parameters:
1617: + sf - The PetscSF used for point distribution
1618: - dmOverlap - The overlapping distributed DMPlex object, or NULL
1620: Note: If the mesh was not distributed, dmOverlap is set to NULL.
1622: The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1623: DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1624: representation on the mesh.
1626: Level: intermediate
1628: .keywords: mesh, elements
1629: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1630: @*/
1631: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1632: {
1633: MPI_Comm comm;
1634: PetscMPIInt rank;
1635: PetscSection rootSection, leafSection;
1636: IS rootrank, leafrank;
1637: DM dmCoord;
1638: DMLabel lblOverlap;
1639: PetscSF sfOverlap, sfStratified, sfPoint;
1640: PetscErrorCode ierr;
1647: PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1648: PetscObjectGetComm((PetscObject)dm,&comm);
1649: MPI_Comm_rank(comm, &rank);
1651: /* Compute point overlap with neighbouring processes on the distributed DM */
1652: PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);
1653: PetscSectionCreate(comm, &rootSection);
1654: PetscSectionCreate(comm, &leafSection);
1655: DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1656: DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1657: /* Convert overlap label to stratified migration SF */
1658: DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1659: DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1660: PetscSFDestroy(&sfOverlap);
1661: sfOverlap = sfStratified;
1662: PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1663: PetscSFSetFromOptions(sfOverlap);
1665: PetscSectionDestroy(&rootSection);
1666: PetscSectionDestroy(&leafSection);
1667: ISDestroy(&rootrank);
1668: ISDestroy(&leafrank);
1669: PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);
1671: /* Build the overlapping DM */
1672: DMPlexCreate(comm, dmOverlap);
1673: PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1674: DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1675: /* Build the new point SF */
1676: DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1677: DMSetPointSF(*dmOverlap, sfPoint);
1678: DMGetCoordinateDM(*dmOverlap, &dmCoord);
1679: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1680: PetscSFDestroy(&sfPoint);
1681: /* Cleanup overlap partition */
1682: DMLabelDestroy(&lblOverlap);
1683: if (sf) *sf = sfOverlap;
1684: else {PetscSFDestroy(&sfOverlap);}
1685: PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1686: return(0);
1687: }
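/*
  Usage sketch (illustrative only; the helper name is hypothetical): adding one level of overlap
  to an already distributed mesh and keeping the overlapping DM in place of the original.
*/
static PetscErrorCode ExampleAddOverlap(DM *dm)
{
  DM             dmOverlap = NULL;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMPlexDistributeOverlap(*dm, 1, NULL, &dmOverlap);CHKERRQ(ierr);
  if (dmOverlap) { /* guard against the NULL case noted in the manual page above */
    ierr = DMDestroy(dm);CHKERRQ(ierr);
    *dm  = dmOverlap;
  }
  PetscFunctionReturn(0);
}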