Actual source code: plexdistribute.c
petsc-3.8.4 2018-03-24
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsc/private/dmlabelimpl.h>
4: /*@C
5: DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
7: Input Parameters:
8: + dm - The DM object
9: . user - The user callback, may be NULL (to clear the callback)
10: - ctx - context for callback evaluation, may be NULL
12: Level: advanced
14: Notes:
15: The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.
17: Any setting here overrides other configuration of DMPlex adjacency determination.
19: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
20: @*/
21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
22: {
23: DM_Plex *mesh = (DM_Plex *)dm->data;
27: mesh->useradjacency = user;
28: mesh->useradjacencyctx = ctx;
29: return(0);
30: }
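/* Editor's note (not part of petsc-3.8.4): a minimal sketch of a user adjacency callback and its
   registration; MyAdjacency and myCtx are hypothetical names.  As the call site in
   DMPlexGetAdjacency_Internal() below shows, the callback receives the available length of adj[]
   in *adjSize and must overwrite *adjSize with the number of entries it filled in.

     static PetscErrorCode MyAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
     {
       PetscFunctionBegin;
       if (*adjSize < 1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Adjacency array too small");
       adj[0]   = p;   // declare each point adjacent only to itself
       *adjSize = 1;
       PetscFunctionReturn(0);
     }

     ierr = DMPlexSetAdjacencyUser(dm, MyAdjacency, myCtx);CHKERRQ(ierr);
*/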
32: /*@C
33: DMPlexGetAdjacencyUser - get the user-defined adjacency callback
35: Input Parameter:
36: . dm - The DM object
38: Output Parameters:
39: + user - The user callback
40: - ctx - context for callback evaluation
42: Level: advanced
44: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
45: @*/
46: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
47: {
48: DM_Plex *mesh = (DM_Plex *)dm->data;
52: if (user) *user = mesh->useradjacency;
53: if (ctx) *ctx = mesh->useradjacencyctx;
54: return(0);
55: }
57: /*@
58: DMPlexSetAdjacencyUseCone - Define adjacency in the mesh using either the cone or the support first
60: Input Parameters:
61: + dm - The DM object
62: - useCone - Flag to use the cone first
64: Level: intermediate
66: Notes:
67: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
68: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
69: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
71: .seealso: DMPlexGetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
72: @*/
73: PetscErrorCode DMPlexSetAdjacencyUseCone(DM dm, PetscBool useCone)
74: {
75: DM_Plex *mesh = (DM_Plex *) dm->data;
79: mesh->useCone = useCone;
80: return(0);
81: }
83: /*@
84: DMPlexGetAdjacencyUseCone - Query whether adjacency in the mesh uses the cone or the support first
86: Input Parameter:
87: . dm - The DM object
89: Output Parameter:
90: . useCone - Flag to use the cone first
92: Level: intermediate
94: Notes:
95: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
96: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
97: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
99: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
100: @*/
101: PetscErrorCode DMPlexGetAdjacencyUseCone(DM dm, PetscBool *useCone)
102: {
103: DM_Plex *mesh = (DM_Plex *) dm->data;
108: *useCone = mesh->useCone;
109: return(0);
110: }
112: /*@
113: DMPlexSetAdjacencyUseClosure - Define adjacency in the mesh using the transitive closure
115: Input Parameters:
116: + dm - The DM object
117: - useClosure - Flag to use the closure
119: Level: intermediate
121: Notes:
122: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
123: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
124: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
126: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
127: @*/
128: PetscErrorCode DMPlexSetAdjacencyUseClosure(DM dm, PetscBool useClosure)
129: {
130: DM_Plex *mesh = (DM_Plex *) dm->data;
134: mesh->useClosure = useClosure;
135: return(0);
136: }
138: /*@
139: DMPlexGetAdjacencyUseClosure - Query whether adjacency in the mesh uses the transitive closure
141: Input Parameter:
142: . dm - The DM object
144: Output Parameter:
145: . useClosure - Flag to use the closure
147: Level: intermediate
149: Notes:
150: $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE
151: $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE
152: $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE
154: .seealso: DMPlexSetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
155: @*/
156: PetscErrorCode DMPlexGetAdjacencyUseClosure(DM dm, PetscBool *useClosure)
157: {
158: DM_Plex *mesh = (DM_Plex *) dm->data;
163: *useClosure = mesh->useClosure;
164: return(0);
165: }
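/* Editor's note (not part of petsc-3.8.4): a minimal sketch of selecting the adjacency patterns
   listed in the Notes above, typically before DMPlexDistribute() or DMPlexPreallocateOperator().

     PetscErrorCode ierr;

     // FEM-style adjacency: q \in closure(star(p))
     ierr = DMPlexSetAdjacencyUseCone(dm, PETSC_FALSE);CHKERRQ(ierr);
     ierr = DMPlexSetAdjacencyUseClosure(dm, PETSC_TRUE);CHKERRQ(ierr);

     // FVM-style adjacency: q \in support(p+cone(p))
     ierr = DMPlexSetAdjacencyUseCone(dm, PETSC_TRUE);CHKERRQ(ierr);
     ierr = DMPlexSetAdjacencyUseClosure(dm, PETSC_FALSE);CHKERRQ(ierr);
*/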
167: /*@
168: DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
170: Input Parameters:
171: + dm - The DM object
172: - useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
174: Level: intermediate
176: .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
177: @*/
178: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
179: {
180: DM_Plex *mesh = (DM_Plex *) dm->data;
184: mesh->useAnchors = useAnchors;
185: return(0);
186: }
188: /*@
189: DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
191: Input Parameter:
192: . dm - The DM object
194: Output Parameter:
195: . useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
197: Level: intermediate
199: .seealso: DMPlexSetAdjacencyUseAnchors(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
200: @*/
201: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
202: {
203: DM_Plex *mesh = (DM_Plex *) dm->data;
208: *useAnchors = mesh->useAnchors;
209: return(0);
210: }
212: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
213: {
214: const PetscInt *cone = NULL;
215: PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
216: PetscErrorCode ierr;
219: DMPlexGetConeSize(dm, p, &coneSize);
220: DMPlexGetCone(dm, p, &cone);
221: for (c = 0; c <= coneSize; ++c) {
222: const PetscInt point = !c ? p : cone[c-1];
223: const PetscInt *support = NULL;
224: PetscInt supportSize, s, q;
226: DMPlexGetSupportSize(dm, point, &supportSize);
227: DMPlexGetSupport(dm, point, &support);
228: for (s = 0; s < supportSize; ++s) {
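      /* Linear duplicate scan: if support[s] is not already in adj[0..numAdj-1], the comma
         expression in the loop test appends it and yields 0, ending the scan; the same idiom
         is used in the other adjacency routines below */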
229: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
230: if (support[s] == adj[q]) break;
231: }
232: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
233: }
234: }
235: *adjSize = numAdj;
236: return(0);
237: }
239: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
240: {
241: const PetscInt *support = NULL;
242: PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
243: PetscErrorCode ierr;
246: DMPlexGetSupportSize(dm, p, &supportSize);
247: DMPlexGetSupport(dm, p, &support);
248: for (s = 0; s <= supportSize; ++s) {
249: const PetscInt point = !s ? p : support[s-1];
250: const PetscInt *cone = NULL;
251: PetscInt coneSize, c, q;
253: DMPlexGetConeSize(dm, point, &coneSize);
254: DMPlexGetCone(dm, point, &cone);
255: for (c = 0; c < coneSize; ++c) {
256: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
257: if (cone[c] == adj[q]) break;
258: }
259: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
260: }
261: }
262: *adjSize = numAdj;
263: return(0);
264: }
266: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
267: {
268: PetscInt *star = NULL;
269: PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s;
273: DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
274: for (s = 0; s < starSize*2; s += 2) {
275: const PetscInt *closure = NULL;
276: PetscInt closureSize, c, q;
278: DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
279: for (c = 0; c < closureSize*2; c += 2) {
280: for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
281: if (closure[c] == adj[q]) break;
282: }
283: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
284: }
285: DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);
286: }
287: DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
288: *adjSize = numAdj;
289: return(0);
290: }
292: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
293: {
294: static PetscInt asiz = 0;
295: PetscInt maxAnchors = 1;
296: PetscInt aStart = -1, aEnd = -1;
297: PetscInt maxAdjSize;
298: PetscSection aSec = NULL;
299: IS aIS = NULL;
300: const PetscInt *anchors;
301: DM_Plex *mesh = (DM_Plex *)dm->data;
302: PetscErrorCode ierr;
305: if (useAnchors) {
306: DMPlexGetAnchors(dm,&aSec,&aIS);
307: if (aSec) {
308: PetscSectionGetMaxDof(aSec,&maxAnchors);
309: maxAnchors = PetscMax(1,maxAnchors);
310: PetscSectionGetChart(aSec,&aStart,&aEnd);
311: ISGetIndices(aIS,&anchors);
312: }
313: }
314: if (!*adj) {
315: PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;
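    /* Heuristic scratch size: coneSeries and supportSeries are geometric sums of the maximal
       cone and support sizes over the DAG depth; the result is scaled by the maximal number
       of anchors and capped by the chart size */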
317: DMPlexGetChart(dm, &pStart,&pEnd);
318: DMPlexGetDepth(dm, &depth);
319: DMPlexGetMaxSizes(dm, &maxC, &maxS);
320: coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
321: supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
322: asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
323: asiz *= maxAnchors;
324: asiz = PetscMin(asiz,pEnd-pStart);
325: PetscMalloc1(asiz,adj);
326: }
327: if (*adjSize < 0) *adjSize = asiz;
328: maxAdjSize = *adjSize;
329: if (mesh->useradjacency) {
330: mesh->useradjacency(dm, p, adjSize, *adj, mesh->useradjacencyctx);
331: } else if (useTransitiveClosure) {
332: DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
333: } else if (useCone) {
334: DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
335: } else {
336: DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
337: }
338: if (useAnchors && aSec) {
339: PetscInt origSize = *adjSize;
340: PetscInt numAdj = origSize;
341: PetscInt i = 0, j;
342: PetscInt *orig = *adj;
344: while (i < origSize) {
345: PetscInt p = orig[i];
346: PetscInt aDof = 0;
348: if (p >= aStart && p < aEnd) {
349: PetscSectionGetDof(aSec,p,&aDof);
350: }
351: if (aDof) {
352: PetscInt aOff;
353: PetscInt s, q;
355: for (j = i + 1; j < numAdj; j++) {
356: orig[j - 1] = orig[j];
357: }
358: origSize--;
359: numAdj--;
360: PetscSectionGetOffset(aSec,p,&aOff);
361: for (s = 0; s < aDof; ++s) {
362: for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
363: if (anchors[aOff+s] == orig[q]) break;
364: }
365: if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
366: }
367: }
368: else {
369: i++;
370: }
371: }
372: *adjSize = numAdj;
373: ISRestoreIndices(aIS,&anchors);
374: }
375: return(0);
376: }
378: /*@
379: DMPlexGetAdjacency - Return all points adjacent to the given point
381: Input Parameters:
382: + dm - The DM object
383: . p - The point
384: . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
385: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize
387: Output Parameters:
388: + adjSize - The number of adjacent points
389: - adj - The adjacent points
391: Level: advanced
393: Notes: The user must PetscFree the adj array if it was not passed in.
395: .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
396: @*/
397: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
398: {
399: DM_Plex *mesh = (DM_Plex *) dm->data;
406: DMPlexGetAdjacency_Internal(dm, p, mesh->useCone, mesh->useClosure, mesh->useAnchors, adjSize, adj);
407: return(0);
408: }
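/* Editor's note (not part of petsc-3.8.4): a minimal sketch of calling DMPlexGetAdjacency() with
   an automatically allocated array, reusing it for every point of the chart and freeing it once
   at the end, as the man page above requires (the same pattern appears in DMPlexCreateOverlap()
   below).

     PetscInt       p, pStart, pEnd, *adj = NULL;
     PetscErrorCode ierr;

     ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
     for (p = pStart; p < pEnd; ++p) {
       PetscInt adjSize = PETSC_DETERMINE, a;

       ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
       for (a = 0; a < adjSize; ++a) {
         ierr = PetscPrintf(PETSC_COMM_SELF, "point %D is adjacent to %D\n", p, adj[a]);CHKERRQ(ierr);
       }
     }
     ierr = PetscFree(adj);CHKERRQ(ierr);
*/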
410: /*@
411: DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity
413: Collective on DM
415: Input Parameters:
416: + dm - The DM
417: - sfPoint - The PetscSF which encodes point connectivity; the rootRankSection/rootRanks and leafRankSection/leafRanks arguments take the ownership information produced by DMPlexDistributeOwnership()
419: Output Parameters:
420: + processRanks - A list of process neighbors, or NULL
421: - sfProcess - An SF encoding the two-sided process connectivity, or NULL
423: Level: developer
425: .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
426: @*/
427: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
428: {
429: const PetscSFNode *remotePoints;
430: PetscInt *localPointsNew;
431: PetscSFNode *remotePointsNew;
432: const PetscInt *nranks;
433: PetscInt *ranksNew;
434: PetscBT neighbors;
435: PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n;
436: PetscMPIInt size, proc, rank;
437: PetscErrorCode ierr;
444: MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);
445: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
446: PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
447: PetscBTCreate(size, &neighbors);
448: PetscBTMemzero(size, neighbors);
449: /* Compute root-to-leaf process connectivity */
450: PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
451: ISGetIndices(rootRanks, &nranks);
452: for (p = pStart; p < pEnd; ++p) {
453: PetscInt ndof, noff, n;
455: PetscSectionGetDof(rootRankSection, p, &ndof);
456: PetscSectionGetOffset(rootRankSection, p, &noff);
457: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
458: }
459: ISRestoreIndices(rootRanks, &nranks);
460: /* Compute leaf-to-neighbor process connectivity */
461: PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
462: ISGetIndices(leafRanks, &nranks);
463: for (p = pStart; p < pEnd; ++p) {
464: PetscInt ndof, noff, n;
466: PetscSectionGetDof(leafRankSection, p, &ndof);
467: PetscSectionGetOffset(leafRankSection, p, &noff);
468: for (n = 0; n < ndof; ++n) {PetscBTSet(neighbors, nranks[noff+n]);}
469: }
470: ISRestoreIndices(leafRanks, &nranks);
471: /* Compute leaf-to-root process connectivity */
472: for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
473: /* Calculate edges */
474: PetscBTClear(neighbors, rank);
475: for(proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
476: PetscMalloc1(numNeighbors, &ranksNew);
477: PetscMalloc1(numNeighbors, &localPointsNew);
478: PetscMalloc1(numNeighbors, &remotePointsNew);
479: for(proc = 0, n = 0; proc < size; ++proc) {
480: if (PetscBTLookup(neighbors, proc)) {
481: ranksNew[n] = proc;
482: localPointsNew[n] = proc;
483: remotePointsNew[n].index = rank;
484: remotePointsNew[n].rank = proc;
485: ++n;
486: }
487: }
488: PetscBTDestroy(&neighbors);
489: if (processRanks) {ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);}
490: else {PetscFree(ranksNew);}
491: if (sfProcess) {
492: PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
493: PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");
494: PetscSFSetFromOptions(*sfProcess);
495: PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
496: }
497: return(0);
498: }
500: /*@
501: DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided (root and leaf) information for the point SF.
503: Collective on DM
505: Input Parameter:
506: . dm - The DM
508: Output Parameters:
509: + rootSection - The number of leaves for a given root point
510: . rootrank - The rank of each edge into the root point
511: . leafSection - The number of processes sharing a given leaf point
512: - leafrank - The rank of each process sharing a leaf point
514: Level: developer
516: .seealso: DMPlexCreateOverlap()
517: @*/
518: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
519: {
520: MPI_Comm comm;
521: PetscSF sfPoint;
522: const PetscInt *rootdegree;
523: PetscInt *myrank, *remoterank;
524: PetscInt pStart, pEnd, p, nedges;
525: PetscMPIInt rank;
526: PetscErrorCode ierr;
529: PetscObjectGetComm((PetscObject) dm, &comm);
530: MPI_Comm_rank(comm, &rank);
531: DMPlexGetChart(dm, &pStart, &pEnd);
532: DMGetPointSF(dm, &sfPoint);
533: /* Compute number of leaves for each root */
534: PetscObjectSetName((PetscObject) rootSection, "Root Section");
535: PetscSectionSetChart(rootSection, pStart, pEnd);
536: PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
537: PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
538: for (p = pStart; p < pEnd; ++p) {PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);}
539: PetscSectionSetUp(rootSection);
540: /* Gather rank of each leaf to root */
541: PetscSectionGetStorageSize(rootSection, &nedges);
542: PetscMalloc1(pEnd-pStart, &myrank);
543: PetscMalloc1(nedges, &remoterank);
544: for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
545: PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
546: PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
547: PetscFree(myrank);
548: ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
549: /* Distribute remote ranks to leaves */
550: PetscObjectSetName((PetscObject) leafSection, "Leaf Section");
551: DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
552: return(0);
553: }
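/* Editor's note (not part of petsc-3.8.4): a minimal sketch of computing the ownership
   information and feeding it to DMPlexCreateTwoSidedProcessSF() above.

     PetscSection   rootSection, leafSection;
     IS             rootrank, leafrank, procRanks;
     PetscSF        sfPoint, sfProcess;
     PetscErrorCode ierr;

     ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);CHKERRQ(ierr);
     ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);CHKERRQ(ierr);
     ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
     ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
     ierr = DMPlexCreateTwoSidedProcessSF(dm, sfPoint, rootSection, rootrank, leafSection, leafrank, &procRanks, &sfProcess);CHKERRQ(ierr);
     // ... use procRanks and sfProcess ...
     ierr = ISDestroy(&procRanks);CHKERRQ(ierr);
     ierr = PetscSFDestroy(&sfProcess);CHKERRQ(ierr);
     ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
     ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
     ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
     ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
*/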
555: /*@C
556: DMPlexCreateOverlap - Compute the set of points that should be sent to other ranks to create the requested overlap, expressed as a DMLabel of point/rank pairs.
558: Collective on DM
560: Input Parameters:
561: + dm - The DM
562: . levels - Number of overlap levels
563: . rootSection - The number of leaves for a given root point
564: . rootrank - The rank of each edge into the root point
565: . leafSection - The number of processes sharing a given leaf point
566: - leafrank - The rank of each process sharing a leaf point
568: Output Parameter:
569: . ovLabel - DMLabel containing remote overlap contributions as point/rank pairings
571: Level: developer
573: .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
574: @*/
575: PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
576: {
577: MPI_Comm comm;
578: DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
579: PetscSF sfPoint, sfProc;
580: const PetscSFNode *remote;
581: const PetscInt *local;
582: const PetscInt *nrank, *rrank;
583: PetscInt *adj = NULL;
584: PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l;
585: PetscMPIInt rank, size;
586: PetscBool useCone, useClosure, flg;
587: PetscErrorCode ierr;
590: PetscObjectGetComm((PetscObject) dm, &comm);
591: MPI_Comm_size(comm, &size);
592: MPI_Comm_rank(comm, &rank);
593: DMGetPointSF(dm, &sfPoint);
594: DMPlexGetChart(dm, &pStart, &pEnd);
595: PetscSectionGetChart(leafSection, &sStart, &sEnd);
596: PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
597: DMLabelCreate("Overlap adjacency", &ovAdjByRank);
598: /* Handle leaves: shared with the root point */
599: for (l = 0; l < nleaves; ++l) {
600: PetscInt adjSize = PETSC_DETERMINE, a;
602: DMPlexGetAdjacency(dm, local[l], &adjSize, &adj);
603: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);}
604: }
605: ISGetIndices(rootrank, &rrank);
606: ISGetIndices(leafrank, &nrank);
607: /* Handle roots */
608: for (p = pStart; p < pEnd; ++p) {
609: PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
611: if ((p >= sStart) && (p < sEnd)) {
612: /* Some leaves share a root with other leaves on different processes */
613: PetscSectionGetDof(leafSection, p, &neighbors);
614: if (neighbors) {
615: PetscSectionGetOffset(leafSection, p, &noff);
616: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
617: for (n = 0; n < neighbors; ++n) {
618: const PetscInt remoteRank = nrank[noff+n];
620: if (remoteRank == rank) continue;
621: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
622: }
623: }
624: }
625: /* Roots are shared with leaves */
626: PetscSectionGetDof(rootSection, p, &neighbors);
627: if (!neighbors) continue;
628: PetscSectionGetOffset(rootSection, p, &noff);
629: DMPlexGetAdjacency(dm, p, &adjSize, &adj);
630: for (n = 0; n < neighbors; ++n) {
631: const PetscInt remoteRank = rrank[noff+n];
633: if (remoteRank == rank) continue;
634: for (a = 0; a < adjSize; ++a) {DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);}
635: }
636: }
637: PetscFree(adj);
638: ISRestoreIndices(rootrank, &rrank);
639: ISRestoreIndices(leafrank, &nrank);
640: /* Add additional overlap levels */
641: for (l = 1; l < levels; l++) {
642: /* Propagate point donations over SF to capture remote connections */
643: DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
644: /* Add next level of point donations to the label */
645: DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
646: }
647: /* We require the closure in the overlap */
648: DMPlexGetAdjacencyUseCone(dm, &useCone);
649: DMPlexGetAdjacencyUseClosure(dm, &useClosure);
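  /* FEM adjacency (useCone = PETSC_FALSE, useClosure = PETSC_TRUE) already yields closed point
     sets; for any other setting the label must be closed explicitly */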
650: if (useCone || !useClosure) {
651: DMPlexPartitionLabelClosure(dm, ovAdjByRank);
652: }
653: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);
654: if (flg) {
655: DMLabelView(ovAdjByRank, PETSC_VIEWER_STDOUT_WORLD);
656: }
657: /* Make global process SF and invert sender to receiver label */
658: {
659: /* Build a global process SF */
660: PetscSFNode *remoteProc;
661: PetscMalloc1(size, &remoteProc);
662: for (p = 0; p < size; ++p) {
663: remoteProc[p].rank = p;
664: remoteProc[p].index = rank;
665: }
666: PetscSFCreate(comm, &sfProc);
667: PetscObjectSetName((PetscObject) sfProc, "Process SF");
668: PetscSFSetGraph(sfProc, size, size, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
669: }
670: DMLabelCreate("Overlap label", ovLabel);
671: DMPlexPartitionLabelInvert(dm, ovAdjByRank, sfProc, *ovLabel);
672: /* Add owned points, except for shared local points */
673: for (p = pStart; p < pEnd; ++p) {DMLabelSetValue(*ovLabel, p, rank);}
674: for (l = 0; l < nleaves; ++l) {
675: DMLabelClearValue(*ovLabel, local[l], rank);
676: DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
677: }
678: /* Clean up */
679: DMLabelDestroy(&ovAdjByRank);
680: PetscSFDestroy(&sfProc);
681: return(0);
682: }
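/* Editor's note (not part of petsc-3.8.4): a minimal sketch of building a one-level overlap label
   from the ownership information computed by DMPlexDistributeOwnership() above.

     PetscSection   rootSection, leafSection;
     IS             rootrank, leafrank;
     DMLabel        ovLabel;
     PetscErrorCode ierr;

     ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);CHKERRQ(ierr);
     ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);CHKERRQ(ierr);
     ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
     ierr = DMPlexCreateOverlap(dm, 1, rootSection, rootrank, leafSection, leafrank, &ovLabel);CHKERRQ(ierr);
     // ... use ovLabel ...
     ierr = DMLabelDestroy(&ovLabel);CHKERRQ(ierr);
     ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
     ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
     ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
     ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
*/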
684: /*@C
685: DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution that realizes the overlap described by the input SF
687: Collective on DM
689: Input Parameters:
690: + dm - The DM
691: - overlapSF - The SF mapping ghost points in overlap to owner points on other processes
693: Output Parameter:
694: . migrationSF - An SF that maps original points in old locations to points in new locations
696: Level: developer
698: .seealso: DMPlexCreateOverlap(), DMPlexDistribute()
699: @*/
700: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
701: {
702: MPI_Comm comm;
703: PetscMPIInt rank, size;
704: PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
705: PetscInt *pointDepths, *remoteDepths, *ilocal;
706: PetscInt *depthRecv, *depthShift, *depthIdx;
707: PetscSFNode *iremote;
708: PetscSF pointSF;
709: const PetscInt *sharedLocal;
710: const PetscSFNode *overlapRemote, *sharedRemote;
711: PetscErrorCode ierr;
715: PetscObjectGetComm((PetscObject)dm, &comm);
716: MPI_Comm_rank(comm, &rank);
717: MPI_Comm_size(comm, &size);
718: DMGetDimension(dm, &dim);
720: /* Before building the migration SF we need to know the new stratum offsets */
721: PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
722: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
723: for (d=0; d<dim+1; d++) {
724: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
725: for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
726: }
727: for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
728: PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);
729: PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);
731: /* Count received points in each stratum and compute the internal strata shift */
732: PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);
733: for (d=0; d<dim+1; d++) depthRecv[d]=0;
734: for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
735: depthShift[dim] = 0;
736: for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
737: for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
738: for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
739: for (d=0; d<dim+1; d++) {
740: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
741: depthIdx[d] = pStart + depthShift[d];
742: }
744: /* Build an SF that describes the full overlap migration */
745: DMPlexGetChart(dm, &pStart, &pEnd);
746: newLeaves = pEnd - pStart + nleaves;
747: PetscMalloc1(newLeaves, &ilocal);
748: PetscMalloc1(newLeaves, &iremote);
749: /* First map local points to themselves */
750: for (d=0; d<dim+1; d++) {
751: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
752: for (p=pStart; p<pEnd; p++) {
753: point = p + depthShift[d];
754: ilocal[point] = point;
755: iremote[point].index = p;
756: iremote[point].rank = rank;
757: depthIdx[d]++;
758: }
759: }
761: /* Add in the remote roots for currently shared points */
762: DMGetPointSF(dm, &pointSF);
763: PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
764: for (d=0; d<dim+1; d++) {
765: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
766: for (p=0; p<numSharedPoints; p++) {
767: if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
768: point = sharedLocal[p] + depthShift[d];
769: iremote[point].index = sharedRemote[p].index;
770: iremote[point].rank = sharedRemote[p].rank;
771: }
772: }
773: }
775: /* Now add the incoming overlap points */
776: for (p=0; p<nleaves; p++) {
777: point = depthIdx[remoteDepths[p]];
778: ilocal[point] = point;
779: iremote[point].index = overlapRemote[p].index;
780: iremote[point].rank = overlapRemote[p].rank;
781: depthIdx[remoteDepths[p]]++;
782: }
783: PetscFree2(pointDepths,remoteDepths);
785: PetscSFCreate(comm, migrationSF);
786: PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");
787: PetscSFSetFromOptions(*migrationSF);
788: DMPlexGetChart(dm, &pStart, &pEnd);
789: PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);
791: PetscFree3(depthRecv, depthShift, depthIdx);
792: return(0);
793: }
795: /*@
796: DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF for stratification.
798: Input Parameters:
799: + dm - The DM
800: - sf - A star forest with non-ordered leaves, usually defining a DM point migration
802: Output Parameter:
803: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
805: Level: developer
807: .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
808: @*/
809: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
810: {
811: MPI_Comm comm;
812: PetscMPIInt rank, size;
813: PetscInt d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
814: PetscInt *pointDepths, *remoteDepths, *ilocal;
815: PetscInt *depthRecv, *depthShift, *depthIdx;
816: PetscInt hybEnd[4];
817: const PetscSFNode *iremote;
818: PetscErrorCode ierr;
822: PetscObjectGetComm((PetscObject) dm, &comm);
823: MPI_Comm_rank(comm, &rank);
824: MPI_Comm_size(comm, &size);
825: DMPlexGetDepth(dm, &ldepth);
826: MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
827: if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);
829: /* Before building the migration SF we need to know the new stratum offsets */
830: PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
831: PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
832: DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[depth-1],&hybEnd[1],&hybEnd[0]);
833: for (d = 0; d < depth+1; ++d) {
834: DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
835: for (p = pStart; p < pEnd; ++p) {
836: if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */
837: pointDepths[p] = 2 * d;
838: } else {
839: pointDepths[p] = 2 * d + 1;
840: }
841: }
842: }
843: for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
844: PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);
845: PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);
846: /* Count received points in each stratum and compute the internal strata shift */
847: PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);
848: for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0;
849: for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
850: depthShift[2*depth+1] = 0;
851: for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1];
852: for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth];
853: depthShift[0] += depthRecv[1];
854: for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1];
855: for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0];
856: for (d = 2 * depth-1; d > 2; --d) {
857: PetscInt e;
859: for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d];
860: }
861: for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;}
862: /* Derive a new local permutation based on stratified indices */
863: PetscMalloc1(nleaves, &ilocal);
864: for (p = 0; p < nleaves; ++p) {
865: const PetscInt dep = remoteDepths[p];
867: ilocal[p] = depthShift[dep] + depthIdx[dep];
868: depthIdx[dep]++;
869: }
870: PetscSFCreate(comm, migrationSF);
871: PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");
872: PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);
873: PetscFree2(pointDepths,remoteDepths);
874: PetscFree3(depthRecv, depthShift, depthIdx);
875: return(0);
876: }
878: /*@
879: DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
881: Collective on DM
883: Input Parameters:
884: + dm - The DMPlex object
885: . pointSF - The PetscSF describing the communication pattern
886: . originalSection - The PetscSection for existing data layout
887: - originalVec - The existing data
889: Output Parameters:
890: + newSection - The PetscSection describing the new data layout
891: - newVec - The new data
893: Level: developer
895: .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
896: @*/
897: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
898: {
899: PetscSF fieldSF;
900: PetscInt *remoteOffsets, fieldSize;
901: PetscScalar *originalValues, *newValues;
905: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
906: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
908: PetscSectionGetStorageSize(newSection, &fieldSize);
909: VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
910: VecSetType(newVec,dm->vectype);
912: VecGetArray(originalVec, &originalValues);
913: VecGetArray(newVec, &newValues);
914: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
915: PetscFree(remoteOffsets);
916: PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);
917: PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);
918: PetscSFDestroy(&fieldSF);
919: VecRestoreArray(newVec, &newValues);
920: VecRestoreArray(originalVec, &originalValues);
921: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
922: return(0);
923: }
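/* Editor's note (not part of petsc-3.8.4): a minimal sketch of redistributing a section-laid-out
   Vec with DMPlexDistributeField(); this mirrors the pattern in DMPlexDistributeCoordinates()
   below.  The new section and Vec must already exist; the Vec is sized and typed inside the call.

     PetscSection   newSection;
     Vec            newVec;
     PetscErrorCode ierr;

     ierr = PetscSectionCreate(PETSC_COMM_SELF, &newSection);CHKERRQ(ierr);
     ierr = VecCreate(PETSC_COMM_SELF, &newVec);CHKERRQ(ierr);
     ierr = DMPlexDistributeField(dm, migrationSF, originalSection, originalVec, newSection, newVec);CHKERRQ(ierr);
*/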
925: /*@
926: DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
928: Collective on DM
930: Input Parameters:
931: + dm - The DMPlex object
932: . pointSF - The PetscSF describing the communication pattern
933: . originalSection - The PetscSection for existing data layout
934: - originalIS - The existing data
936: Output Parameters:
937: + newSection - The PetscSection describing the new data layout
938: - newIS - The new data
940: Level: developer
942: .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
943: @*/
944: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
945: {
946: PetscSF fieldSF;
947: PetscInt *newValues, *remoteOffsets, fieldSize;
948: const PetscInt *originalValues;
949: PetscErrorCode ierr;
952: PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);
953: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
955: PetscSectionGetStorageSize(newSection, &fieldSize);
956: PetscMalloc1(fieldSize, &newValues);
958: ISGetIndices(originalIS, &originalValues);
959: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
960: PetscFree(remoteOffsets);
961: PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
962: PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);
963: PetscSFDestroy(&fieldSF);
964: ISRestoreIndices(originalIS, &originalValues);
965: ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
966: PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);
967: return(0);
968: }
970: /*@
971: DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
973: Collective on DM
975: Input Parameters:
976: + dm - The DMPlex object
977: . pointSF - The PetscSF describing the communication pattern
978: . originalSection - The PetscSection for existing data layout
979: . datatype - The type of data
980: - originalData - The existing data
982: Output Parameters:
983: + newSection - The PetscSection describing the new data layout
984: - newData - The new data
986: Level: developer
988: .seealso: DMPlexDistribute(), DMPlexDistributeField()
989: @*/
990: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
991: {
992: PetscSF fieldSF;
993: PetscInt *remoteOffsets, fieldSize;
994: PetscMPIInt dataSize;
998: PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);
999: PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);
1001: PetscSectionGetStorageSize(newSection, &fieldSize);
1002: MPI_Type_size(datatype, &dataSize);
1003: PetscMalloc(fieldSize * dataSize, newData);
1005: PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
1006: PetscFree(remoteOffsets);
1007: PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);
1008: PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);
1009: PetscSFDestroy(&fieldSF);
1010: PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);
1011: return(0);
1012: }
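/* Editor's note (not part of petsc-3.8.4): a minimal sketch of moving raw integer data with
   DMPlexDistributeData(); the new array is allocated inside the call, so the caller is
   responsible for freeing it.

     PetscSection   newSection;
     PetscInt      *newData;
     PetscErrorCode ierr;

     ierr = PetscSectionCreate(PETSC_COMM_SELF, &newSection);CHKERRQ(ierr);
     ierr = DMPlexDistributeData(dm, migrationSF, originalSection, MPIU_INT, originalData, newSection, (void **) &newData);CHKERRQ(ierr);
     // ... use newData ...
     ierr = PetscFree(newData);CHKERRQ(ierr);
     ierr = PetscSectionDestroy(&newSection);CHKERRQ(ierr);
*/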
1014: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1015: {
1016: DM_Plex *mesh = (DM_Plex*) dm->data;
1017: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1018: MPI_Comm comm;
1019: PetscSF coneSF;
1020: PetscSection originalConeSection, newConeSection;
1021: PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
1022: PetscBool flg;
1023: PetscErrorCode ierr;
1028: PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);
1030: /* Distribute cone section */
1031: PetscObjectGetComm((PetscObject)dm, &comm);
1032: DMPlexGetConeSection(dm, &originalConeSection);
1033: DMPlexGetConeSection(dmParallel, &newConeSection);
1034: PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
1035: DMSetUp(dmParallel);
1036: {
1037: PetscInt pStart, pEnd, p;
1039: PetscSectionGetChart(newConeSection, &pStart, &pEnd);
1040: for (p = pStart; p < pEnd; ++p) {
1041: PetscInt coneSize;
1042: PetscSectionGetDof(newConeSection, p, &coneSize);
1043: pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
1044: }
1045: }
1046: /* Communicate and renumber cones */
1047: PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
1048: PetscFree(remoteOffsets);
1049: DMPlexGetCones(dm, &cones);
1050: if (original) {
1051: PetscInt numCones;
1053: PetscSectionGetStorageSize(originalConeSection,&numCones); PetscMalloc1(numCones,&globCones);
1054: ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
1055: }
1056: else {
1057: globCones = cones;
1058: }
1059: DMPlexGetCones(dmParallel, &newCones);
1060: PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);
1061: PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);
1062: if (original) {
1063: PetscFree(globCones);
1064: }
1065: PetscSectionGetStorageSize(newConeSection, &newConesSize);
1066: ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
1067: #if PETSC_USE_DEBUG
1068: {
1069: PetscInt p;
1070: PetscBool valid = PETSC_TRUE;
1071: for (p = 0; p < newConesSize; ++p) {
1072: if (newCones[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1073: }
1074: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1075: }
1076: #endif
1077: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);
1078: if (flg) {
1079: PetscPrintf(comm, "Serial Cone Section:\n");
1080: PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);
1081: PetscPrintf(comm, "Parallel Cone Section:\n");
1082: PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);
1083: PetscSFView(coneSF, NULL);
1084: }
1085: DMPlexGetConeOrientations(dm, &cones);
1086: DMPlexGetConeOrientations(dmParallel, &newCones);
1087: PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);
1088: PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);
1089: PetscSFDestroy(&coneSF);
1090: PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);
1091: /* Create supports and stratify DMPlex */
1092: {
1093: PetscInt pStart, pEnd;
1095: PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
1096: PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
1097: }
1098: DMPlexSymmetrize(dmParallel);
1099: DMPlexStratify(dmParallel);
1100: pmesh->useCone = mesh->useCone;
1101: pmesh->useClosure = mesh->useClosure;
1102: return(0);
1103: }
1105: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1106: {
1107: MPI_Comm comm;
1108: PetscSection originalCoordSection, newCoordSection;
1109: Vec originalCoordinates, newCoordinates;
1110: PetscInt bs;
1111: PetscBool isper;
1112: const char *name;
1113: const PetscReal *maxCell, *L;
1114: const DMBoundaryType *bd;
1115: PetscErrorCode ierr;
1121: PetscObjectGetComm((PetscObject)dm, &comm);
1122: DMGetCoordinateSection(dm, &originalCoordSection);
1123: DMGetCoordinateSection(dmParallel, &newCoordSection);
1124: DMGetCoordinatesLocal(dm, &originalCoordinates);
1125: if (originalCoordinates) {
1126: VecCreate(PETSC_COMM_SELF, &newCoordinates);
1127: PetscObjectGetName((PetscObject) originalCoordinates, &name);
1128: PetscObjectSetName((PetscObject) newCoordinates, name);
1130: DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1131: DMSetCoordinatesLocal(dmParallel, newCoordinates);
1132: VecGetBlockSize(originalCoordinates, &bs);
1133: VecSetBlockSize(newCoordinates, bs);
1134: VecDestroy(&newCoordinates);
1135: }
1136: DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);
1137: DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);
1138: return(0);
1139: }
1141: /* Here we are assuming that process 0 always has everything */
1142: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1143: {
1144: DM_Plex *mesh = (DM_Plex*) dm->data;
1145: MPI_Comm comm;
1146: DMLabel depthLabel;
1147: PetscMPIInt rank;
1148: PetscInt depth, d, numLabels, numLocalLabels, l;
1149: PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1150: PetscObjectState depthState = -1;
1151: PetscErrorCode ierr;
1156: PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);
1157: PetscObjectGetComm((PetscObject)dm, &comm);
1158: MPI_Comm_rank(comm, &rank);
1160: /* If the user has changed the depth label, communicate it instead */
1161: DMPlexGetDepth(dm, &depth);
1162: DMPlexGetDepthLabel(dm, &depthLabel);
1163: if (depthLabel) {DMLabelGetState(depthLabel, &depthState);}
1164: lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1165: MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1166: if (sendDepth) {
1167: DMRemoveLabel(dmParallel, "depth", &depthLabel);
1168: DMLabelDestroy(&depthLabel);
1169: }
1170: /* Everyone must have either the same number of labels, or none */
1171: DMGetNumLabels(dm, &numLocalLabels);
1172: numLabels = numLocalLabels;
1173: MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1174: if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1175: for (l = numLabels-1; l >= 0; --l) {
1176: DMLabel label = NULL, labelNew = NULL;
1177: PetscBool isdepth;
1179: if (hasLabels) {
1180: DMGetLabelByNum(dm, l, &label);
1181: /* Skip "depth" because it is recreated */
1182: PetscStrcmp(label->name, "depth", &isdepth);
1183: }
1184: MPI_Bcast(&isdepth, 1, MPIU_BOOL, 0, comm);
1185: if (isdepth && !sendDepth) continue;
1186: DMLabelDistribute(label, migrationSF, &labelNew);
1187: if (isdepth) {
1188: /* Put in any missing strata which can occur if users are managing the depth label themselves */
1189: PetscInt gdepth;
1191: MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1192: if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1193: for (d = 0; d <= gdepth; ++d) {
1194: PetscBool has;
1196: DMLabelHasStratum(labelNew, d, &has);
1197: if (!has) DMLabelAddStratum(labelNew, d);
1198: }
1199: }
1200: DMAddLabel(dmParallel, labelNew);
1201: }
1202: PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);
1203: return(0);
1204: }
1206: static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
1207: {
1208: DM_Plex *mesh = (DM_Plex*) dm->data;
1209: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1210: PetscBool *isHybrid, *isHybridParallel;
1211: PetscInt dim, depth, d;
1212: PetscInt pStart, pEnd, pStartP, pEndP;
1213: PetscErrorCode ierr;
1219: DMGetDimension(dm, &dim);
1220: DMPlexGetDepth(dm, &depth);
1221: DMPlexGetChart(dm,&pStart,&pEnd);
1222: DMPlexGetChart(dmParallel,&pStartP,&pEndP);
1223: PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);
1224: for (d = 0; d <= depth; d++) {
1225: PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d];
1227: if (hybridMax >= 0) {
1228: PetscInt sStart, sEnd, p;
1230: DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);
1231: for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE;
1232: }
1233: }
1234: PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1235: PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);
1236: for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
1237: for (d = 0; d <= depth; d++) {
1238: PetscInt sStart, sEnd, p, dd;
1240: DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);
1241: dd = (depth == 1 && d == 1) ? dim : d;
1242: for (p = sStart; p < sEnd; p++) {
1243: if (isHybridParallel[p-pStartP]) {
1244: pmesh->hybridPointMax[dd] = p;
1245: break;
1246: }
1247: }
1248: }
1249: PetscFree2(isHybrid,isHybridParallel);
1250: return(0);
1251: }
1253: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1254: {
1255: DM_Plex *mesh = (DM_Plex*) dm->data;
1256: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1257: MPI_Comm comm;
1258: DM refTree;
1259: PetscSection origParentSection, newParentSection;
1260: PetscInt *origParents, *origChildIDs;
1261: PetscBool flg;
1262: PetscErrorCode ierr;
1267: PetscObjectGetComm((PetscObject)dm, &comm);
1269: /* Set up tree */
1270: DMPlexGetReferenceTree(dm,&refTree);
1271: DMPlexSetReferenceTree(dmParallel,refTree);
1272: DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);
1273: if (origParentSection) {
1274: PetscInt pStart, pEnd;
1275: PetscInt *newParents, *newChildIDs, *globParents;
1276: PetscInt *remoteOffsetsParents, newParentSize;
1277: PetscSF parentSF;
1279: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1280: PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);
1281: PetscSectionSetChart(newParentSection,pStart,pEnd);
1282: PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1283: PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1284: PetscFree(remoteOffsetsParents);
1285: PetscSectionGetStorageSize(newParentSection,&newParentSize);
1286: PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);
1287: if (original) {
1288: PetscInt numParents;
1290: PetscSectionGetStorageSize(origParentSection,&numParents);
1291: PetscMalloc1(numParents,&globParents);
1292: ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1293: }
1294: else {
1295: globParents = origParents;
1296: }
1297: PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);
1298: PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);
1299: if (original) {
1300: PetscFree(globParents);
1301: }
1302: PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1303: PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);
1304: ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1305: #if PETSC_USE_DEBUG
1306: {
1307: PetscInt p;
1308: PetscBool valid = PETSC_TRUE;
1309: for (p = 0; p < newParentSize; ++p) {
1310: if (newParents[p] < 0) {valid = PETSC_FALSE; PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);}
1311: }
1312: if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1313: }
1314: #endif
1315: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);
1316: if (flg) {
1317: PetscPrintf(comm, "Serial Parent Section: \n");
1318: PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);
1319: PetscPrintf(comm, "Parallel Parent Section: \n");
1320: PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);
1321: PetscSFView(parentSF, NULL);
1322: }
1323: DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);
1324: PetscSectionDestroy(&newParentSection);
1325: PetscFree2(newParents,newChildIDs);
1326: PetscSFDestroy(&parentSF);
1327: }
1328: pmesh->useAnchors = mesh->useAnchors;
1329: return(0);
1330: }
1332: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1333: {
1334: DM_Plex *mesh = (DM_Plex*) dm->data;
1335: DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data;
1336: PetscMPIInt rank, size;
1337: MPI_Comm comm;
1338: PetscErrorCode ierr;
1344: /* Create point SF for parallel mesh */
1345: PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);
1346: PetscObjectGetComm((PetscObject)dm, &comm);
1347: MPI_Comm_rank(comm, &rank);
1348: MPI_Comm_size(comm, &size);
1349: {
1350: const PetscInt *leaves;
1351: PetscSFNode *remotePoints, *rowners, *lowners;
1352: PetscInt numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1353: PetscInt pStart, pEnd;
1355: DMPlexGetChart(dmParallel, &pStart, &pEnd);
1356: PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1357: PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);
1358: for (p=0; p<numRoots; p++) {
1359: rowners[p].rank = -1;
1360: rowners[p].index = -1;
1361: }
1362: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1363: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1364: for (p = 0; p < numLeaves; ++p) {
1365: if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1366: lowners[p].rank = rank;
1367: lowners[p].index = leaves ? leaves[p] : p;
1368: } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
1369: lowners[p].rank = -2;
1370: lowners[p].index = -2;
1371: }
1372: }
1373: for (p=0; p<numRoots; p++) { /* Roots must not participate in the reduction; flag them so that MAXLOC does not use them */
1374: rowners[p].rank = -3;
1375: rowners[p].index = -3;
1376: }
1377: PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1378: PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1379: PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);
1380: PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);
1381: for (p = 0; p < numLeaves; ++p) {
1382: if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
1383: if (lowners[p].rank != rank) ++numGhostPoints;
1384: }
1385: PetscMalloc1(numGhostPoints, &ghostPoints);
1386: PetscMalloc1(numGhostPoints, &remotePoints);
1387: for (p = 0, gp = 0; p < numLeaves; ++p) {
1388: if (lowners[p].rank != rank) {
1389: ghostPoints[gp] = leaves ? leaves[p] : p;
1390: remotePoints[gp].rank = lowners[p].rank;
1391: remotePoints[gp].index = lowners[p].index;
1392: ++gp;
1393: }
1394: }
1395: PetscFree2(rowners,lowners);
1396: PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1397: PetscSFSetFromOptions((dmParallel)->sf);
1398: }
1399: pmesh->useCone = mesh->useCone;
1400: pmesh->useClosure = mesh->useClosure;
1401: PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);
1402: return(0);
1403: }
1405: /*@C
1406: DMPlexCreatePointSF - Build a point SF from an SF describing a point migration
1408: Input Parameters:
1409: + dm - The source DMPlex object
1410: . migrationSF - The star forest that describes the parallel point remapping
1411: - ownership - Flag causing a vote to determine point ownership
1413: Output Parameter:
1414: . pointSF - The star forest describing the point overlap in the remapped DM
1416: Level: developer
1418: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1419: @*/
1420: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1421: {
1422: PetscMPIInt rank;
1423: PetscInt p, nroots, nleaves, idx, npointLeaves;
1424: PetscInt *pointLocal;
1425: const PetscInt *leaves;
1426: const PetscSFNode *roots;
1427: PetscSFNode *rootNodes, *leafNodes, *pointRemote;
1428: PetscErrorCode ierr;
1432: MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);
1434: PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1435: PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1436: if (ownership) {
1437: /* Point ownership vote: Process with highest rank owns shared points */
1438: for (p = 0; p < nleaves; ++p) {
1439: /* Either put in a bid or we know we own it */
1440: leafNodes[p].rank = rank;
1441: leafNodes[p].index = p;
1442: }
1443: for (p = 0; p < nroots; p++) {
1444: /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1445: rootNodes[p].rank = -3;
1446: rootNodes[p].index = -3;
1447: }
1448: PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1449: PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);
1450: } else {
1451: for (p = 0; p < nroots; p++) {
1452: rootNodes[p].index = -1;
1453: rootNodes[p].rank = rank;
1454: };
1455: for (p = 0; p < nleaves; p++) {
1456: /* Write new local id into old location */
1457: if (roots[p].rank == rank) {
1458: rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1459: }
1460: }
1461: }
1462: PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1463: PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);
1465: for (npointLeaves = 0, p = 0; p < nleaves; p++) {if (leafNodes[p].rank != rank) npointLeaves++;}
1466: PetscMalloc1(npointLeaves, &pointLocal);
1467: PetscMalloc1(npointLeaves, &pointRemote);
1468: for (idx = 0, p = 0; p < nleaves; p++) {
1469: if (leafNodes[p].rank != rank) {
1470: pointLocal[idx] = p;
1471: pointRemote[idx] = leafNodes[p];
1472: idx++;
1473: }
1474: }
1475: PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);
1476: PetscSFSetFromOptions(*pointSF);
1477: PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1478: PetscFree2(rootNodes, leafNodes);
1479: return(0);
1480: }
1482: /*@C
1483: DMPlexMigrate - Migrates internal DM data over the supplied star forest
1485: Input Parameters:
1486: + dm - The source DMPlex object
1487: - sf - The star forest communication context describing the migration pattern
1489: Output Parameter:
1490: . targetDM - The target DMPlex object
1492: Level: intermediate
1494: .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1495: @*/
1496: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1497: {
1498: MPI_Comm comm;
1499: PetscInt dim, nroots;
1500: PetscSF sfPoint;
1501: ISLocalToGlobalMapping ltogMigration;
1502: ISLocalToGlobalMapping ltogOriginal = NULL;
1503: PetscBool flg;
1504: PetscErrorCode ierr;
1508: PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1509: PetscObjectGetComm((PetscObject) dm, &comm);
1510: DMGetDimension(dm, &dim);
1511: DMSetDimension(targetDM, dim);
1513: /* Check for a one-to-all distribution pattern */
1514: DMGetPointSF(dm, &sfPoint);
1515: PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1516: if (nroots >= 0) {
1517: IS isOriginal;
1518: PetscInt n, size, nleaves;
1519: PetscInt *numbering_orig, *numbering_new;
1520: /* Get the original point numbering */
1521: DMPlexCreatePointNumbering(dm, &isOriginal);
1522: ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal);
1523: ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1524: ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1525: /* Convert to positive global numbers */
1526: for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1527: /* Derive the new local-to-global mapping from the old one */
1528: PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1529: PetscMalloc1(nleaves, &numbering_new);
1530: PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1531: PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);
1532: ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, <ogMigration);
1533: ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);
1534: ISDestroy(&isOriginal);
1535: } else {
1536: /* One-to-all distribution pattern: We can derive LToG from SF */
1537: ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1538: }
1539: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1540: if (flg) {
1541: PetscPrintf(comm, "Point renumbering for DM migration:\n");
1542: ISLocalToGlobalMappingView(ltogMigration, NULL);
1543: }
1544: /* Migrate DM data to target DM */
1545: DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1546: DMPlexDistributeLabels(dm, sf, targetDM);
1547: DMPlexDistributeCoordinates(dm, sf, targetDM);
1548: DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);
1549: DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1550: ISLocalToGlobalMappingDestroy(&ltogOriginal);
1551: ISLocalToGlobalMappingDestroy(&ltogMigration);
1552: PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1553: return(0);
1554: }
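/*
  A minimal usage sketch for DMPlexMigrate() (illustrative only): 'dm' is assumed to be an
  existing DMPlex and 'sfMigration' a point migration star forest, e.g. one obtained from
  DMPlexPartitionLabelCreateSF() as in DMPlexDistribute() below; error checking omitted.

    DM target;
    DMPlexCreate(PetscObjectComm((PetscObject) dm), &target);
    DMPlexMigrate(dm, sfMigration, target);   // moves cones, labels, coordinates, trees
    // The caller is still responsible for building and attaching a point SF on 'target',
    // e.g. via DMPlexCreatePointSF() and DMSetPointSF(), as DMPlexDistribute() does below.
*/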
1556: /*@C
1557: DMPlexDistribute - Distributes the mesh and any associated sections.
1559: Collective on DM
1561: Input Parameters:
1562: + dm - The original DMPlex object
1563: - overlap - The overlap of partitions, 0 is the default
1565: Output Parameters:
1566: + sf - The PetscSF used for point distribution
1567: - dmParallel - The distributed DMPlex object, or NULL
1569: Note: If the mesh was not distributed, dmParallel is set to NULL.
1571: The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1572: DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1573: representation on the mesh.
1575: Level: intermediate
1577: .keywords: mesh, elements
1578: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1579: @*/
1580: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1581: {
1582: MPI_Comm comm;
1583: PetscPartitioner partitioner;
1584: IS cellPart;
1585: PetscSection cellPartSection;
1586: DM dmCoord;
1587: DMLabel lblPartition, lblMigration;
1588: PetscSF sfProcess, sfMigration, sfStratified, sfPoint;
1589: PetscBool flg;
1590: PetscMPIInt rank, size, p;
1591: PetscErrorCode ierr;
1598: PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);
1599: PetscObjectGetComm((PetscObject)dm,&comm);
1600: MPI_Comm_rank(comm, &rank);
1601: MPI_Comm_size(comm, &size);
1603: if (sf) *sf = NULL;
1604: *dmParallel = NULL;
1605: if (size == 1) {
1606: PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1607: return(0);
1608: }
1610: /* Create cell partition */
1611: PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);
1612: PetscSectionCreate(comm, &cellPartSection);
1613: DMPlexGetPartitioner(dm, &partitioner);
1614: PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);
1615: {
1616: /* Convert partition to DMLabel */
1617: PetscInt proc, pStart, pEnd, npoints, poffset, point;
1618: const PetscInt *points;
1619: DMLabelCreate("Point Partition", &lblPartition);
1620: ISGetIndices(cellPart, &points);
1621: PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1622: for (proc = pStart; proc < pEnd; proc++) {
1623: PetscSectionGetDof(cellPartSection, proc, &npoints);
1624: PetscSectionGetOffset(cellPartSection, proc, &poffset);
1625: for (point = poffset; point < poffset+npoints; ++point) {
1626: DMLabelSetValue(lblPartition, points[point], proc);
1627: }
1628: }
1629: ISRestoreIndices(cellPart, &points);
1630: }
1631: DMPlexPartitionLabelClosure(dm, lblPartition);
1632: {
1633: /* Build a global process SF */
1634: PetscSFNode *remoteProc;
1635: PetscMalloc1(size, &remoteProc);
1636: for (p = 0; p < size; ++p) {
1637: remoteProc[p].rank = p;
1638: remoteProc[p].index = rank;
1639: }
1640: PetscSFCreate(comm, &sfProcess);
1641: PetscObjectSetName((PetscObject) sfProcess, "Process SF");
1642: PetscSFSetGraph(sfProcess, size, size, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);
1643: }
1644: DMLabelCreate("Point migration", &lblMigration);
1645: DMPlexPartitionLabelInvert(dm, lblPartition, sfProcess, lblMigration);
1646: DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1647: /* Stratify the SF in case we are migrating an already parallel plex */
1648: DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1649: PetscSFDestroy(&sfMigration);
1650: sfMigration = sfStratified;
1651: PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);
1652: PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);
1653: if (flg) {
1654: DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);
1655: PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);
1656: }
1658: /* Create non-overlapping parallel DM and migrate internal data */
1659: DMPlexCreate(comm, dmParallel);
1660: PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");
1661: DMPlexMigrate(dm, sfMigration, *dmParallel);
1663: /* Build the point SF without overlap */
1664: DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1665: DMSetPointSF(*dmParallel, sfPoint);
1666: DMGetCoordinateDM(*dmParallel, &dmCoord);
1667: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1668: if (flg) {PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);}
1670: if (overlap > 0) {
1671: DM dmOverlap;
1672: PetscInt nroots, nleaves;
1673: PetscSFNode *newRemote;
1674: const PetscSFNode *oldRemote;
1675: PetscSF sfOverlap, sfOverlapPoint;
1676: /* Add the partition overlap to the distributed DM */
1677: DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1678: DMDestroy(dmParallel);
1679: *dmParallel = dmOverlap;
1680: if (flg) {
1681: PetscPrintf(comm, "Overlap Migration SF:\n");
1682: PetscSFView(sfOverlap, NULL);
1683: }
1685: /* Re-map the migration SF to establish the full migration pattern */
1686: PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);
1687: PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1688: PetscMalloc1(nleaves, &newRemote);
1689: PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1690: PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);
1691: PetscSFCreate(comm, &sfOverlapPoint);
1692: PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1693: PetscSFDestroy(&sfOverlap);
1694: PetscSFDestroy(&sfMigration);
1695: sfMigration = sfOverlapPoint;
1696: }
1697: /* Cleanup Partition */
1698: PetscSFDestroy(&sfProcess);
1699: DMLabelDestroy(&lblPartition);
1700: DMLabelDestroy(&lblMigration);
1701: PetscSectionDestroy(&cellPartSection);
1702: ISDestroy(&cellPart);
1703: /* Copy BC */
1704: DMCopyBoundary(dm, *dmParallel);
1705: /* Create sfNatural */
1706: if (dm->useNatural) {
1707: PetscSection section;
1709: DMGetDefaultSection(dm, &section);
1710: DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1711: }
1712: /* Cleanup */
1713: if (sf) {*sf = sfMigration;}
1714: else {PetscSFDestroy(&sfMigration);}
1715: PetscSFDestroy(&sfPoint);
1716: PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);
1717: return(0);
1718: }
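/*
  Typical usage of DMPlexDistribute() (a sketch, not taken verbatim from the PETSc examples):
  the original mesh is replaced by its distributed counterpart whenever one is produced;
  'dm' is an assumed existing DMPlex and error checking is omitted.

    DM      dmDist = NULL;
    PetscSF sf     = NULL;
    DMPlexDistribute(dm, 0, &sf, &dmDist);   // overlap = 0: non-overlapping partition
    if (dmDist) {                            // dmDist stays NULL on a single process
      DMDestroy(&dm);
      dm = dmDist;
    }
    PetscSFDestroy(&sf);                     // no-op if sf was left NULL
*/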
1720: /*@C
1721: DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.
1723: Collective on DM
1725: Input Parameters:
1726: + dm - The non-overlapping distributed DMPlex object
1727: - overlap - The overlap of partitions, 0 is the default
1729: Output Parameters:
1730: + sf - The PetscSF used for point distribution
1731: - dmOverlap - The overlapping distributed DMPlex object, or NULL
1733: Note: If the communicator has only a single process, dmOverlap is set to NULL.
1735: The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
1736: DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
1737: representation on the mesh.
1739: Level: intermediate
1741: .keywords: mesh, elements
1742: .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
1743: @*/
1744: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1745: {
1746: MPI_Comm comm;
1747: PetscMPIInt size, rank;
1748: PetscSection rootSection, leafSection;
1749: IS rootrank, leafrank;
1750: DM dmCoord;
1751: DMLabel lblOverlap;
1752: PetscSF sfOverlap, sfStratified, sfPoint;
1753: PetscErrorCode ierr;
1760: PetscObjectGetComm((PetscObject)dm,&comm);
1761: MPI_Comm_size(comm, &size);
1762: MPI_Comm_rank(comm, &rank);
1763: if (size == 1) {*dmOverlap = NULL; return(0);}
1764: PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1766: /* Compute point overlap with neighbouring processes on the distributed DM */
1767: PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);
1768: PetscSectionCreate(comm, &rootSection);
1769: PetscSectionCreate(comm, &leafSection);
1770: DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
1771: DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
1772: /* Convert overlap label to stratified migration SF */
1773: DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
1774: DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
1775: PetscSFDestroy(&sfOverlap);
1776: sfOverlap = sfStratified;
1777: PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");
1778: PetscSFSetFromOptions(sfOverlap);
1780: PetscSectionDestroy(&rootSection);
1781: PetscSectionDestroy(&leafSection);
1782: ISDestroy(&rootrank);
1783: ISDestroy(&leafrank);
1784: PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);
1786: /* Build the overlapping DM */
1787: DMPlexCreate(comm, dmOverlap);
1788: PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");
1789: DMPlexMigrate(dm, sfOverlap, *dmOverlap);
1790: /* Build the new point SF */
1791: DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
1792: DMSetPointSF(*dmOverlap, sfPoint);
1793: DMGetCoordinateDM(*dmOverlap, &dmCoord);
1794: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1795: PetscSFDestroy(&sfPoint);
1796: /* Cleanup overlap partition */
1797: DMLabelDestroy(&lblOverlap);
1798: if (sf) *sf = sfOverlap;
1799: else {PetscSFDestroy(&sfOverlap);}
1800: PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
1801: return(0);
1802: }
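/*
  A sketch of adding one level of cell overlap to an already distributed DMPlex 'dm'
  (illustrative only; 'dm' is assumed to exist and error checking is omitted):

    DM      dmOverlap = NULL;
    PetscSF sfOverlap = NULL;
    DMPlexDistributeOverlap(dm, 1, &sfOverlap, &dmOverlap);
    if (dmOverlap) {                 // NULL on a single process
      DMDestroy(&dm);
      dm = dmOverlap;
    }
    PetscSFDestroy(&sfOverlap);
*/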
1804: /*@C
1805: DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1806: root process of the original's communicator.
1808: Input Parameter:
1809: . dm - the original DMPlex object
1811: Output Parameter:
1812: . gatherMesh - the gathered DM object, or NULL
1814: Level: intermediate
1816: .keywords: mesh
1817: .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1818: @*/
1819: PetscErrorCode DMPlexGetGatherDM(DM dm, DM * gatherMesh)
1820: {
1821: MPI_Comm comm;
1822: PetscMPIInt size;
1823: PetscPartitioner oldPart, gatherPart;
1828: comm = PetscObjectComm((PetscObject)dm);
1829: MPI_Comm_size(comm,&size);
1830: *gatherMesh = NULL;
1831: if (size == 1) return(0);
1832: DMPlexGetPartitioner(dm,&oldPart);
1833: PetscObjectReference((PetscObject)oldPart);
1834: PetscPartitionerCreate(comm,&gatherPart);
1835: PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);
1836: DMPlexSetPartitioner(dm,gatherPart);
1837: DMPlexDistribute(dm,0,NULL,gatherMesh);
1838: DMPlexSetPartitioner(dm,oldPart);
1839: PetscPartitionerDestroy(&gatherPart);
1840: PetscPartitionerDestroy(&oldPart);
1841: return(0);
1842: }
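/*
  DMPlexGetGatherDM() is typically used to funnel a distributed mesh onto a single rank,
  e.g. for serial post-processing.  A minimal sketch (illustrative only; 'dm' assumed,
  error checking omitted):

    DM gatherMesh = NULL;
    DMPlexGetGatherDM(dm, &gatherMesh);
    if (gatherMesh) {                // NULL on a single process
      // all mesh points now reside on rank 0 of dm's communicator
      DMDestroy(&gatherMesh);
    }
*/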
1844: /*@C
1845: DMPlexGetRedundantDM - Get a copy of the DMPlex in which the entire mesh is replicated on each process.
1847: Input Parameter:
1848: . dm - the original DMPlex object
1850: Output Parameter:
1851: . redundantMesh - the redundant DM object, or NULL
1853: Level: intermediate
1855: .keywords: mesh
1856: .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1857: @*/
1858: PetscErrorCode DMPlexGetRedundantDM(DM dm, DM * redundantMesh)
1859: {
1860: MPI_Comm comm;
1861: PetscMPIInt size, rank;
1862: PetscInt pStart, pEnd, p;
1863: PetscInt numPoints = -1;
1864: PetscSF migrationSF, sfPoint;
1865: DM gatherDM, dmCoord;
1866: PetscSFNode *points;
1871: comm = PetscObjectComm((PetscObject)dm);
1872: MPI_Comm_size(comm,&size);
1873: *redundantMesh = NULL;
1874: if (size == 1) {
1875: PetscObjectReference((PetscObject) dm);
1876: *redundantMesh = dm;
1877: return(0);
1878: }
1879: DMPlexGetGatherDM(dm,&gatherDM);
1880: if (!gatherDM) return(0);
1881: MPI_Comm_rank(comm,&rank);
1882: DMPlexGetChart(gatherDM,&pStart,&pEnd);
1883: numPoints = pEnd - pStart;
1884: MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);
1885: PetscMalloc1(numPoints,&points);
1886: PetscSFCreate(comm,&migrationSF);
1887: for (p = 0; p < numPoints; p++) {
1888: points[p].index = p;
1889: points[p].rank = 0;
1890: }
1891: PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);
1892: DMPlexCreate(comm, redundantMesh);
1893: PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");
1894: DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
1895: DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
1896: DMSetPointSF(*redundantMesh, sfPoint);
1897: DMGetCoordinateDM(*redundantMesh, &dmCoord);
1898: if (dmCoord) {DMSetPointSF(dmCoord, sfPoint);}
1899: PetscSFDestroy(&sfPoint);
1900: PetscSFDestroy(&migrationSF);
1901: DMDestroy(&gatherDM);
1902: return(0);
1903: }
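/*
  A minimal sketch of obtaining a fully replicated mesh (illustrative only; 'dm' assumed,
  error checking omitted).  Note that on a single process the routine simply returns a
  reference to 'dm' itself.

    DM redundant = NULL;
    DMPlexGetRedundantDM(dm, &redundant);
    if (redundant) {
      // every rank now holds the complete mesh
      DMDestroy(&redundant);
    }
*/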