Actual source code: sfallgatherv.c
1: #include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>
3: PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gatherv(PetscSF, MPI_Datatype, PetscMemType, const void *, PetscMemType, void *, MPI_Op);
5: /* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
6: PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf, PetscInt *nroots, PetscInt *nleaves, const PetscInt **ilocal, const PetscSFNode **iremote)
7: {
8: PetscInt i, j, k;
9: const PetscInt *range;
10: PetscMPIInt size;
12: PetscFunctionBegin;
13: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
14: if (nroots) *nroots = sf->nroots;
15: if (nleaves) *nleaves = sf->nleaves;
16: if (ilocal) *ilocal = NULL; /* Contiguous leaves */
17: if (iremote) {
18: if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
19: PetscCall(PetscLayoutGetRanges(sf->map, &range));
20: PetscCall(PetscMalloc1(sf->nleaves, &sf->remote));
21: sf->remote_alloc = sf->remote;
22: for (i = 0; i < size; i++) {
23: for (j = range[i], k = 0; j < range[i + 1]; j++, k++) {
24: sf->remote[j].rank = i;
25: sf->remote[j].index = k;
26: }
27: }
28: }
29: *iremote = sf->remote;
30: }
31: PetscFunctionReturn(PETSC_SUCCESS);
32: }
34: PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
35: {
36: PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
37: PetscMPIInt size;
38: PetscInt i;
39: const PetscInt *range;
40: MPI_Comm comm;
42: PetscFunctionBegin;
43: PetscCall(PetscSFSetUp_Allgather(sf));
44: PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
45: PetscCallMPI(MPI_Comm_size(comm, &size));
46: if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
47: PetscBool isallgatherv = PETSC_FALSE;
49: PetscCall(PetscMalloc1(size, &dat->recvcounts));
50: PetscCall(PetscMalloc1(size, &dat->displs));
51: PetscCall(PetscLayoutGetRanges(sf->map, &range));
53: for (i = 0; i < size; i++) {
54: PetscCall(PetscMPIIntCast(range[i], &dat->displs[i]));
55: PetscCall(PetscMPIIntCast(range[i + 1] - range[i], &dat->recvcounts[i]));
56: }
58: /* check if we actually have a one-to-all pattern */
59: PetscCall(PetscObjectTypeCompare((PetscObject)sf, PETSCSFALLGATHERV, &isallgatherv));
60: if (isallgatherv) {
61: PetscMPIInt rank, nRanksWithZeroRoots;
63: nRanksWithZeroRoots = (sf->nroots == 0) ? 1 : 0; /* I have no roots */
64: PetscCallMPI(MPI_Allreduce(MPI_IN_PLACE, &nRanksWithZeroRoots, 1, MPI_INT, MPI_SUM, comm));
65: if (nRanksWithZeroRoots == size - 1) { /* Only one rank has roots, which indicates a bcast pattern */
66: dat->bcast_pattern = PETSC_TRUE;
67: PetscCallMPI(MPI_Comm_rank(comm, &rank));
68: dat->bcast_root = sf->nroots > 0 ? rank : -1;
69: PetscCallMPI(MPI_Allreduce(MPI_IN_PLACE, &dat->bcast_root, 1, MPI_INT, MPI_MAX, comm));
70: }
71: }
72: }
73: PetscFunctionReturn(PETSC_SUCCESS);
74: }
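
The one-to-all check in PetscSFSetUp_Allgatherv boils down to two small collectives: a SUM reduction that counts the ranks owning zero roots, and a MAX reduction that identifies the single root-owning rank. A minimal MPI-only sketch of that idea, using made-up example data and not part of sfallgatherv.c:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  int rank, size, nroots, nRanksWithZeroRoots, bcast_root;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  nroots              = (rank == 0) ? 4 : 0;   /* example data: only rank 0 owns roots */
  nRanksWithZeroRoots = (nroots == 0) ? 1 : 0; /* I have no roots */
  MPI_Allreduce(MPI_IN_PLACE, &nRanksWithZeroRoots, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  if (nRanksWithZeroRoots == size - 1) { /* exactly one rank has roots, i.e., a bcast pattern */
    bcast_root = (nroots > 0) ? rank : -1;
    MPI_Allreduce(MPI_IN_PLACE, &bcast_root, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    if (rank == 0) printf("bcast pattern detected, root = %d\n", bcast_root);
  }
  MPI_Finalize();
  return 0;
}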
76: PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
77: {
78: PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
79: PetscSFLink link = dat->avail, next;
81: PetscFunctionBegin;
82: PetscCall(PetscFree(dat->iranks));
83: PetscCall(PetscFree(dat->ioffset));
84: PetscCall(PetscFree(dat->irootloc));
85: PetscCall(PetscFree(dat->recvcounts));
86: PetscCall(PetscFree(dat->displs));
87: PetscCheck(!dat->inuse, PetscObjectComm((PetscObject)sf), PETSC_ERR_ARG_WRONGSTATE, "Outstanding operation has not been completed");
88: for (; link; link = next) {
89: next = link->next;
90: PetscCall(PetscSFLinkDestroy(sf, link));
91: }
92: dat->avail = NULL;
93: PetscFunctionReturn(PETSC_SUCCESS);
94: }
96: PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
97: {
98: PetscFunctionBegin;
99: PetscCall(PetscSFReset_Allgatherv(sf));
100: PetscCall(PetscFree(sf->data));
101: PetscFunctionReturn(PETSC_SUCCESS);
102: }
104: static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
105: {
106: PetscSFLink link;
107: PetscMPIInt sendcount, rank;
108: MPI_Comm comm;
109: void *rootbuf = NULL, *leafbuf = NULL;
110: MPI_Request *req;
111: PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
113: PetscFunctionBegin;
114: PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
115: PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
116: PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
117: PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
118: PetscCallMPI(MPI_Comm_rank(comm, &rank));
119: PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
120: PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
122: if (dat->bcast_pattern && rank == dat->bcast_root) PetscCall((*link->Memcpy)(link, link->leafmtype_mpi, leafbuf, link->rootmtype_mpi, rootbuf, (size_t)sendcount * link->unitbytes));
123: /* Ready the buffers for MPI */
124: PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_ROOT2LEAF));
125: if (dat->bcast_pattern) PetscCallMPI(MPIU_Ibcast(leafbuf, sf->nleaves, unit, dat->bcast_root, comm, req));
126: else PetscCallMPI(MPIU_Iallgatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, comm, req));
127: PetscFunctionReturn(PETSC_SUCCESS);
128: }
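
Stripped of the PetscSFLink packing, memtype handling, and the bcast-pattern shortcut, the general root-to-leaf path above is a nonblocking allgatherv: every rank contributes its nroots entries and every rank receives the concatenation of all contributions. A self-contained MPI-only sketch with assumed example sizes (doubles, one more root per rank; not PETSc code):

#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
  int          rank, size, i, nroots, total;
  int         *recvcounts, *displs;
  double      *rootdata, *leafdata;
  MPI_Request  req;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  nroots   = rank + 1; /* example layout: rank r owns r+1 roots */
  rootdata = malloc(nroots * sizeof(double));
  for (i = 0; i < nroots; i++) rootdata[i] = 100.0 * rank + i;

  /* build recvcounts/displs, the analogue of dat->recvcounts/dat->displs above */
  recvcounts = malloc(size * sizeof(int));
  displs     = malloc(size * sizeof(int));
  MPI_Allgather(&nroots, 1, MPI_INT, recvcounts, 1, MPI_INT, MPI_COMM_WORLD);
  for (displs[0] = 0, i = 1; i < size; i++) displs[i] = displs[i - 1] + recvcounts[i - 1];
  total    = displs[size - 1] + recvcounts[size - 1];
  leafdata = malloc(total * sizeof(double));

  /* the nonblocking allgatherv mirrors the MPIU_Iallgatherv() call above */
  MPI_Iallgatherv(rootdata, nroots, MPI_DOUBLE, leafdata, recvcounts, displs, MPI_DOUBLE, MPI_COMM_WORLD, &req);
  MPI_Wait(&req, MPI_STATUS_IGNORE);

  free(rootdata); free(leafdata); free(recvcounts); free(displs);
  MPI_Finalize();
  return 0;
}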
130: static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
131: {
132: PetscSFLink link;
133: PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
134: PetscInt rstart;
135: PetscMPIInt rank, count, recvcount;
136: MPI_Comm comm;
137: void *rootbuf = NULL, *leafbuf = NULL;
138: MPI_Request *req;
140: PetscFunctionBegin;
141: PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
142: if (op == MPI_REPLACE) {
143: /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from local leafdata is fine */
144: PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
145: PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
146: if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link));
147: } else {
148: PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
149: PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
150: PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
151: PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
152: PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_LEAF2ROOT));
153: if (dat->bcast_pattern) {
154: #if defined(PETSC_HAVE_OMPI_MAJOR_VERSION) /* Workaround: cuda-aware Open MPI 4.1.3 does not support MPI_Ireduce() with device buffers */
155: *req = MPI_REQUEST_NULL; /* Set NULL so that we can safely MPI_Wait(req) */
156: PetscCallMPI(MPI_Reduce(leafbuf, rootbuf, sf->nleaves, unit, op, dat->bcast_root, comm));
157: #else
158: PetscCallMPI(MPIU_Ireduce(leafbuf, rootbuf, sf->nleaves, unit, op, dat->bcast_root, comm, req));
159: #endif
160: } else { /* Reduce leafdata, then scatter to rootdata */
161: PetscCallMPI(MPI_Comm_rank(comm, &rank));
162: PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
163: /* Allocate a separate leaf buffer on rank 0 */
164: if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
165: PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
166: }
167:       /* If we already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), leafbuf on rank 0 may alias the buffer we are reducing into, so switch to MPI_IN_PLACE */
168: if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
169: PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
170: PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must do reduce with MPI builtin datatype basicunit */
171: PetscCallMPI(MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], dat->recvcounts, dat->displs, unit, rootbuf, recvcount, unit, 0, comm, req));
172: }
173: }
174: PetscFunctionReturn(PETSC_SUCCESS);
175: }
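
The fallback leaf-to-root path above ("reduce leafdata, then scatter to rootdata") can be illustrated in isolation: reduce the replicated leaf copies onto rank 0, then scatter each rank's slice back as its root data. A toy MPI sketch assuming doubles, MPI_SUM, and two roots per rank (not PETSc code):

#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
  int          rank, size, i, nroots, nleaves;
  int         *counts, *displs;
  double      *leafdata, *rootdata, *reduced = NULL;
  MPI_Request  req;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  nroots   = 2;             /* example layout: every rank owns 2 roots */
  nleaves  = nroots * size; /* every rank holds a full replicated copy */
  leafdata = malloc(nleaves * sizeof(double));
  rootdata = malloc(nroots * sizeof(double));
  for (i = 0; i < nleaves; i++) leafdata[i] = rank + 1.0;

  counts = malloc(size * sizeof(int));
  displs = malloc(size * sizeof(int));
  for (i = 0; i < size; i++) { counts[i] = nroots; displs[i] = i * nroots; }
  if (rank == 0) reduced = malloc(nleaves * sizeof(double)); /* separate receive buffer on rank 0 */

  MPI_Reduce(leafdata, reduced, nleaves, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Iscatterv(reduced, counts, displs, MPI_DOUBLE, rootdata, nroots, MPI_DOUBLE, 0, MPI_COMM_WORLD, &req);
  MPI_Wait(&req, MPI_STATUS_IGNORE);

  free(leafdata); free(rootdata); free(counts); free(displs); free(reduced);
  MPI_Finalize();
  return 0;
}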
177: PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, const void *leafdata, void *rootdata, MPI_Op op)
178: {
179: PetscSFLink link;
181: PetscFunctionBegin;
182: if (op == MPI_REPLACE) {
183:     /* A rare case arises when op is MPI_REPLACE, GPUs are in use, but MPI is not GPU-aware. In PetscSFReduceBegin_Allgather(v),
184:       we did a device-to-device copy and in effect finished the communication. But PetscSFLinkFinishCommunication() in
185:       PetscSFReduceEnd_Basic(), seeing that there is a rootbuf, would call PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
186:       which does a host-to-device memory copy on rootbuf and wrongly overwrites the results. So we do not call
187:       PetscSFReduceEnd_Basic() in this case, and just reclaim the link.
188:     */
189: PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
190: PetscCall(PetscSFLinkReclaim(sf, &link));
191: } else {
192: PetscCall(PetscSFReduceEnd_Basic(sf, unit, leafdata, rootdata, op));
193: }
194: PetscFunctionReturn(PETSC_SUCCESS);
195: }
197: static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
198: {
199: PetscSFLink link;
200: PetscMPIInt rank;
202: PetscFunctionBegin;
203: PetscCall(PetscSFBcastBegin_Gatherv(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE));
204: PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
205: PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
206: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
207: if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
208: PetscCall(PetscSFLinkReclaim(sf, &link));
209: PetscFunctionReturn(PETSC_SUCCESS);
210: }
212: /* This routine is very tricky (I believe it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation; a standalone MPI sketch of the scheme follows this routine).
214:    Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0,1,2 have a leaf with value 2,3,4 respectively. The leaves are connected
215:    to the root on rank 0. Suppose op=MPI_SUM and ranks 0,1,2 get the root state in their rank order. By definition of this routine, rank 0 sees 1
216:    in root, fetches it into its leafupdate, then updates root to 1 + 2 = 3; rank 1 sees 3 in root, fetches it into its leafupdate, then updates
217: root to 3 + 3 = 6; rank 2 sees 6 in root, fetches it into its leafupdate, then updates root to 6 + 4 = 10. At the end, leafupdate on rank
218: 0,1,2 is 1,3,6 respectively. root is 10.
220: We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
221: rank-0 rank-1 rank-2
222: Root 1
223: Leaf 2 3 4
224: Leafupdate 2 3 4
226: Do MPI_Exscan on leafupdate,
227: rank-0 rank-1 rank-2
228: Root 1
229: Leaf 2 3 4
230: Leafupdate 2 2 5
232: BcastAndOp from root to leafupdate,
233: rank-0 rank-1 rank-2
234: Root 1
235: Leaf 2 3 4
236: Leafupdate 3 3 6
238: Copy root to leafupdate on rank-0
239: rank-0 rank-1 rank-2
240: Root 1
241: Leaf 2 3 4
242: Leafupdate 1 3 6
244: Reduce from leaf to root,
245: rank-0 rank-1 rank-2
246: Root 10
247: Leaf 2 3 4
248: Leafupdate 1 3 6
249: */
250: PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, void *rootdata, PetscMemType leafmtype, const void *leafdata, void *leafupdate, MPI_Op op)
251: {
252: PetscSFLink link;
253: MPI_Comm comm;
254: PetscMPIInt count;
256: PetscFunctionBegin;
257: PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
258: PetscCheck(!PetscMemTypeDevice(rootmtype) && !PetscMemTypeDevice(leafmtype), comm, PETSC_ERR_SUP, "Do FetchAndOp on device");
259: /* Copy leafdata to leafupdate */
260: PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_FETCH, &link));
261: PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata)); /* Sync the device */
262: PetscCall((*link->Memcpy)(link, leafmtype, leafupdate, leafmtype, leafdata, sf->nleaves * link->unitbytes));
263: PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
265: /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
266: if (op == MPI_REPLACE) {
267: PetscMPIInt size, rank, prev, next;
268: PetscCallMPI(MPI_Comm_rank(comm, &rank));
269: PetscCallMPI(MPI_Comm_size(comm, &size));
270: prev = rank ? rank - 1 : MPI_PROC_NULL;
271: next = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;
272: PetscCall(PetscMPIIntCast(sf->nleaves, &count));
273:     PetscCallMPI(MPI_Sendrecv_replace(leafupdate, count, unit, next, link->tag, prev, link->tag, comm, MPI_STATUS_IGNORE));
274: } else {
275: PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
276: PetscCallMPI(MPI_Exscan(MPI_IN_PLACE, leafupdate, count, link->basicunit, op, comm));
277: }
278: PetscCall(PetscSFLinkReclaim(sf, &link));
279: PetscCall(PetscSFBcastBegin(sf, unit, rootdata, leafupdate, op));
280: PetscCall(PetscSFBcastEnd(sf, unit, rootdata, leafupdate, op));
282: /* Bcast roots to rank 0's leafupdate */
283: PetscCall(PetscSFBcastToZero_Private(sf, unit, rootdata, leafupdate)); /* Using this line makes Allgather SFs able to inherit this routine */
285: /* Reduce leafdata to rootdata */
286: PetscCall(PetscSFReduceBegin(sf, unit, leafdata, rootdata, op));
287: PetscFunctionReturn(PETSC_SUCCESS);
288: }
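
The worked example in the comment above (root 1 on rank 0; leaves 2,3,4; op=MPI_SUM; expected leafupdate 1,3,6 and final root 10) can be reproduced with a toy MPI program that follows the same copy/Exscan/bcast/reduce scheme. A sketch meant to be run with 3 ranks, assuming doubles and not using PETSc:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  int    rank, size;
  double root = 0.0, leaf, leafupdate, sumleaf = 0.0;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  if (rank == 0) root = 1.0;
  leaf       = rank + 2.0; /* leaves 2, 3, 4 on ranks 0, 1, 2 */
  leafupdate = leaf;       /* copy leafdata to leafupdate */

  /* Exscan: each rank gets the sum of the leaves on lower ranks (rank 0's result is unused) */
  MPI_Exscan(MPI_IN_PLACE, &leafupdate, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

  /* BcastAndOp the root into leafupdate; rank 0 instead keeps the old root value */
  MPI_Bcast(&root, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
  leafupdate = (rank == 0) ? root : leafupdate + root;

  /* Reduce all leaves onto the root */
  MPI_Reduce(&leaf, &sumleaf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  if (rank == 0) root += sumleaf;

  printf("[%d] leafupdate = %g\n", rank, leafupdate);
  if (rank == 0) printf("[%d] root = %g\n", rank, root);
  MPI_Finalize();
  return 0;
}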
290: PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, void *rootdata, const void *leafdata, void *leafupdate, MPI_Op op)
291: {
292: PetscFunctionBegin;
293: PetscCall(PetscSFReduceEnd(sf, unit, leafdata, rootdata, op));
294: PetscFunctionReturn(PETSC_SUCCESS);
295: }
297: /* Get root ranks accessing my leaves */
298: PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf, PetscInt *nranks, const PetscMPIInt **ranks, const PetscInt **roffset, const PetscInt **rmine, const PetscInt **rremote)
299: {
300: PetscInt i, j, k, size;
301: const PetscInt *range;
303: PetscFunctionBegin;
304: /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
305: if (sf->nranks && !sf->ranks) { /* On rank!=0, sf->nranks=0. The sf->nranks test makes this routine also work for sfgatherv */
306: size = sf->nranks;
307: PetscCall(PetscLayoutGetRanges(sf->map, &range));
308: PetscCall(PetscMalloc4(size, &sf->ranks, size + 1, &sf->roffset, sf->nleaves, &sf->rmine, sf->nleaves, &sf->rremote));
309: for (i = 0; i < size; i++) sf->ranks[i] = i;
310: PetscCall(PetscArraycpy(sf->roffset, range, size + 1));
311: for (i = 0; i < sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL even for contiguous leaves */
312: for (i = 0; i < size; i++) {
313: for (j = range[i], k = 0; j < range[i + 1]; j++, k++) sf->rremote[j] = k;
314: }
315: }
317: if (nranks) *nranks = sf->nranks;
318: if (ranks) *ranks = sf->ranks;
319: if (roffset) *roffset = sf->roffset;
320: if (rmine) *rmine = sf->rmine;
321: if (rremote) *rremote = sf->rremote;
322: PetscFunctionReturn(PETSC_SUCCESS);
323: }
325: /* Get leaf ranks accessing my roots */
326: PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf, PetscInt *niranks, const PetscMPIInt **iranks, const PetscInt **ioffset, const PetscInt **irootloc)
327: {
328: PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
329: MPI_Comm comm;
330: PetscMPIInt size, rank;
331: PetscInt i, j;
333: PetscFunctionBegin;
334: /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
335: PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
336: PetscCallMPI(MPI_Comm_size(comm, &size));
337: PetscCallMPI(MPI_Comm_rank(comm, &rank));
338: if (niranks) *niranks = size;
340: /* PetscSF_Basic distinguishes incoming ranks. We do not need that here, but we must put self first and
341: sort the other ranks. See the comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ for why.
342: */
343: if (iranks) {
344: if (!dat->iranks) {
345: PetscCall(PetscMalloc1(size, &dat->iranks));
346: dat->iranks[0] = rank;
347: for (i = 0, j = 1; i < size; i++) {
348: if (i == rank) continue;
349: dat->iranks[j++] = i;
350: }
351: }
352: *iranks = dat->iranks; /* dat->iranks was init'ed to NULL by PetscNew */
353: }
355: if (ioffset) {
356: if (!dat->ioffset) {
357: PetscCall(PetscMalloc1(size + 1, &dat->ioffset));
358: for (i = 0; i <= size; i++) dat->ioffset[i] = i * sf->nroots;
359: }
360: *ioffset = dat->ioffset;
361: }
363: if (irootloc) {
364: if (!dat->irootloc) {
365: PetscCall(PetscMalloc1(sf->nleaves, &dat->irootloc));
366: for (i = 0; i < size; i++) {
367: for (j = 0; j < sf->nroots; j++) dat->irootloc[i * sf->nroots + j] = j;
368: }
369: }
370: *irootloc = dat->irootloc;
371: }
372: PetscFunctionReturn(PETSC_SUCCESS);
373: }
375: PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf, PetscSF *out)
376: {
377: PetscInt i, nroots, nleaves, rstart, *ilocal;
378: PetscSFNode *iremote;
379: PetscSF lsf;
381: PetscFunctionBegin;
382: nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
383: nroots = nleaves;
384: PetscCall(PetscMalloc1(nleaves, &ilocal));
385: PetscCall(PetscMalloc1(nleaves, &iremote));
386: PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
388: for (i = 0; i < nleaves; i++) {
389:     ilocal[i] = rstart + i; /* lsf does not change leaf indices */
390: iremote[i].rank = 0; /* rank in PETSC_COMM_SELF */
391: iremote[i].index = i; /* root index */
392: }
394: PetscCall(PetscSFCreate(PETSC_COMM_SELF, &lsf));
395: PetscCall(PetscSFSetGraph(lsf, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
396: PetscCall(PetscSFSetUp(lsf));
397: *out = lsf;
398: PetscFunctionReturn(PETSC_SUCCESS);
399: }
401: PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
402: {
403: PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
405: PetscFunctionBegin;
406: sf->ops->BcastEnd = PetscSFBcastEnd_Basic;
407: sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;
409: sf->ops->SetUp = PetscSFSetUp_Allgatherv;
410: sf->ops->Reset = PetscSFReset_Allgatherv;
411: sf->ops->Destroy = PetscSFDestroy_Allgatherv;
412: sf->ops->GetRootRanks = PetscSFGetRootRanks_Allgatherv;
413: sf->ops->GetLeafRanks = PetscSFGetLeafRanks_Allgatherv;
414: sf->ops->GetGraph = PetscSFGetGraph_Allgatherv;
415: sf->ops->BcastBegin = PetscSFBcastBegin_Allgatherv;
416: sf->ops->ReduceBegin = PetscSFReduceBegin_Allgatherv;
417: sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
418: sf->ops->FetchAndOpEnd = PetscSFFetchAndOpEnd_Allgatherv;
419: sf->ops->CreateLocalSF = PetscSFCreateLocalSF_Allgatherv;
420: sf->ops->BcastToZero = PetscSFBcastToZero_Allgatherv;
422: PetscCall(PetscNew(&dat));
423: dat->bcast_root = -1;
424: sf->data = (void *)dat;
425: PetscFunctionReturn(PETSC_SUCCESS);
426: }