Actual source code: sfallgatherv.c
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gatherv(PetscSF,MPI_Datatype,PetscMemType,const void*,PetscMemType,void*,MPI_Op);

/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf,PetscInt *nroots,PetscInt *nleaves,const PetscInt **ilocal,const PetscSFNode **iremote)
{
  PetscInt       i,j,k;
  const PetscInt *range;
  PetscMPIInt    size;

  MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);
  if (nroots)  *nroots  = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal)  *ilocal  = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      PetscLayoutGetRanges(sf->map,&range);
      PetscMalloc1(sf->nleaves,&sf->remote);
      sf->remote_alloc = sf->remote;
      for (i=0; i<size; i++) {
        for (j=range[i],k=0; j<range[i+1]; j++,k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  return 0;
}

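/* A minimal sketch (not part of this file) of how a caller might inspect the graph assembled above,
   assuming an already set-up allgatherv-type SF named sf. Since ilocal comes back NULL, leaf j is
   simply index j, and iremote[j] carries the owning rank plus the root's local index on that rank,
   exactly as filled in by the double loop over the layout ranges.

     PetscInt          nroots,nleaves,j;
     const PetscInt    *ilocal;
     const PetscSFNode *iremote;

     PetscSFGetGraph(sf,&nroots,&nleaves,&ilocal,&iremote);
     for (j=0; j<nleaves; j++) {
       // iremote[j].rank  : rank owning the j-th global root
       // iremote[j].index : that root's local index on its owner
     }
*/
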
PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  PetscMPIInt        size;
  PetscInt           i;
  const PetscInt     *range;

  PetscSFSetUp_Allgather(sf);
  MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    PetscMalloc1(size,&dat->recvcounts);
    PetscMalloc1(size,&dat->displs);
    PetscLayoutGetRanges(sf->map,&range);

    for (i=0; i<size; i++) {
      PetscMPIIntCast(range[i],&dat->displs[i]);
      PetscMPIIntCast(range[i+1]-range[i],&dat->recvcounts[i]);
    }
  }
  return 0;
}

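/* For illustration only (a hypothetical layout, not from this file): with three ranks owning 2, 3 and 4
   roots, the layout ranges are {0,2,5,9}, so on every rank the loop above produces

     dat->displs     = {0, 2, 5}
     dat->recvcounts = {2, 3, 4}

   which are exactly the per-rank counts and displacements later handed to MPIU_Iallgatherv() and
   MPIU_Iscatterv(). */
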
PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  PetscSFLink        link = dat->avail,next;

  PetscFree(dat->iranks);
  PetscFree(dat->ioffset);
  PetscFree(dat->irootloc);
  PetscFree(dat->recvcounts);
  PetscFree(dat->displs);

  for (; link; link=next) {next = link->next; PetscSFLinkDestroy(sf,link);}
  dat->avail = NULL;
  return 0;
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscSFReset_Allgatherv(sf);
  PetscFree(sf->data);
  return 0;
}

static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscSFLink        link;
  PetscMPIInt        sendcount;
  MPI_Comm           comm;
  void               *rootbuf = NULL,*leafbuf = NULL;
  MPI_Request        *req;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;

  PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_BCAST,&link);
  PetscSFLinkPackRootData(sf,link,PETSCSF_REMOTE,rootdata);
  PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */);
  PetscObjectGetComm((PetscObject)sf,&comm);
  PetscMPIIntCast(sf->nroots,&sendcount);
  PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_ROOT2LEAF,&rootbuf,&leafbuf,&req,NULL);
  PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_ROOT2LEAF);
  MPIU_Iallgatherv(rootbuf,sendcount,unit,leafbuf,dat->recvcounts,dat->displs,unit,comm,req);
  return 0;
}

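/* A minimal usage sketch (assumptions: sf is an already set-up allgatherv-type SF, rootdata holds
   sf->nroots scalars, leafdata holds sf->nleaves scalars). The begin routine above posts the
   MPIU_Iallgatherv(), so every rank ends up with the concatenation of all roots in its leaves:

     PetscSFBcastBegin(sf,MPIU_SCALAR,rootdata,leafdata,MPI_REPLACE);
     // ... unrelated work can overlap the communication here ...
     PetscSFBcastEnd(sf,MPIU_SCALAR,rootdata,leafdata,MPI_REPLACE);
*/
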
static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscSFLink        link;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  PetscInt           rstart;
  PetscMPIInt        rank,count,recvcount;
  MPI_Comm           comm;
  void               *rootbuf = NULL,*leafbuf = NULL;
  MPI_Request        *req;

  PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_REDUCE,&link);
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from local leafdata is fine */
    PetscLayoutGetRange(sf->map,&rstart,NULL);
    (*link->Memcpy)(link,rootmtype,rootdata,leafmtype,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes);
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) (*link->SyncStream)(link);
  } else {
    /* Reduce leafdata, then scatter to rootdata */
    PetscObjectGetComm((PetscObject)sf,&comm);
    MPI_Comm_rank(comm,&rank);
    PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata);
    PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */);
    PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_LEAF2ROOT,&rootbuf,&leafbuf,&req,NULL);
    PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE],&recvcount);
    /* Allocate a separate leaf buffer on rank 0 */
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      PetscSFMalloc(sf,link->leafmtype_mpi,sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]);
    }
    /* In case we already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), we need to adjust leafbuf on rank 0 */
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscMPIIntCast(sf->nleaves*link->bs,&count);
    PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_LEAF2ROOT);
    MPI_Reduce(leafbuf,link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],count,link->basicunit,op,0,comm); /* Must do the reduce with the MPI builtin datatype basicunit */
    MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],dat->recvcounts,dat->displs,unit,rootbuf,recvcount,unit,0,comm,req);
  }
  return 0;
}

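/* Illustration of the non-REPLACE branch above with the hypothetical 2/3/4-root layout: every rank holds
   9 leaf values; MPI_Reduce() combines those 9-entry leaf buffers element-wise onto rank 0, and
   MPIU_Iscatterv() then returns segments of lengths {2,3,4} at displacements {0,2,5} to the owning ranks'
   root buffers, which PetscSFReduceEnd_Basic() later unpacks into rootdata. */
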
PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,const void *leafdata,void *rootdata,MPI_Op op)
{
  PetscSFLink link;

  if (op == MPI_REPLACE) {
    /* A rare case: op is MPI_REPLACE, GPUs are in use, but there is no GPU-aware MPI. In PetscSFReduceBegin_Allgather(v)
       we did a device-to-device copy and in effect already finished the communication. However, PetscSFLinkFinishCommunication()
       in PetscSFReduceEnd_Basic() sees that there is a rootbuf and calls PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
       which does a host-to-device copy on rootbuf and wrongly overwrites the results. So we do not call
       PetscSFReduceEnd_Basic() in this case and just reclaim the link.
    */
    PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);
    PetscSFLinkReclaim(sf,&link);
  } else {
    PetscSFReduceEnd_Basic(sf,unit,leafdata,rootdata,op);
  }
  return 0;
}

static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata)
{
  PetscSFLink link;
  PetscMPIInt rank;

  PetscSFBcastBegin_Gatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,MPI_REPLACE);
  PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);
  PetscSFLinkFinishCommunication(sf,link,PETSCSF_ROOT2LEAF);
  MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    (*link->Memcpy)(link,PETSC_MEMTYPE_DEVICE,leafdata,PETSC_MEMTYPE_HOST,link->leafbuf[PETSC_MEMTYPE_HOST],sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes);
  }
  PetscSFLinkReclaim(sf,&link);
  return 0;
}

/* This routine is very tricky (I believe it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation).

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0, 1, 2 have a leaf with value 2, 3, 4 respectively. The leaves are connected
   to the root on rank 0. Suppose op=MPI_SUM and ranks 0, 1, 2 get the root state in their rank order. By definition of this routine, rank 0 sees 1
   in the root, fetches it into its leafupdate, then updates the root to 1 + 2 = 3; rank 1 sees 3 in the root, fetches it into its leafupdate, then
   updates the root to 3 + 3 = 6; rank 2 sees 6 in the root, fetches it into its leafupdate, then updates the root to 6 + 4 = 10. At the end,
   leafupdate on ranks 0, 1, 2 is 1, 3, 6 respectively, and the root is 10.

   We use a simpler implementation. Starting from the same initial state, we copy leafdata to leafupdate
               rank-0   rank-1   rank-2
        Root      1
        Leaf      2        3        4
  Leafupdate      2        3        4

   Do MPI_Exscan on leafupdate,
               rank-0   rank-1   rank-2
        Root      1
        Leaf      2        3        4
  Leafupdate      2        2        5

   BcastAndOp from root to leafupdate,
               rank-0   rank-1   rank-2
        Root      1
        Leaf      2        3        4
  Leafupdate      3        3        6

   Copy root to leafupdate on rank-0
               rank-0   rank-1   rank-2
        Root      1
        Leaf      2        3        4
  Leafupdate      1        3        6

   Reduce from leaf to root,
               rank-0   rank-1   rank-2
        Root     10
        Leaf      2        3        4
  Leafupdate      1        3        6
*/
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,void *rootdata,PetscMemType leafmtype,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscSFLink link;
  MPI_Comm    comm;
  PetscMPIInt count;

  PetscObjectGetComm((PetscObject)sf,&comm);

  /* Copy leafdata to leafupdate */
  PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_FETCH,&link);
  PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata); /* Sync the device */
  (*link->Memcpy)(link,leafmtype,leafupdate,leafmtype,leafdata,sf->nleaves*link->unitbytes);
  PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  if (op == MPI_REPLACE) {
    PetscMPIInt size,rank,prev,next;
    MPI_Comm_rank(comm,&rank);
    MPI_Comm_size(comm,&size);
    prev = rank ?            rank-1 : MPI_PROC_NULL;
    next = (rank < size-1) ? rank+1 : MPI_PROC_NULL;
    PetscMPIIntCast(sf->nleaves,&count);
    MPI_Sendrecv_replace(leafupdate,count,unit,next,link->tag,prev,link->tag,comm,MPI_STATUS_IGNORE);
  } else {
    PetscMPIIntCast(sf->nleaves*link->bs,&count);
    MPI_Exscan(MPI_IN_PLACE,leafupdate,count,link->basicunit,op,comm);
  }
  PetscSFLinkReclaim(sf,&link);
  PetscSFBcastBegin(sf,unit,rootdata,leafupdate,op);
  PetscSFBcastEnd(sf,unit,rootdata,leafupdate,op);

  /* Bcast roots to rank 0's leafupdate */
  PetscSFBcastToZero_Private(sf,unit,rootdata,leafupdate); /* Using this line makes Allgather SFs able to inherit this routine */

  /* Reduce leafdata to rootdata */
  PetscSFReduceBegin(sf,unit,leafdata,rootdata,op);
  return 0;
}

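/* A minimal usage sketch (not from this file) of the fetch-and-op interface implemented above, assuming
   an allgatherv-type SF, MPIU_INT data and op = MPI_SUM. On return, leafupdate holds the root state each
   leaf saw before its own contribution was folded in, matching the worked example in the comment above:

     PetscSFFetchAndOpBegin(sf,MPIU_INT,rootdata,leafdata,leafupdate,MPI_SUM);
     PetscSFFetchAndOpEnd(sf,MPIU_INT,rootdata,leafdata,leafupdate,MPI_SUM);
*/
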
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,void *rootdata,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscSFReduceEnd(sf,unit,leafdata,rootdata,op);
  return 0;
}

/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf,PetscInt *nranks,const PetscMPIInt **ranks,const PetscInt **roffset,const PetscInt **rmine,const PetscInt **rremote)
{
  PetscInt       i,j,k,size;
  const PetscInt *range;

  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank != 0, sf->nranks = 0. The sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    PetscLayoutGetRanges(sf->map,&range);
    PetscMalloc4(size,&sf->ranks,size+1,&sf->roffset,sf->nleaves,&sf->rmine,sf->nleaves,&sf->rremote);
    for (i=0; i<size; i++) sf->ranks[i] = i;
    PetscArraycpy(sf->roffset,range,size+1);
    for (i=0; i<sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL even for contiguous leaves */
    for (i=0; i<size; i++) {
      for (j=range[i],k=0; j<range[i+1]; j++,k++) sf->rremote[j] = k;
    }
  }

  if (nranks)  *nranks  = sf->nranks;
  if (ranks)   *ranks   = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine)   *rmine   = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  return 0;
}

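/* Under the same hypothetical 2/3/4-root layout, the lazily built arrays above would read (on a rank with
   leaves): ranks = {0,1,2}, roffset = {0,2,5,9}, rmine = {0,1,...,8} and rremote = {0,1, 0,1,2, 0,1,2,3},
   i.e. every leaf maps back to a root index local to its owning rank. */
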
/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf,PetscInt *niranks,const PetscMPIInt **iranks,const PetscInt **ioffset,const PetscInt **irootloc)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  PetscInt           i,j;

  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  PetscObjectGetComm((PetscObject)sf,&comm);
  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that. But we must put self as the first and
     sort the other ranks. See the comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ on why.
  */
  if (iranks) {
    if (!dat->iranks) {
      PetscMalloc1(size,&dat->iranks);
      dat->iranks[0] = rank;
      for (i=0,j=1; i<size; i++) {if (i == rank) continue; dat->iranks[j++] = i;}
    }
    *iranks = dat->iranks; /* dat->iranks was initialized to NULL by PetscNewLog */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      PetscMalloc1(size+1,&dat->ioffset);
      for (i=0; i<=size; i++) dat->ioffset[i] = i*sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      PetscMalloc1(sf->nleaves,&dat->irootloc);
      for (i=0; i<size; i++) {
        for (j=0; j<sf->nroots; j++) dat->irootloc[i*sf->nroots+j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  return 0;
}

PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf,PetscSF *out)
{
  PetscInt    i,nroots,nleaves,rstart,*ilocal;
  PetscSFNode *iremote;
  PetscSF     lsf;

  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  PetscMalloc1(nleaves,&ilocal);
  PetscMalloc1(nleaves,&iremote);
  PetscLayoutGetRange(sf->map,&rstart,NULL);

  for (i=0; i<nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  PetscSFCreate(PETSC_COMM_SELF,&lsf);
  PetscSFSetGraph(lsf,nroots,nleaves,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
  PetscSFSetUp(lsf);
  *out = lsf;
  return 0;
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;

  sf->ops->BcastEnd        = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd       = PetscSFReduceEnd_Allgatherv;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  PetscNewLog(sf,&dat);
  sf->data = (void*)dat;
  return 0;
}
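
/* A minimal end-to-end sketch (assumptions: PETSc is initialized, N and the data buffers are illustrative)
   of how an SF backed by this implementation is typically obtained, by giving PetscSFSetGraphWithPattern()
   the ALLGATHER pattern over a PetscLayout (PETSc then selects an appropriate allgather-family
   implementation for that layout), and used for a broadcast:

     PetscSF     sf;
     PetscLayout map;

     PetscLayoutCreateFromSizes(PETSC_COMM_WORLD,PETSC_DECIDE,N,1,&map);
     PetscSFCreate(PETSC_COMM_WORLD,&sf);
     PetscSFSetGraphWithPattern(sf,map,PETSCSF_PATTERN_ALLGATHER);
     PetscSFSetUp(sf);
     PetscSFBcastBegin(sf,MPIU_SCALAR,rootdata,leafdata,MPI_REPLACE);
     PetscSFBcastEnd(sf,MPIU_SCALAR,rootdata,leafdata,MPI_REPLACE);
     PetscSFDestroy(&sf);
     PetscLayoutDestroy(&map);
*/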