Actual source code: sfallgather.c
petsc-3.14.6 2021-03-30
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* Reuse the type. The difference is that some fields (i.e., displs, recvcounts) are not used in Allgather on rank != 0, which is not a big deal */
typedef PetscSF_Allgatherv PetscSF_Allgather;

PETSC_INTERN PetscErrorCode PetscSFBcastAndOpBegin_Gather(PetscSF,MPI_Datatype,PetscMemType,const void*,PetscMemType,void*,MPI_Op);
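
/* For orientation: an SF of this type encodes the graph in which every process's leaf
   space is the concatenation of all processes' root spaces, so Bcast (root to leaf)
   maps onto MPI_Allgather and Reduce (leaf to root) onto a reduction followed by a
   scatter. A minimal sketch of the layout, assuming 2 ranks with 2 roots each (the
   numbers are illustrative, not taken from this file):

     rank 0 roots:  r00 r01        rank 1 roots:  r10 r11
     leaves on every rank:  r00 r01 r10 r11   (nleaves equals the total number of roots)
*/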
PetscErrorCode PetscSFSetUp_Allgather(PetscSF sf)
{
  PetscInt          i;
  PetscSF_Allgather *dat = (PetscSF_Allgather*)sf->data;

  PetscFunctionBegin;
  for (i=PETSCSF_LOCAL; i<=PETSCSF_REMOTE; i++) {
    sf->leafbuflen[i]  = 0;
    sf->leafstart[i]   = 0;
    sf->leafcontig[i]  = PETSC_TRUE;
    sf->leafdups[i]    = PETSC_FALSE;
    dat->rootbuflen[i] = 0;
    dat->rootstart[i]  = 0;
    dat->rootcontig[i] = PETSC_TRUE;
    dat->rootdups[i]   = PETSC_FALSE;
  }

  sf->leafbuflen[PETSCSF_REMOTE]  = sf->nleaves;
  dat->rootbuflen[PETSCSF_REMOTE] = sf->nroots;
  sf->persistent = PETSC_FALSE;
  sf->nleafreqs  = 0; /* MPI collectives only need one request. We treat it as a root request. */
  dat->nrootreqs = 1;
  PetscFunctionReturn(0);
}
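
/* A worked example of the sizes set above, assuming 2 ranks with nroots = 3 on each
   (illustrative numbers): every rank then has nleaves = 6, so on both ranks
   rootbuflen[PETSCSF_REMOTE] = 3 and leafbuflen[PETSCSF_REMOTE] = 6, and the single
   root request tracks the one MPI collective issued per operation. */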
static PetscErrorCode PetscSFBcastAndOpBegin_Allgather(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscErrorCode ierr;
  PetscSFLink    link;
  PetscMPIInt    sendcount;
  MPI_Comm       comm;
  void           *rootbuf = NULL,*leafbuf = NULL; /* buffers seen by MPI */
  MPI_Request    *req;

  PetscFunctionBegin;
  ierr = PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_BCAST,&link);CHKERRQ(ierr);
  ierr = PetscSFLinkPackRootData(sf,link,PETSCSF_REMOTE,rootdata);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  ierr = PetscMPIIntCast(sf->nroots,&sendcount);CHKERRQ(ierr);
  ierr = PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_ROOT2LEAF,&rootbuf,&leafbuf,&req,NULL);CHKERRQ(ierr);
  ierr = MPIU_Iallgather(rootbuf,sendcount,unit,leafbuf,sendcount,unit,comm,req);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
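
/* In spirit, the begin routine above issues the following raw MPI pattern, and the end
   routine (PetscSFBcastAndOpEnd_Basic) waits on the request and unpacks into the leaves.
   A sketch, assuming contiguous PetscScalar buffers, an MPI-3 library with nonblocking
   collectives, and rootbuf/leafbuf/sendcount/comm as in the function above:

     MPI_Request req;
     MPI_Iallgather(rootbuf,sendcount,MPIU_SCALAR,leafbuf,sendcount,MPIU_SCALAR,comm,&req);
     MPI_Wait(&req,MPI_STATUS_IGNORE);

   The same count is used for send and receive: the Allgather type assumes every rank
   contributes the same number of roots (otherwise PETSCSFALLGATHERV is the right type). */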
static PetscErrorCode PetscSFReduceBegin_Allgather(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscErrorCode    ierr;
  PetscSFLink       link;
  PetscInt          rstart;
  MPI_Comm          comm;
  PetscMPIInt       rank,count,recvcount;
  void              *rootbuf = NULL,*leafbuf = NULL; /* buffers seen by MPI */
  PetscSF_Allgather *dat = (PetscSF_Allgather*)sf->data;
  MPI_Request       *req;

  PetscFunctionBegin;
  ierr = PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_REDUCE,&link);CHKERRQ(ierr);
  if (op == MPIU_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce, so copying from the local leafdata is fine */
    ierr = PetscLayoutGetRange(sf->map,&rstart,NULL);CHKERRQ(ierr);
    ierr = (*link->Memcpy)(link,rootmtype,rootdata,leafmtype,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes);CHKERRQ(ierr);
  } else {
    ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
    ierr = PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata);CHKERRQ(ierr);
    ierr = PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_LEAF2ROOT,&rootbuf,&leafbuf,&req,NULL);CHKERRQ(ierr);
    ierr = PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE],&recvcount);CHKERRQ(ierr);
    if (!rank && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      ierr = PetscSFMalloc(sf,link->leafmtype_mpi,sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]);CHKERRQ(ierr);
    }
    if (!rank && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    ierr = PetscMPIIntCast(sf->nleaves*link->bs,&count);CHKERRQ(ierr);
    ierr = MPI_Reduce(leafbuf,link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],count,link->basicunit,op,0,comm);CHKERRQ(ierr); /* Must reduce with the MPI built-in datatype basicunit */
    ierr = MPIU_Iscatter(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],recvcount,unit,rootbuf,recvcount,unit,0/*rank 0*/,comm,req);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
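
/* The non-REPLACE branch above is a two-step pattern: combine every rank's full-length
   leaf array on rank 0 with MPI_Reduce, then hand each rank the block that holds its own
   roots with a scatter. A sketch with raw MPI calls, assuming equal nroots per rank,
   contiguous PetscScalar buffers, and a scratch array tmp of length nleaves on rank 0
   (tmp is hypothetical, standing in for link->leafbuf_alloc):

     MPI_Reduce(leafbuf,tmp,nleaves,MPIU_SCALAR,op,0,comm);
     MPI_Iscatter(tmp,nroots,MPIU_SCALAR,rootbuf,nroots,MPIU_SCALAR,0,comm,&req);
*/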
static PetscErrorCode PetscSFBcastToZero_Allgather(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata)
{
  PetscErrorCode ierr;
  PetscSFLink    link;
  PetscMPIInt    rank;

  PetscFunctionBegin;
  ierr = PetscSFBcastAndOpBegin_Gather(sf,unit,rootmtype,rootdata,leafmtype,leafdata,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);CHKERRQ(ierr);
  ierr = PetscSFLinkMPIWaitall(sf,link,PETSCSF_ROOT2LEAF);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);CHKERRQ(ierr);
  if (!rank && leafmtype == PETSC_MEMTYPE_DEVICE && !sf->use_gpu_aware_mpi) {
    ierr = (*link->Memcpy)(link,PETSC_MEMTYPE_DEVICE,leafdata,PETSC_MEMTYPE_HOST,link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_HOST],sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes);CHKERRQ(ierr);
  }
  ierr = PetscSFLinkReclaim(sf,&link);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
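
/* BcastToZero delivers root data to rank 0's leaves only. Because rank 0's leaves cover
   all roots in this layout, the routine reuses the Gather implementation's begin and then
   waits here, which is essentially a blocking gather onto rank 0 (a sketch):

     MPI_Gather(rootbuf,nroots,MPIU_SCALAR,leafbuf,nroots,MPIU_SCALAR,0,comm);

   followed, on rank 0 only, by a host-to-device copy when the leaves live on the GPU and
   the MPI library is not GPU-aware. */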
PETSC_INTERN PetscErrorCode PetscSFCreate_Allgather(PetscSF sf)
{
  PetscErrorCode    ierr;
  PetscSF_Allgather *dat = (PetscSF_Allgather*)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastAndOpEnd   = PetscSFBcastAndOpEnd_Basic;
  sf->ops->ReduceEnd       = PetscSFReduceEnd_Basic;

  /* Inherit from Allgatherv */
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;

  /* Allgather stuff */
  sf->ops->SetUp           = PetscSFSetUp_Allgather;
  sf->ops->BcastAndOpBegin = PetscSFBcastAndOpBegin_Allgather;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgather;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgather;

  ierr     = PetscNewLog(sf,&dat);CHKERRQ(ierr);
  sf->data = (void*)dat;
  PetscFunctionReturn(0);
}
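
/* Usage sketch (illustrative; the identifiers are local to this example, and rootdata and
   leafdata are PetscScalar arrays of length n and N set up elsewhere): gather each rank's
   n root values onto every rank, assuming n is the same on all ranks as the Allgather
   layout requires.

     PetscSF        sf;
     PetscSFNode    *remote;
     PetscInt       i,n = 2,N;
     PetscMPIInt    size;
     PetscErrorCode ierr;

     ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
     N    = n*size;
     ierr = PetscMalloc1(N,&remote);CHKERRQ(ierr);
     for (i=0; i<N; i++) { remote[i].rank = i/n; remote[i].index = i%n; }
     ierr = PetscSFCreate(PETSC_COMM_WORLD,&sf);CHKERRQ(ierr);
     ierr = PetscSFSetGraph(sf,n,N,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
     ierr = PetscSFSetType(sf,PETSCSFALLGATHER);CHKERRQ(ierr);
     ierr = PetscSFBcastBegin(sf,MPIU_SCALAR,rootdata,leafdata);CHKERRQ(ierr);
     ierr = PetscSFBcastEnd(sf,MPIU_SCALAR,rootdata,leafdata);CHKERRQ(ierr);
     ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
*/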