Actual source code: sfalltoall.c

petsc-3.13.6 2020-09-29
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>
#include <../src/vec/is/sf/impls/basic/allgather/sfallgather.h>
#include <../src/vec/is/sf/impls/basic/gatherv/sfgatherv.h>

/* Reuse the type. The difference is some fields (i.e., displs, recvcounts) are not used, which is not a big deal */
typedef PetscSF_Allgatherv PetscSF_Alltoall;
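/* In an alltoall every pair of processes exchanges exactly one unit, so the per-rank
   recvcounts[]/displs[] arrays that Allgatherv carries are implicit and simply left unused here */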

/*===================================================================================*/
/*              Implementations of SF public APIs                                    */
/*===================================================================================*/
static PetscErrorCode PetscSFGetGraph_Alltoall(PetscSF sf,PetscInt *nroots,PetscInt *nleaves,const PetscInt **ilocal,const PetscSFNode **iremote)
{
  PetscInt       i;

  if (nroots)  *nroots  = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal)  *ilocal  = NULL; /* Contiguous local indices */
  if (iremote) {
    if (!sf->remote) {
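      /* Build the remote list lazily and cache it in sf->remote: leaf i of this process is
         connected to root i on process i */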
      PetscMalloc1(sf->nleaves,&sf->remote);
      sf->remote_alloc = sf->remote;
      for (i=0; i<sf->nleaves; i++) {
        sf->remote[i].rank  = i;
        sf->remote[i].index = i;
      }
    }
    *iremote = sf->remote;
  }
  return(0);
}

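/* A broadcast over this SF is one MPIU_Ialltoall: every process contributes one unit of rootdata
   to each process and receives one unit into leafdata */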
static PetscErrorCode PetscSFBcastAndOpBegin_Alltoall(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscErrorCode       ierr;
  PetscSFLink          link;
  MPI_Comm             comm;
  void                 *rootbuf = NULL,*leafbuf = NULL; /* buffer used by MPI */
  MPI_Request          *req;

  PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_BCAST,&link);
  PetscSFLinkPackRootData(sf,link,PETSCSF_REMOTE,rootdata);
  PetscObjectGetComm((PetscObject)sf,&comm);
  PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_ROOT2LEAF,&rootbuf,&leafbuf,&req,NULL);
  MPIU_Ialltoall(rootbuf,1,unit,leafbuf,1,unit,comm,req);
  return(0);
}

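/* A reduction is the reverse direction: packed leaf data is sent back to the roots, again with a
   single MPIU_Ialltoall */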
static PetscErrorCode PetscSFReduceBegin_Alltoall(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscErrorCode       ierr;
  PetscSFLink          link;
  MPI_Comm             comm;
  void                 *rootbuf = NULL,*leafbuf = NULL; /* buffer used by MPI */
  MPI_Request          *req;

  PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_REDUCE,&link);
  PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata);
  PetscObjectGetComm((PetscObject)sf,&comm);
  PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_LEAF2ROOT,&rootbuf,&leafbuf,&req,NULL);
  MPIU_Ialltoall(leafbuf,1,unit,rootbuf,1,unit,comm,req);
  return(0);
}

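/* With the graph above, the only edge that stays on-process is root <rank> -> leaf <rank> (the
   diagonal block of the alltoall), so the local SF has exactly one root and one leaf */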
static PetscErrorCode PetscSFCreateLocalSF_Alltoall(PetscSF sf,PetscSF *out)
{
  PetscInt       nroots = 1,nleaves = 1,*ilocal;
  PetscSFNode    *iremote = NULL;
  PetscSF        lsf;
  PetscMPIInt    rank;

  nroots  = 1;
  nleaves = 1;
  MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);
  PetscMalloc1(nleaves,&ilocal);
  PetscMalloc1(nleaves,&iremote);
  ilocal[0]        = rank;
  iremote[0].rank  = 0;    /* rank in PETSC_COMM_SELF */
  iremote[0].index = rank; /* LocalSF is an embedded SF. Indices are not remapped */

  PetscSFCreate(PETSC_COMM_SELF,&lsf);
  PetscSFSetGraph(lsf,nroots,nleaves,NULL/*contiguous leaves*/,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
  PetscSFSetUp(lsf);
  *out = lsf;
  return(0);
}

static PetscErrorCode PetscSFCreateEmbeddedSF_Alltoall(PetscSF sf,PetscInt nselected,const PetscInt *selected,PetscSF *newsf)
{
  PetscInt       i,*tmproots,*ilocal,ndranks,ndiranks;
  PetscSFNode    *iremote;
  PetscMPIInt    nroots,*roots,nleaves,*leaves,rank;
  MPI_Comm       comm;
  PetscSF_Basic  *bas;
  PetscSF        esf;

  PetscObjectGetComm((PetscObject)sf,&comm);
  MPI_Comm_rank(comm,&rank);

  /* Uniq selected[] and store the result in roots[] */
  PetscMalloc1(nselected,&tmproots);
  PetscArraycpy(tmproots,selected,nselected);
  PetscSortRemoveDupsInt(&nselected,tmproots); /* nselected might be changed */
  if (tmproots[0] < 0 || tmproots[nselected-1] >= sf->nroots) SETERRQ3(comm,PETSC_ERR_ARG_OUTOFRANGE,"Min/Max root indices %D/%D are not in [0,%D)",tmproots[0],tmproots[nselected-1],sf->nroots);
  nroots = nselected;   /* For Alltoall, we know root indices will not overflow MPI_INT */
  PetscMalloc1(nselected,&roots);
  for (i=0; i<nselected; i++) roots[i] = tmproots[i];
  PetscFree(tmproots);

  /* Find out which leaves are still connected to roots in the embedded SF. We expect PetscCommBuildTwoSided to be more scalable than MPI_Alltoall */
  PetscCommBuildTwoSided(comm,0/*empty msg*/,MPI_INT/*fake*/,nroots,roots,NULL/*todata*/,&nleaves,&leaves,NULL/*fromdata*/);

  /* Move myself ahead if rank is in leaves[], since I am a distinguished rank */
  ndranks = 0;
  for (i=0; i<nleaves; i++) {
    if (leaves[i] == rank) {leaves[i] = -rank; ndranks = 1; break;}
  }
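  /* Negating our own rank makes it sort to the front; its sign is restored right after the sort */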
  PetscSortMPIInt(nleaves,leaves);
  if (nleaves && leaves[0] < 0) leaves[0] = rank;

  /* Build esf and fill its fields manually (without calling PetscSFSetUp) */
  PetscMalloc1(nleaves,&ilocal);
  PetscMalloc1(nleaves,&iremote);
  for (i=0; i<nleaves; i++) { /* 1:1 map from roots to leaves */
    ilocal[i]        = leaves[i];
    iremote[i].rank  = leaves[i];
    iremote[i].index = leaves[i];
  }
  PetscSFCreate(comm,&esf);
  PetscSFSetType(esf,PETSCSFBASIC); /* This optimized routine can only create a basic sf */
  PetscSFSetGraph(esf,sf->nleaves,nleaves,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);

  /* As if we are calling PetscSFSetUpRanks(esf,self's group) */
  PetscMalloc4(nleaves,&esf->ranks,nleaves+1,&esf->roffset,nleaves,&esf->rmine,nleaves,&esf->rremote);
  esf->nranks     = nleaves;
  esf->ndranks    = ndranks;
  esf->roffset[0] = 0;
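  /* Each connected remote rank contributes exactly one leaf, so the offsets are simply 0,1,2,... */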
  for (i=0; i<nleaves; i++) {
    esf->ranks[i]     = leaves[i];
    esf->roffset[i+1] = i+1;
    esf->rmine[i]     = leaves[i];
    esf->rremote[i]   = leaves[i];
  }

  /* Set up esf->data, the incoming communication (i.e., recv info), which is usually done by PetscSFSetUp_Basic */
  bas  = (PetscSF_Basic*)esf->data;
  PetscMalloc2(nroots,&bas->iranks,nroots+1,&bas->ioffset);
  PetscMalloc1(nroots,&bas->irootloc);
  /* Move myself ahead if rank is in roots[], since I am a distinguished irank */
  ndiranks = 0;
  for (i=0; i<nroots; i++) {
    if (roots[i] == rank) {roots[i] = -rank; ndiranks = 1; break;}
  }
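  /* Same negate-sort-restore trick as for leaves[] above */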
  PetscSortMPIInt(nroots,roots);
  if (nroots && roots[0] < 0) roots[0] = rank;

  bas->niranks    = nroots;
  bas->ndiranks   = ndiranks;
  bas->ioffset[0] = 0;
  bas->itotal     = nroots;
  for (i=0; i<nroots; i++) {
    bas->iranks[i]    = roots[i];
    bas->ioffset[i+1] = i+1;
    bas->irootloc[i]  = roots[i];
  }

  /* See PetscSFCreateEmbeddedSF_Basic */
  esf->nleafreqs  = esf->nranks - esf->ndranks;
  bas->nrootreqs  = bas->niranks - bas->ndiranks;
  esf->persistent = PETSC_TRUE;
  /* Set up packing related fields */
  PetscSFSetUpPackFields(esf);

  esf->setupcalled = PETSC_TRUE; /* We have done setup ourselves! */
  *newsf = esf;
  return(0);
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Alltoall(PetscSF sf)
{
  PetscErrorCode   ierr;
  PetscSF_Alltoall *dat = (PetscSF_Alltoall*)sf->data;

  sf->ops->BcastAndOpEnd   = PetscSFBcastAndOpEnd_Basic;
  sf->ops->ReduceEnd       = PetscSFReduceEnd_Basic;

  /* Inherit from Allgatherv. It is astonishing Alltoall can inherit so much from Allgather(v) */
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;

  /* Inherit from Allgather. Every process gathers equal-sized data from others, which enables this inheritance. */
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->SetUp           = PetscSFSetUp_Allgather;

  /* Inherit from Gatherv. Each root has only one leaf connected, which enables this inheritance */
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Gatherv;

  /* Alltoall stuff */
  sf->ops->GetGraph         = PetscSFGetGraph_Alltoall;
  sf->ops->BcastAndOpBegin  = PetscSFBcastAndOpBegin_Alltoall;
  sf->ops->ReduceBegin      = PetscSFReduceBegin_Alltoall;
  sf->ops->CreateLocalSF    = PetscSFCreateLocalSF_Alltoall;
  sf->ops->CreateEmbeddedSF = PetscSFCreateEmbeddedSF_Alltoall;

  PetscNewLog(sf,&dat);
  sf->data = (void*)dat;
  return(0);
}
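
For orientation, here is a minimal usage sketch; it is not part of sfalltoall.c. It assumes that selecting PETSCSF_PATTERN_ALLTOALL through PetscSFSetGraphWithPattern picks this implementation and that the layout argument may be passed as NULL since it is not used for that pattern; a bcast over the resulting SF should then behave like MPI_Alltoall.

#include <petscsf.h>

int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscSF        sf;
  PetscMPIInt    size,rank;
  PetscInt       i,*rootdata,*leafdata;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  ierr = PetscSFCreate(PETSC_COMM_WORLD,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraphWithPattern(sf,NULL,PETSCSF_PATTERN_ALLTOALL);CHKERRQ(ierr); /* assumption: layout unused for this pattern */
  ierr = PetscSFSetUp(sf);CHKERRQ(ierr);

  /* One unit of rootdata per destination process, one unit of leafdata per source process */
  ierr = PetscMalloc2(size,&rootdata,size,&leafdata);CHKERRQ(ierr);
  for (i=0; i<size; i++) rootdata[i] = rank*size + i;

  ierr = PetscSFBcastBegin(sf,MPIU_INT,rootdata,leafdata);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(sf,MPIU_INT,rootdata,leafdata);CHKERRQ(ierr);
  /* leafdata[i] should now hold what process i addressed to this process, as with MPI_Alltoall */

  ierr = PetscFree2(rootdata,leafdata);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}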