Actual source code: sfallgather.c
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* Reuse the type. The difference is that some fields (i.e., displs, recvcounts) are not used in Allgather on rank != 0, which is not a big deal */
typedef PetscSF_Allgatherv PetscSF_Allgather;
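
/*
   A minimal usage sketch (not part of this file): one way an SF with this communication pattern
   typically arises is PetscSFSetGraphWithPattern() with PETSCSF_PATTERN_ALLGATHER, where each
   process owns the slice of roots described by the layout and every process has leaves mirroring
   all roots, so a broadcast maps onto an (I)allgather. The variables comm and nlocal are
   illustrative.

     PetscSF     sf;
     PetscLayout map;

     PetscCall(PetscLayoutCreateFromSizes(comm, nlocal, PETSC_DECIDE, 1, &map));
     PetscCall(PetscSFCreate(comm, &sf));
     PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER));
     PetscCall(PetscSFSetUp(sf));
     PetscCall(PetscLayoutDestroy(&map));
*/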

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gather(PetscSF, MPI_Datatype, PetscMemType, const void *, PetscMemType, void *, MPI_Op);
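
/*
   Notes on the setup below: this SF type treats all traffic as PETSCSF_REMOTE with contiguous,
   duplicate-free pack buffers (the remote leaf buffer spans all sf->nleaves leaves, the remote
   root buffer all sf->nroots local roots), and a single MPI request suffices because each
   operation is one collective; that request is counted as a root request.
*/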
PetscErrorCode PetscSFSetUp_Allgather(PetscSF sf)
{
  PetscInt           i;
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  for (i = PETSCSF_LOCAL; i <= PETSCSF_REMOTE; i++) {
    sf->leafbuflen[i]  = 0;
    sf->leafstart[i]   = 0;
    sf->leafcontig[i]  = PETSC_TRUE;
    sf->leafdups[i]    = PETSC_FALSE;
    dat->rootbuflen[i] = 0;
    dat->rootstart[i]  = 0;
    dat->rootcontig[i] = PETSC_TRUE;
    dat->rootdups[i]   = PETSC_FALSE;
  }

  sf->leafbuflen[PETSCSF_REMOTE]  = sf->nleaves;
  dat->rootbuflen[PETSCSF_REMOTE] = sf->nroots;
  sf->persistent                  = PETSC_FALSE;
  sf->nleafreqs                   = 0; /* MPI collectives only need one request. We treat it as a root request. */
  dat->nrootreqs                  = 1;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PetscSFBcastBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink  link;
  PetscMPIInt  sendcount;
  MPI_Comm     comm;
  void        *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  MPI_Request *req;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_ROOT2LEAF));
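  /* One nonblocking allgather: each rank contributes its nroots root entries and receives the concatenation from every rank into the leaf buffer; the request is completed later in BcastEnd (PetscSFBcastEnd_Basic) */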
  PetscCallMPI(MPIU_Iallgather(rootbuf, sendcount, unit, leafbuf, sendcount, unit, comm, req));
  PetscFunctionReturn(PETSC_SUCCESS);
}
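
/*
   Caller-side sketch (illustrative, not part of this file): on an SF with this pattern the Bcast
   pair behaves like an allgather of rootdata into leafdata; completion happens in
   PetscSFBcastEnd(), which waits on the request started above. Buffer names and sizes are
   assumptions for the example.

     PetscScalar rootdata[nroots_local];  // this rank's slice of roots
     PetscScalar leafdata[nroots_total];  // every rank ends up with all roots

     PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
     PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
*/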

static PetscErrorCode PetscSFReduceBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink        link;
  PetscInt           rstart;
  MPI_Comm           comm;
  PetscMPIInt        rank, count, recvcount;
  void              *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;
  MPI_Request       *req;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from the local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link)); /* Sync the device to host memcpy */
  } else {
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
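    /* Rank 0 needs a leaf-sized buffer to receive the MPI_Reduce result; allocate it lazily if the link does not have one yet */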
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    }
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_LEAF2ROOT));
    PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must do reduce with MPI builtin datatype basicunit */
    PetscCallMPI(MPIU_Iscatter(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], recvcount, unit, rootbuf, recvcount, unit, 0 /*rank 0*/, comm, req));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
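
/*
   For op != MPI_REPLACE the code above is, roughly, an MPI_Reduce of the packed leaf buffers
   onto a rank-0 scratch buffer (elementwise, using the builtin datatype link->basicunit),
   followed by an MPIU_Iscatter that hands each rank the piece belonging to its roots; the
   scatter request is completed in PetscSFReduceEnd_Allgatherv(). A caller-side sketch, with
   illustrative buffer names and sizes:

     PetscScalar leafdata[nroots_total];  // every rank contributes a value for every root
     PetscScalar rootdata[nroots_local];  // each rank receives the reduced values for its roots

     PetscCall(PetscSFReduceBegin(sf, MPIU_SCALAR, leafdata, rootdata, MPI_SUM));
     PetscCall(PetscSFReduceEnd(sf, MPIU_SCALAR, leafdata, rootdata, MPI_SUM));
*/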

static PetscErrorCode PetscSFBcastToZero_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink link;
  PetscMPIInt rank;

  PetscFunctionBegin;
  PetscCall(PetscSFBcastBegin_Gather(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(PETSC_SUCCESS);
}
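
/*
   BcastToZero delivers the root data only to the leaves on rank 0. Here it reuses the Gather
   path (PetscSFBcastBegin_Gather), finishes the communication immediately, and, when the leaves
   live on the device but MPI is not GPU-aware, copies the received host buffer back to the
   device before reclaiming the link.
*/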

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgather(PetscSF sf)
{
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  /* Inherit from Allgatherv */
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;

  /* Allgather stuff */
  sf->ops->SetUp       = PetscSFSetUp_Allgather;
  sf->ops->BcastBegin  = PetscSFBcastBegin_Allgather;
  sf->ops->ReduceBegin = PetscSFReduceBegin_Allgather;
  sf->ops->BcastToZero = PetscSFBcastToZero_Allgather;

  PetscCall(PetscNew(&dat));
  sf->data = (void *)dat;
  PetscFunctionReturn(PETSC_SUCCESS);
}
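
/*
   Sketch (illustrative): this constructor runs when the type is selected explicitly, e.g.

     PetscCall(PetscSFSetType(sf, PETSCSFALLGATHER));

   Most operations are inherited from the Allgatherv implementation; only SetUp, BcastBegin,
   ReduceBegin and BcastToZero are specialized here to use the fixed-count MPI collectives.
*/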