/* mpits.c — build two-sided MPI communication from one-sided information (petsc-3.6.1, 2015-08-06) */
1: #include <petscsys.h> /*I "petscsys.h" I*/
3: const char *const PetscBuildTwoSidedTypes[] = {
4: "ALLREDUCE",
5: "IBARRIER",
6: "PetscBuildTwoSidedType",
7: "PETSC_BUILDTWOSIDED_",
8: 0
9: };
11: static PetscBuildTwoSidedType _twosided_type = PETSC_BUILDTWOSIDED_NOTSET;
15: /*@
16: PetscCommBuildTwoSidedSetType - set algorithm to use when building two-sided communication
18: Logically Collective
20: Input Arguments:
21: + comm - PETSC_COMM_WORLD
22: - twosided - algorithm to use in subsequent calls to PetscCommBuildTwoSided()
24: Level: developer
26: Note:
27: This option is currently global, but could be made per-communicator.
29: .seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedGetType()
30: @*/
31: PetscErrorCode PetscCommBuildTwoSidedSetType(MPI_Comm comm,PetscBuildTwoSidedType twosided)
32: {
34: #if defined(PETSC_USE_DEBUG)
36: PetscMPIInt ierr;
37: PetscMPIInt b1[2],b2[2];
38: b1[0] = -(PetscMPIInt)twosided;
39: b1[1] = (PetscMPIInt)twosided;
40: MPI_Allreduce(b1,b2,2,MPI_INT,MPI_MAX,comm);
41: if (-b2[0] != b2[1]) SETERRQ(comm,PETSC_ERR_ARG_WRONG,"Enum value must be same on all processes");
42: }
43: #endif
44: _twosided_type = twosided;
45: return(0);
46: }
50: /*@
51: PetscCommBuildTwoSidedGetType - set algorithm to use when building two-sided communication
53: Logically Collective
55: Output Arguments:
56: + comm - communicator on which to query algorithm
57: - twosided - algorithm to use for PetscCommBuildTwoSided()
59: Level: developer
61: .seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedSetType()
62: @*/
63: PetscErrorCode PetscCommBuildTwoSidedGetType(MPI_Comm comm,PetscBuildTwoSidedType *twosided)
64: {
68: *twosided = PETSC_BUILDTWOSIDED_NOTSET;
69: if (_twosided_type == PETSC_BUILDTWOSIDED_NOTSET) {
70: #if defined(PETSC_HAVE_MPI_IBARRIER)
71: # if defined(PETSC_HAVE_MPICH_CH3_SOCK) && !defined(PETSC_HAVE_MPICH_CH3_SOCK_FIXED_NBC_PROGRESS)
72: /* Deadlock in Ibarrier: http://trac.mpich.org/projects/mpich/ticket/1785 */
73: _twosided_type = PETSC_BUILDTWOSIDED_ALLREDUCE;
74: # else
75: _twosided_type = PETSC_BUILDTWOSIDED_IBARRIER;
76: # endif
77: #else
78: _twosided_type = PETSC_BUILDTWOSIDED_ALLREDUCE;
79: #endif
80: PetscOptionsGetEnum(NULL,"-build_twosided",PetscBuildTwoSidedTypes,(PetscEnum*)&_twosided_type,NULL);
81: }
82: *twosided = _twosided_type;
83: return(0);
84: }
86: #if defined(PETSC_HAVE_MPI_IBARRIER)
90: static PetscErrorCode PetscCommBuildTwoSided_Ibarrier(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscInt nto,const PetscMPIInt *toranks,const void *todata,PetscInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
91: {
93: PetscMPIInt nrecvs,tag,unitbytes,done;
94: PetscInt i;
95: char *tdata;
96: MPI_Request *sendreqs,barrier;
97: PetscSegBuffer segrank,segdata;
100: PetscCommGetNewTag(comm,&tag);
101: MPI_Type_size(dtype,&unitbytes);
102: tdata = (char*)todata;
103: PetscMalloc1(nto,&sendreqs);
104: for (i=0; i<nto; i++) {
105: MPI_Issend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);
106: }
107: PetscSegBufferCreate(sizeof(PetscMPIInt),4,&segrank);
108: PetscSegBufferCreate(unitbytes,4*count,&segdata);
110: nrecvs = 0;
111: barrier = MPI_REQUEST_NULL;
112: for (done=0; !done; ) {
113: PetscMPIInt flag;
114: MPI_Status status;
115: MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&status);
116: if (flag) { /* incoming message */
117: PetscMPIInt *recvrank;
118: void *buf;
119: PetscSegBufferGet(segrank,1,&recvrank);
120: PetscSegBufferGet(segdata,count,&buf);
121: *recvrank = status.MPI_SOURCE;
122: MPI_Recv(buf,count,dtype,status.MPI_SOURCE,tag,comm,MPI_STATUS_IGNORE);
123: nrecvs++;
124: }
125: if (barrier == MPI_REQUEST_NULL) {
126: PetscMPIInt sent,nsends;
127: PetscMPIIntCast(nto,&nsends);
128: MPI_Testall(nsends,sendreqs,&sent,MPI_STATUSES_IGNORE);
129: if (sent) {
130: MPI_Ibarrier(comm,&barrier);
131: PetscFree(sendreqs);
132: }
133: } else {
134: MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);
135: }
136: }
137: *nfrom = nrecvs;
138: PetscSegBufferExtractAlloc(segrank,fromranks);
139: PetscSegBufferDestroy(&segrank);
140: PetscSegBufferExtractAlloc(segdata,fromdata);
141: PetscSegBufferDestroy(&segdata);
142: return(0);
143: }
144: #endif
148: static PetscErrorCode PetscCommBuildTwoSided_Allreduce(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscInt nto,const PetscMPIInt *toranks,const void *todata,PetscInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
149: {
151: PetscMPIInt size,*iflags,nrecvs,tag,unitbytes,*franks;
152: PetscInt i;
153: char *tdata,*fdata;
154: MPI_Request *reqs,*sendreqs;
155: MPI_Status *statuses;
158: MPI_Comm_size(comm,&size);
159: PetscCalloc1(size,&iflags);
160: for (i=0; i<nto; i++) iflags[toranks[i]] = 1;
161: PetscGatherNumberOfMessages(comm,iflags,NULL,&nrecvs);
162: PetscFree(iflags);
164: PetscCommGetNewTag(comm,&tag);
165: MPI_Type_size(dtype,&unitbytes);
166: PetscMalloc(nrecvs*count*unitbytes,&fdata);
167: tdata = (char*)todata;
168: PetscMalloc2(nto+nrecvs,&reqs,nto+nrecvs,&statuses);
169: sendreqs = reqs + nrecvs;
170: for (i=0; i<nrecvs; i++) {
171: MPI_Irecv((void*)(fdata+count*unitbytes*i),count,dtype,MPI_ANY_SOURCE,tag,comm,reqs+i);
172: }
173: for (i=0; i<nto; i++) {
174: MPI_Isend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);
175: }
176: MPI_Waitall(nto+nrecvs,reqs,statuses);
177: PetscMalloc1(nrecvs,&franks);
178: for (i=0; i<nrecvs; i++) franks[i] = statuses[i].MPI_SOURCE;
179: PetscFree2(reqs,statuses);
181: *nfrom = nrecvs;
182: *fromranks = franks;
183: *(void**)fromdata = fdata;
184: return(0);
185: }
189: /*@C
190: PetscCommBuildTwoSided - discovers communicating ranks given one-sided information, moving constant-sized data in the process (often message lengths)
192: Collective on MPI_Comm
194: Input Arguments:
195: + comm - communicator
196: . count - number of entries to send/receive (must match on all ranks)
197: . dtype - datatype to send/receive from each rank (must match on all ranks)
198: . nto - number of ranks to send data to
199: . toranks - ranks to send to (array of length nto)
200: - todata - data to send to each rank (packed)
202: Output Arguments:
203: + nfrom - number of ranks receiving messages from
204: . fromranks - ranks receiving messages from (length nfrom; caller should PetscFree())
205: - fromdata - packed data from each rank, each with count entries of type dtype (length nfrom, caller responsible for PetscFree())
207: Level: developer
209: Notes:
210: This memory-scalable interface is an alternative to calling PetscGatherNumberOfMessages() and
211: PetscGatherMessageLengths(), possibly with a subsequent round of communication to send other constant-size data.
213: Basic data types as well as contiguous types are supported, but non-contiguous (e.g., strided) types are not.
215: References:
216: The MPI_Ibarrier implementation uses the algorithm in
217: Hoefler, Siebert and Lumsdaine, Scalable communication protocols for dynamic sparse data exchange, 2010.
219: .seealso: PetscGatherNumberOfMessages(), PetscGatherMessageLengths()
220: @*/
221: PetscErrorCode PetscCommBuildTwoSided(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscInt nto,const PetscMPIInt *toranks,const void *todata,PetscInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
222: {
223: PetscErrorCode ierr;
224: PetscBuildTwoSidedType buildtype = PETSC_BUILDTWOSIDED_NOTSET;
227: PetscCommBuildTwoSidedGetType(comm,&buildtype);
228: switch (buildtype) {
229: case PETSC_BUILDTWOSIDED_IBARRIER:
230: #if defined(PETSC_HAVE_MPI_IBARRIER)
231: PetscCommBuildTwoSided_Ibarrier(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);
232: #else
233: SETERRQ(comm,PETSC_ERR_PLIB,"MPI implementation does not provide MPI_Ibarrier (part of MPI-3)");
234: #endif
235: break;
236: case PETSC_BUILDTWOSIDED_ALLREDUCE:
237: PetscCommBuildTwoSided_Allreduce(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);
238: break;
239: default: SETERRQ(comm,PETSC_ERR_PLIB,"Unknown method for building two-sided communication");
240: }
241: return(0);
242: }