Actual source code: mpimesg.c

petsc-3.9.4 2018-09-11

#include <petscsys.h>


/*@C
  PetscGatherNumberOfMessages - Computes the number of messages a node expects to receive

  Collective on MPI_Comm

  Input Parameters:
+ comm     - Communicator
. iflags   - an array of integers of length equal to the size of comm. A '1' in iflags[i] represents a
             message from the current node to the ith node. Optionally NULL.
- ilengths - a nonzero ilengths[i] represents a message to node i of length ilengths[i].
             Optionally NULL.

  Output Parameter:
. nrecvs   - number of messages received

  Level: developer

  Concepts: mpi utility

  Notes:
  With this info, the correct message lengths can be determined using
  PetscGatherMessageLengths()

  Either iflags or ilengths should be provided. If iflags is not
  provided (NULL) it can be computed from ilengths. If iflags is
  provided, ilengths is not required.

.seealso: PetscGatherMessageLengths()
@*/
PetscErrorCode  PetscGatherNumberOfMessages(MPI_Comm comm,const PetscMPIInt iflags[],const PetscMPIInt ilengths[],PetscMPIInt *nrecvs)
{
  PetscMPIInt    size,rank,*recv_buf,i,*iflags_local = NULL,*iflags_localm = NULL;

  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);

  PetscMalloc2(size,&recv_buf,size,&iflags_localm);

  /* If iflags not provided, compute iflags from ilengths */
  if (!iflags) {
    if (!ilengths) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Either iflags or ilengths should be provided");
    iflags_local = iflags_localm;
    for (i=0; i<size; i++) {
      if (ilengths[i]) iflags_local[i] = 1;
      else iflags_local[i] = 0;
    }
  } else iflags_local = (PetscMPIInt*) iflags;

  /* Post an allreduce to determine the number of messages the current node will receive */
  MPIU_Allreduce(iflags_local,recv_buf,size,MPI_INT,MPI_SUM,comm);
  *nrecvs = recv_buf[rank];

  PetscFree2(recv_buf,iflags_localm);
  return(0);
}
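/*
   Usage sketch (editorial addition, not part of the original source): every
   rank sends one message to the next rank in a ring, then asks how many
   messages it should expect to receive.  The function and variable names
   below are hypothetical.
*/
static PetscErrorCode ExampleGatherNumberOfMessages(MPI_Comm comm)
{
  PetscMPIInt size,rank,*ilengths,nrecvs;

  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);
  PetscCalloc1(size,&ilengths);
  ilengths[(rank+1)%size] = 10;   /* send 10 items to the next rank */
  PetscGatherNumberOfMessages(comm,NULL,ilengths,&nrecvs);
  /* for this ring pattern nrecvs is 1 on every rank */
  PetscFree(ilengths);
  return(0);
}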


/*@C
  PetscGatherMessageLengths - Computes info about messages that an MPI node will receive,
  including (from-id,length) pairs for each message.

  Collective on MPI_Comm

  Input Parameters:
+ comm      - Communicator
. nsends    - number of messages that are to be sent
. nrecvs    - number of messages being received
- ilengths  - an array of integers of length equal to the size of comm;
              a nonzero ilengths[i] represents a message to node i of length ilengths[i]

  Output Parameters:
+ onodes    - list of node-ids from which messages are expected
- olengths  - corresponding message lengths

  Level: developer

  Concepts: mpi utility

  Notes:
  With this info, the correct MPI_Irecv() can be posted with the correct
  from-id, with a buffer with the right amount of memory required.

  The caller is responsible for freeing the memory in onodes and olengths

  To determine nrecvs, one can use PetscGatherNumberOfMessages()

.seealso: PetscGatherNumberOfMessages()
@*/
PetscErrorCode  PetscGatherMessageLengths(MPI_Comm comm,PetscMPIInt nsends,PetscMPIInt nrecvs,const PetscMPIInt ilengths[],PetscMPIInt **onodes,PetscMPIInt **olengths)
{
  PetscMPIInt    size,tag,i,j;
  MPI_Request    *s_waits  = NULL,*r_waits = NULL;
  MPI_Status     *w_status = NULL;

  MPI_Comm_size(comm,&size);
  PetscCommGetNewTag(comm,&tag);

  /* cannot use PetscMalloc3() here because in the call to MPI_Waitall() they MUST be contiguous */
  PetscMalloc2(nrecvs+nsends,&r_waits,nrecvs+nsends,&w_status);
  s_waits = r_waits+nrecvs;

  /* Post the Irecvs to get the message length-info */
  PetscMalloc1(nrecvs,olengths);
  for (i=0; i<nrecvs; i++) {
    MPI_Irecv((*olengths)+i,1,MPI_INT,MPI_ANY_SOURCE,tag,comm,r_waits+i);
  }

  /* Post the Isends with the message length-info */
  for (i=0,j=0; i<size; ++i) {
    if (ilengths[i]) {
      MPI_Isend((void*)(ilengths+i),1,MPI_INT,i,tag,comm,s_waits+j);
      j++;
    }
  }

  /* Post waits on sends and receives */
  if (nrecvs+nsends) {MPI_Waitall(nrecvs+nsends,r_waits,w_status);}

  /* Pack up the received data */
  PetscMalloc1(nrecvs,onodes);
  for (i=0; i<nrecvs; ++i) (*onodes)[i] = w_status[i].MPI_SOURCE;
  PetscFree2(r_waits,w_status);
  return(0);
}
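/*
   Usage sketch (editorial addition, not part of the original source):
   combine PetscGatherNumberOfMessages() and PetscGatherMessageLengths()
   to learn the source and size of every incoming message.  All names in
   this sketch are hypothetical.
*/
static PetscErrorCode ExampleGatherMessageLengths(MPI_Comm comm,PetscMPIInt nsends,const PetscMPIInt ilengths[])
{
  PetscMPIInt nrecvs,*onodes,*olengths,i;

  PetscGatherNumberOfMessages(comm,NULL,ilengths,&nrecvs);
  PetscGatherMessageLengths(comm,nsends,nrecvs,ilengths,&onodes,&olengths);
  for (i=0; i<nrecvs; i++) {
    /* expect a message of olengths[i] items from rank onodes[i] */
  }
  PetscFree(onodes);     /* the caller frees the output arrays */
  PetscFree(olengths);
  return(0);
}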

/*@C
  PetscGatherMessageLengths2 - Computes info about messages that an MPI node will receive,
  including (from-id,length) pairs for each message. Same functionality as PetscGatherMessageLengths()
  except it takes TWO input length arrays and produces TWO output length arrays.

  Collective on MPI_Comm

  Input Parameters:
+ comm      - Communicator
. nsends    - number of messages that are to be sent
. nrecvs    - number of messages being received
- ilengths1, ilengths2 - arrays of integers of length equal to the size of comm;
              a nonzero ilengths1[i] represents a message to node i of length ilengths1[i]

  Output Parameters:
+ onodes    - list of node-ids from which messages are expected
- olengths1, olengths2 - corresponding message lengths

  Level: developer

  Concepts: mpi utility

  Notes:
  With this info, the correct MPI_Irecv() can be posted with the correct
  from-id, with a buffer with the right amount of memory required.

  The caller is responsible for freeing the memory in onodes, olengths1, and olengths2

  To determine nrecvs, one can use PetscGatherNumberOfMessages()

.seealso: PetscGatherMessageLengths(), PetscGatherNumberOfMessages()
@*/
PetscErrorCode  PetscGatherMessageLengths2(MPI_Comm comm,PetscMPIInt nsends,PetscMPIInt nrecvs,const PetscMPIInt ilengths1[],const PetscMPIInt ilengths2[],PetscMPIInt **onodes,PetscMPIInt **olengths1,PetscMPIInt **olengths2)
{
  PetscMPIInt    size,tag,i,j,*buf_s = NULL,*buf_r = NULL,*buf_j = NULL;
  MPI_Request    *s_waits  = NULL,*r_waits = NULL;
  MPI_Status     *w_status = NULL;

  MPI_Comm_size(comm,&size);
  PetscCommGetNewTag(comm,&tag);

  /* cannot use PetscMalloc5() because r_waits and s_waits must be contiguous for the call to MPI_Waitall() */
  PetscMalloc4(nrecvs+nsends,&r_waits,2*nrecvs,&buf_r,2*nsends,&buf_s,nrecvs+nsends,&w_status);
  s_waits = r_waits + nrecvs;

  /* Post the Irecvs to get the message length-info */
  PetscMalloc1(nrecvs+1,olengths1);
  PetscMalloc1(nrecvs+1,olengths2);
  for (i=0; i<nrecvs; i++) {
    buf_j = buf_r + (2*i);
    MPI_Irecv(buf_j,2,MPI_INT,MPI_ANY_SOURCE,tag,comm,r_waits+i);
  }

  /* Post the Isends with the message length-info */
  for (i=0,j=0; i<size; ++i) {
    if (ilengths1[i]) {
      buf_j    = buf_s + (2*j);
      buf_j[0] = *(ilengths1+i);
      buf_j[1] = *(ilengths2+i);
      MPI_Isend(buf_j,2,MPI_INT,i,tag,comm,s_waits+j);
      j++;
    }
  }
  if (j != nsends) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"j %d not equal to expected number of sends %d\n",j,nsends);

  /* Post waits on sends and receives */
  if (nrecvs+nsends) {MPI_Waitall(nrecvs+nsends,r_waits,w_status);}

  /* Pack up the received data */
  PetscMalloc1(nrecvs+1,onodes);
  for (i=0; i<nrecvs; ++i) {
    (*onodes)[i]    = w_status[i].MPI_SOURCE;
    buf_j           = buf_r + (2*i);
    (*olengths1)[i] = buf_j[0];
    (*olengths2)[i] = buf_j[1];
  }

  PetscFree4(r_waits,buf_r,buf_s,w_status);
  return(0);
}
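/*
   Usage sketch (editorial addition, not part of the original source): the
   two-array variant communicates a pair of lengths per message, e.g. a row
   count and a nonzero count.  Note the sends are keyed off nonzero entries
   of ilengths1, so ilengths2[i] is only transmitted where ilengths1[i] is
   nonzero.  All names here are hypothetical.
*/
static PetscErrorCode ExampleGatherMessageLengths2(MPI_Comm comm,PetscMPIInt nsends,PetscMPIInt nrecvs,const PetscMPIInt ilengths1[],const PetscMPIInt ilengths2[])
{
  PetscMPIInt *onodes,*olengths1,*olengths2;

  PetscGatherMessageLengths2(comm,nsends,nrecvs,ilengths1,ilengths2,&onodes,&olengths1,&olengths2);
  /* olengths1[i] and olengths2[i] both describe the message from onodes[i] */
  PetscFree(onodes);
  PetscFree(olengths1);
  PetscFree(olengths2);
  return(0);
}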

/*
  Allocate a buffer sufficient to hold messages of the sizes specified in olengths,
  and post Irecvs on these buffers using node info from onodes
 */
PetscErrorCode  PetscPostIrecvInt(MPI_Comm comm,PetscMPIInt tag,PetscMPIInt nrecvs,const PetscMPIInt onodes[],const PetscMPIInt olengths[],PetscInt ***rbuf,MPI_Request **r_waits)
{
  PetscInt       **rbuf_t,i,len = 0;
  MPI_Request    *r_waits_t;

  /* compute memory required for recv buffers */
  for (i=0; i<nrecvs; i++) len += olengths[i];  /* each message length */

  /* allocate memory for recv buffers */
  PetscMalloc1(nrecvs+1,&rbuf_t);
  PetscMalloc1(len,&rbuf_t[0]);
  for (i=1; i<nrecvs; ++i) rbuf_t[i] = rbuf_t[i-1] + olengths[i-1];

  /* Post the receives */
  PetscMalloc1(nrecvs,&r_waits_t);
  for (i=0; i<nrecvs; ++i) {
    MPI_Irecv(rbuf_t[i],olengths[i],MPIU_INT,onodes[i],tag,comm,r_waits_t+i);
  }

  *rbuf    = rbuf_t;
  *r_waits = r_waits_t;
  return(0);
}
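/*
   Usage sketch (editorial addition, not part of the original source): post
   the receives with PetscPostIrecvInt(), issue the matching sends, and wait
   on both.  The send-side arrays (snodes, slengths, sbuf) are hypothetical
   inputs describing one buffer per destination.
*/
static PetscErrorCode ExamplePostIrecvInt(MPI_Comm comm,PetscMPIInt tag,PetscMPIInt nrecvs,const PetscMPIInt onodes[],const PetscMPIInt olengths[],PetscMPIInt nsends,const PetscMPIInt snodes[],const PetscMPIInt slengths[],PetscInt **sbuf)
{
  PetscInt    **rbuf;
  MPI_Request *r_waits,*s_waits;
  MPI_Status  *statuses;
  PetscMPIInt i;

  PetscPostIrecvInt(comm,tag,nrecvs,onodes,olengths,&rbuf,&r_waits);
  PetscMalloc2(nsends,&s_waits,nsends+nrecvs,&statuses);
  for (i=0; i<nsends; i++) {
    MPI_Isend(sbuf[i],slengths[i],MPIU_INT,snodes[i],tag,comm,s_waits+i);
  }
  if (nrecvs) {MPI_Waitall(nrecvs,r_waits,statuses);}
  if (nsends) {MPI_Waitall(nsends,s_waits,statuses);}
  /* rbuf[i] now holds olengths[i] integers from rank onodes[i] */
  PetscFree(rbuf[0]);    /* the receive buffers share one allocation */
  PetscFree(rbuf);
  PetscFree(r_waits);
  PetscFree2(s_waits,statuses);
  return(0);
}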

PetscErrorCode  PetscPostIrecvScalar(MPI_Comm comm,PetscMPIInt tag,PetscMPIInt nrecvs,const PetscMPIInt onodes[],const PetscMPIInt olengths[],PetscScalar ***rbuf,MPI_Request **r_waits)
{
  PetscMPIInt    i;
  PetscScalar    **rbuf_t;
  MPI_Request    *r_waits_t;
  PetscInt       len = 0;

  /* compute memory required for recv buffers */
  for (i=0; i<nrecvs; i++) len += olengths[i];  /* each message length */

  /* allocate memory for recv buffers */
  PetscMalloc1(nrecvs+1,&rbuf_t);
  PetscMalloc1(len,&rbuf_t[0]);
  for (i=1; i<nrecvs; ++i) rbuf_t[i] = rbuf_t[i-1] + olengths[i-1];

  /* Post the receives */
  PetscMalloc1(nrecvs,&r_waits_t);
  for (i=0; i<nrecvs; ++i) {
    MPI_Irecv(rbuf_t[i],olengths[i],MPIU_SCALAR,onodes[i],tag,comm,r_waits_t+i);
  }

  *rbuf    = rbuf_t;
  *r_waits = r_waits_t;
  return(0);
}
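/*
   Sketch of the matching send side for PetscPostIrecvScalar() (editorial
   addition, not part of the original source): the sender must use the same
   tag and the MPIU_SCALAR datatype so its messages pair with the posted
   receives.  svalues and dest are hypothetical.
*/
static PetscErrorCode ExampleSendScalars(MPI_Comm comm,PetscMPIInt tag,PetscScalar *svalues,PetscMPIInt n,PetscMPIInt dest,MPI_Request *req)
{
  MPI_Isend(svalues,n,MPIU_SCALAR,dest,tag,comm,req);
  return(0);
}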