Actual source code: mpimesg.c

petsc-3.4.5 2014-06-29
#include <petscsys.h>        /*I  "petscsys.h"  I*/


/*@C
  PetscGatherNumberOfMessages - Computes the number of messages a node expects to receive

  Collective on MPI_Comm

  Input Parameters:
+ comm     - Communicator
. iflags   - an array of integers of length equal to the size of comm. A '1' in iflags[i] represents a
             message from the current node to node i. Optionally NULL
- ilengths - Nonzero ilengths[i] represents a message to node i of length ilengths[i].
             Optionally NULL.

  Output Parameter:
. nrecvs    - number of messages received

  Level: developer

  Concepts: mpi utility

  Notes:
  With this info, the correct message lengths can be determined using
  PetscGatherMessageLengths()

  Either iflags or ilengths should be provided. If iflags is not
  provided (NULL), it can be computed from ilengths. If iflags is
  provided, ilengths is not required.
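
  Example Usage:
  A minimal sketch (hypothetical setup; error checking omitted). Each node fills
  ilengths with the length of the message destined for every other node, then asks
  how many messages it should expect in return:
.vb
    PetscMPIInt size,nrecvs,*ilengths;
    MPI_Comm_size(comm,&size);
    PetscMalloc(size*sizeof(PetscMPIInt),&ilengths);
    /* set ilengths[i] to the length of the message for node i, 0 if none */
    PetscGatherNumberOfMessages(comm,NULL,ilengths,&nrecvs);
.ve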

.seealso: PetscGatherMessageLengths()
@*/
PetscErrorCode  PetscGatherNumberOfMessages(MPI_Comm comm,const PetscMPIInt iflags[],const PetscMPIInt ilengths[],PetscMPIInt *nrecvs)
{
  PetscMPIInt    size,rank,*recv_buf,i,*iflags_local = NULL,*iflags_localm = NULL;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ierr = PetscMalloc2(size,PetscMPIInt,&recv_buf,size,PetscMPIInt,&iflags_localm);CHKERRQ(ierr);

  /* If iflags not provided, compute iflags from ilengths */
  if (!iflags) {
    if (!ilengths) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Either iflags or ilengths should be provided");
    iflags_local = iflags_localm;
    for (i=0; i<size; i++) {
      if (ilengths[i]) iflags_local[i] = 1;
      else iflags_local[i] = 0;
    }
  } else iflags_local = (PetscMPIInt*) iflags;

  /* Post an allreduce to determine the number of messages the current node will receive */
  ierr    = MPI_Allreduce(iflags_local,recv_buf,size,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
  *nrecvs = recv_buf[rank];

  ierr = PetscFree2(recv_buf,iflags_localm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}


/*@C
  PetscGatherMessageLengths - Computes info about messages that an MPI node will receive,
  including (from-id,length) pairs for each message.

  Collective on MPI_Comm

  Input Parameters:
+ comm      - Communicator
. nsends    - number of messages that are to be sent
. nrecvs    - number of messages being received
- ilengths  - an array of integers of length equal to the size of comm;
              a nonzero ilengths[i] represents a message to node i of length ilengths[i]


  Output Parameters:
+ onodes    - list of node-ids from which messages are expected
- olengths  - corresponding message lengths

  Level: developer

  Concepts: mpi utility

  Notes:
  With this info, the correct MPI_Irecv() can be posted with the correct
  from-id, with a buffer of the required size.

  The calling function is responsible for deallocating the memory in onodes and olengths.

  To determine nrecvs, one can use PetscGatherNumberOfMessages()
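
  Example Usage:
  A minimal sketch (hypothetical setup; error checking omitted), chaining
  PetscGatherNumberOfMessages() with this routine:
.vb
    PetscMPIInt nrecvs,*onodes,*olengths;
    /* nsends = number of nonzero entries in ilengths */
    PetscGatherNumberOfMessages(comm,NULL,ilengths,&nrecvs);
    PetscGatherMessageLengths(comm,nsends,nrecvs,ilengths,&onodes,&olengths);
    /* post one MPI_Irecv() of length olengths[i] from node onodes[i] for each i */
    PetscFree(onodes);
    PetscFree(olengths);
.ve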

.seealso: PetscGatherNumberOfMessages()
@*/
PetscErrorCode  PetscGatherMessageLengths(MPI_Comm comm,PetscMPIInt nsends,PetscMPIInt nrecvs,const PetscMPIInt ilengths[],PetscMPIInt **onodes,PetscMPIInt **olengths)
{
  PetscErrorCode ierr;
  PetscMPIInt    size,tag,i,j;
  MPI_Request    *s_waits  = NULL,*r_waits = NULL;
  MPI_Status     *w_status = NULL;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscCommGetNewTag(comm,&tag);CHKERRQ(ierr);

  /* cannot use PetscMalloc3() here because in the call to MPI_Waitall() they MUST be contiguous */
  ierr    = PetscMalloc2(nrecvs+nsends,MPI_Request,&r_waits,nrecvs+nsends,MPI_Status,&w_status);CHKERRQ(ierr);
  s_waits = r_waits+nrecvs;

  /* Post the Irecv to get the message length-info */
  ierr = PetscMalloc(nrecvs*sizeof(PetscMPIInt),olengths);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv((*olengths)+i,1,MPI_INT,MPI_ANY_SOURCE,tag,comm,r_waits+i);CHKERRQ(ierr);
  }

  /* Post the Isends with the message length-info */
  for (i=0,j=0; i<size; ++i) {
    if (ilengths[i]) {
      ierr = MPI_Isend((void*)(ilengths+i),1,MPI_INT,i,tag,comm,s_waits+j);CHKERRQ(ierr);
      j++;
    }
  }

  /* Post waits on sends and receives */
  if (nrecvs+nsends) {ierr = MPI_Waitall(nrecvs+nsends,r_waits,w_status);CHKERRQ(ierr);}

  /* Pack up the received data */
  ierr = PetscMalloc(nrecvs*sizeof(PetscMPIInt),onodes);CHKERRQ(ierr);
  for (i=0; i<nrecvs; ++i) (*onodes)[i] = w_status[i].MPI_SOURCE;
  ierr = PetscFree2(r_waits,w_status);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
  PetscGatherMessageLengths2 - Computes info about messages that an MPI node will receive,
  including (from-id,length) pairs for each message. Same functionality as PetscGatherMessageLengths()
  except it takes TWO ilengths and outputs TWO olengths.

  Collective on MPI_Comm

  Input Parameters:
+ comm      - Communicator
. nsends    - number of messages that are to be sent
. nrecvs    - number of messages being received
- ilengths1, ilengths2 - arrays of integers of length equal to the size of comm;
              a nonzero ilengths1[i] represents a message to node i of length ilengths1[i]

  Output Parameters:
+ onodes    - list of node-ids from which messages are expected
- olengths1, olengths2 - corresponding message lengths

  Level: developer

  Concepts: mpi utility

  Notes:
  With this info, the correct MPI_Irecv() can be posted with the correct
  from-id, with a buffer of the required size.

  The calling function is responsible for deallocating the memory in onodes, olengths1, and olengths2.

  To determine nrecvs, one can use PetscGatherNumberOfMessages()
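
  Example Usage:
  A minimal sketch (hypothetical setup; error checking omitted). The nonzero
  pattern of ilengths1 determines which messages are sent; the corresponding
  entries of ilengths2 travel with them:
.vb
    PetscMPIInt *onodes,*olengths1,*olengths2;
    PetscGatherMessageLengths2(comm,nsends,nrecvs,ilengths1,ilengths2,&onodes,&olengths1,&olengths2);
    /* olengths1[i] and olengths2[i] both describe data arriving from node onodes[i] */
.ve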

.seealso: PetscGatherMessageLengths() and PetscGatherNumberOfMessages()
@*/
PetscErrorCode  PetscGatherMessageLengths2(MPI_Comm comm,PetscMPIInt nsends,PetscMPIInt nrecvs,const PetscMPIInt ilengths1[],const PetscMPIInt ilengths2[],PetscMPIInt **onodes,PetscMPIInt **olengths1,PetscMPIInt **olengths2)
{
  PetscErrorCode ierr;
  PetscMPIInt    size,tag,i,j,*buf_s = NULL,*buf_r = NULL,*buf_j = NULL;
  MPI_Request    *s_waits  = NULL,*r_waits = NULL;
  MPI_Status     *w_status = NULL;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscCommGetNewTag(comm,&tag);CHKERRQ(ierr);

  /* cannot use PetscMalloc5() because r_waits and s_waits must be contiguous for the call to MPI_Waitall() */
  ierr    = PetscMalloc4(nrecvs+nsends,MPI_Request,&r_waits,2*nrecvs,PetscMPIInt,&buf_r,2*nsends,PetscMPIInt,&buf_s,nrecvs+nsends,MPI_Status,&w_status);CHKERRQ(ierr);
  s_waits = r_waits + nrecvs;

  /* Post the Irecv to get the message length-info */
  ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),olengths1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),olengths2);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) {
    buf_j = buf_r + (2*i);
    ierr  = MPI_Irecv(buf_j,2,MPI_INT,MPI_ANY_SOURCE,tag,comm,r_waits+i);CHKERRQ(ierr);
  }

  /* Post the Isends with the message length-info */
  for (i=0,j=0; i<size; ++i) {
    if (ilengths1[i]) {
      buf_j    = buf_s + (2*j);
      buf_j[0] = *(ilengths1+i);
      buf_j[1] = *(ilengths2+i);
      ierr = MPI_Isend(buf_j,2,MPI_INT,i,tag,comm,s_waits+j);CHKERRQ(ierr);
      j++;
    }
  }

  /* Post waits on sends and receives */
  if (nrecvs+nsends) {ierr = MPI_Waitall(nrecvs+nsends,r_waits,w_status);CHKERRQ(ierr);}


  /* Pack up the received data */
  ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),onodes);CHKERRQ(ierr);
  for (i=0; i<nrecvs; ++i) {
    (*onodes)[i]    = w_status[i].MPI_SOURCE;
    buf_j           = buf_r + (2*i);
    (*olengths1)[i] = buf_j[0];
    (*olengths2)[i] = buf_j[1];
  }

  ierr = PetscFree4(r_waits,buf_r,buf_s,w_status);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  Allocate a buffer sufficient to hold messages of the sizes specified in olengths,
  and post Irecvs on these buffers using node info from onodes
 */
PetscErrorCode  PetscPostIrecvInt(MPI_Comm comm,PetscMPIInt tag,PetscMPIInt nrecvs,const PetscMPIInt onodes[],const PetscMPIInt olengths[],PetscInt ***rbuf,MPI_Request **r_waits)
{
  PetscErrorCode ierr;
  PetscInt       **rbuf_t,i,len = 0;
  MPI_Request    *r_waits_t;

  PetscFunctionBegin;
  /* compute memory required for recv buffers */
  for (i=0; i<nrecvs; i++) len += olengths[i];  /* each message length */

  /* allocate memory for recv buffers */
  ierr = PetscMalloc((nrecvs+1)*sizeof(PetscInt*),&rbuf_t);CHKERRQ(ierr);
  ierr = PetscMalloc(len*sizeof(PetscInt),&rbuf_t[0]);CHKERRQ(ierr);
  for (i=1; i<nrecvs; ++i) rbuf_t[i] = rbuf_t[i-1] + olengths[i-1];

  /* Post the receives */
  ierr = PetscMalloc(nrecvs*sizeof(MPI_Request),&r_waits_t);CHKERRQ(ierr);
  for (i=0; i<nrecvs; ++i) {
    ierr = MPI_Irecv(rbuf_t[i],olengths[i],MPIU_INT,onodes[i],tag,comm,r_waits_t+i);CHKERRQ(ierr);
  }

  *rbuf    = rbuf_t;
  *r_waits = r_waits_t;
  PetscFunctionReturn(0);
}
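
/*
   Example Usage (a sketch with hypothetical variable names; error checking omitted):
   after gathering (onodes,olengths) with PetscGatherMessageLengths(), post the
   receives, send the matching data, then wait on the receive requests:

     PetscInt    **rbuf;
     MPI_Request *r_waits;
     MPI_Status  *r_status;
     PetscPostIrecvInt(comm,tag,nrecvs,onodes,olengths,&rbuf,&r_waits);
     ... post the matching MPI_Isend()s of MPIU_INT data with the same tag ...
     PetscMalloc(nrecvs*sizeof(MPI_Status),&r_status);
     MPI_Waitall(nrecvs,r_waits,r_status);
     ... rbuf[i] now holds olengths[i] integers from node onodes[i] ...
*/

/* Same as PetscPostIrecvInt(), but the messages consist of PetscScalar entries (MPIU_SCALAR) */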

PetscErrorCode  PetscPostIrecvScalar(MPI_Comm comm,PetscMPIInt tag,PetscMPIInt nrecvs,const PetscMPIInt onodes[],const PetscMPIInt olengths[],PetscScalar ***rbuf,MPI_Request **r_waits)
{
  PetscErrorCode ierr;
  PetscMPIInt    i;
  PetscScalar    **rbuf_t;
  MPI_Request    *r_waits_t;
  PetscInt       len = 0;

  PetscFunctionBegin;
  /* compute memory required for recv buffers */
  for (i=0; i<nrecvs; i++) len += olengths[i];  /* each message length */

  /* allocate memory for recv buffers */
  ierr = PetscMalloc((nrecvs+1)*sizeof(PetscScalar*),&rbuf_t);CHKERRQ(ierr);
  ierr = PetscMalloc(len*sizeof(PetscScalar),&rbuf_t[0]);CHKERRQ(ierr);
  for (i=1; i<nrecvs; ++i) rbuf_t[i] = rbuf_t[i-1] + olengths[i-1];

  /* Post the receives */
  ierr = PetscMalloc(nrecvs*sizeof(MPI_Request),&r_waits_t);CHKERRQ(ierr);
  for (i=0; i<nrecvs; ++i) {
    ierr = MPI_Irecv(rbuf_t[i],olengths[i],MPIU_SCALAR,onodes[i],tag,comm,r_waits_t+i);CHKERRQ(ierr);
  }

  *rbuf    = rbuf_t;
  *r_waits = r_waits_t;
  PetscFunctionReturn(0);
}
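
/*
   Example Usage (a sketch with hypothetical variable names; error checking omitted):
   the complete handshake these routines support, here for PetscScalar data:

     PetscGatherNumberOfMessages(comm,NULL,ilengths,&nrecvs);
     PetscGatherMessageLengths(comm,nsends,nrecvs,ilengths,&onodes,&olengths);
     PetscCommGetNewTag(comm,&tag);
     PetscPostIrecvScalar(comm,tag,nrecvs,onodes,olengths,&rbuf,&r_waits);
     ... post the matching MPIU_SCALAR sends, then MPI_Waitall() on r_waits ...
     PetscFree(rbuf[0]);    ... the message data is one contiguous allocation ...
     PetscFree(rbuf);
     PetscFree(r_waits);
*/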