Actual source code: mmdense.c

petsc-3.10.5 2019-03-28

/*
   Support for the parallel dense matrix vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>

PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
  IS             from,to;
  Vec            gvec;

  /* Create local vector that is used to scatter into */
  VecCreateSeq(PETSC_COMM_SELF,mat->cmap->N,&mdn->lvec);

  /* Create temporary index sets for building the scatter-gather context */
  ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->N,0,1,&from);
  ISCreateStride(PETSC_COMM_SELF,mat->cmap->N,0,1,&to);

  /* Create temporary global vector to generate scatter context */
  /* n    = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */

  VecCreateMPIWithArray(PetscObjectComm((PetscObject)mat),1,mdn->nvec,mat->cmap->N,NULL,&gvec);

  /* Generate the scatter context */
  VecScatterCreate(gvec,from,mdn->lvec,to,&mdn->Mvctx);
  PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->Mvctx);
  PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->lvec);
  PetscLogObjectParent((PetscObject)mat,(PetscObject)from);
  PetscLogObjectParent((PetscObject)mat,(PetscObject)to);
  PetscLogObjectParent((PetscObject)mat,(PetscObject)gvec);

  ISDestroy(&to);
  ISDestroy(&from);
  VecDestroy(&gvec);
  return(0);
}
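The scatter context and local work vector built above are what the parallel dense matrix-vector product consumes: the distributed input vector is gathered into mdn->lvec, and the product is then carried out with the locally owned dense block. A minimal sketch of that use, assuming the MPIDENSE data structures from this file; the function name MatMultSketch_MPIDense is illustrative only (the actual MatMult implementation for this matrix type lives in mpidense.c):

static PetscErrorCode MatMultSketch_MPIDense(Mat mat,Vec xx,Vec yy)
{
  Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;

  /* gather the distributed input vector into the sequential work vector */
  VecScatterBegin(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
  /* multiply the locally owned rows (the sequential dense block) by the gathered vector */
  MatMult(mdn->A,mdn->lvec,yy);
  return(0);
}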

extern PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);
PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no;

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    PetscCalloc1(ismax+1,submat);
  }
  /* Determine the number of stages through which the submatrices are extracted */
  nmax = 20*1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through all the stages */
  MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    MatCreateSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);
    pos += max_no;
  }
  return(0);
}
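For context, this staged routine sits behind the public MatCreateSubMatrices() interface: each process supplies its own list of (row IS, column IS) pairs and receives sequential dense submatrices back. A hedged usage sketch, where ExtractDenseBlock, first, and nloc are placeholder names rather than PETSc API; note that both index sets must be sorted, which stride index sets are by construction:

static PetscErrorCode ExtractDenseBlock(Mat C,PetscInt first,PetscInt nloc,Mat **submat)
{
  IS       isrow,iscol;
  PetscInt M,N;

  MatGetSize(C,&M,&N);
  ISCreateStride(PETSC_COMM_SELF,nloc,first,1,&isrow);  /* nloc requested rows starting at row "first" */
  ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);         /* all columns */
  MatCreateSubMatrices(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,submat);  /* (*submat)[0] is a SEQDENSE matrix */
  ISDestroy(&isrow);
  ISDestroy(&iscol);
  return(0);
}

The caller would later release the result with MatDestroySubMatrices(1,submat).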
/* -------------------------------------------------------------------------*/
PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIDense   *c = (Mat_MPIDense*)C->data;
  Mat            A  = c->A;
  Mat_SeqDense   *a = (Mat_SeqDense*)A->data,*mat;
  PetscMPIInt    rank,size,tag0,tag1,idex,end,i;
  PetscInt       N = C->cmap->N,rstart = C->rmap->rstart,count;
  const PetscInt **irow,**icol,*irow_i;
  PetscInt       *nrow,*ncol,*w1,*w3,*w4,*rtable,start;
  PetscInt       **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
  PetscInt       nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
  PetscInt       is_no,jmax,**rmap,*rmap_i;
  PetscInt       ctr_j,*sbuf1_j,*rbuf1_i;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm       comm;
  PetscScalar    **rbuf2,**sbuf2;
  PetscBool      sorted;

  PetscObjectGetComm((PetscObject)C,&comm);
  tag0 = ((PetscObject)C)->tag;
  size = c->size;
  rank = c->rank;
  m    = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  PetscObjectGetNewTag((PetscObject)C,&tag1);

  /* Check that the row and column index sets are sorted */
  for (i=0; i<ismax; i++) {
    ISSorted(isrow[i],&sorted);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ISSorted(iscol[i],&sorted);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  PetscMalloc5(ismax,(PetscInt***)&irow,ismax,(PetscInt***)&icol,ismax,&nrow,ismax,&ncol,m,&rtable);
  for (i=0; i<ismax; i++) {
    ISGetIndices(isrow[i],&irow[i]);
    ISGetIndices(iscol[i],&icol[i]);
    ISGetLocalSize(isrow[i],&nrow[i]);
    ISGetLocalSize(iscol[i],&ncol[i]);
  }

  /* Create a lookup table for the mapping: row -> owning process */
  for (i=0,j=0; i<size; i++) {
    jmax = C->rmap->range[i+1];
    for (; j<jmax; j++) rtable[j] = i;
  }

  /* Evaluate the communication: to whom messages go, message lengths, and the buffer space
     required. Based on this, buffers are allocated and data copied into them */
  PetscMalloc3(2*size,&w1,size,&w3,size,&w4);
  PetscMemzero(w1,size*2*sizeof(PetscInt)); /* initialize work vector */
  PetscMemzero(w3,size*sizeof(PetscInt));   /* initialize work vector */
  for (i=0; i<ismax; i++) {
    PetscMemzero(w4,size*sizeof(PetscInt)); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[2*j] += w4[j];  w3[j]++;}
    }
  }

  nrqs       = 0;              /* number of outgoing messages */
  msz        = 0;              /* total message length (over all processes) */
  w1[2*rank] = 0;              /* no message sent to self */
  w3[rank]   = 0;
  for (i=0; i<size; i++) {
    if (w1[2*i])  { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
  }
  PetscMalloc1(nrqs+1,&pa); /* (proc array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[2*i]) { pa[j] = i; j++; }
  }

  /* Each message has a header = 1 + 2*(no of IS) + data: entry 0 holds the number of
     index sets that request rows from this process, followed by (IS number, row count)
     pairs, followed by the requested row indices themselves */
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    w1[2*j] += w1[2*j+1] + 2* w3[j];
    msz     += w1[2*j];
  }
  /* Do a global reduction to determine how many messages to expect */
  PetscMaxSum(comm,w1,&bsz,&nrqr);

  /* Allocate memory for recv buffers. Make sure rbuf1[0] exists by adding 1 to the buffer length */
  PetscMalloc1(nrqr+1,&rbuf1);
  PetscMalloc1(nrqr*bsz,&rbuf1[0]);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  PetscMalloc1(nrqr+1,&r_waits1);
  for (i=0; i<nrqr; ++i) {
    MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);
  }

  /* Allocate memory for outgoing messages */
  PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);
  PetscMemzero(sbuf1,size*sizeof(PetscInt*));
  PetscMemzero(ptr,size*sizeof(PetscInt*));
  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[2*j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    PetscMemzero(ctr,size*sizeof(PetscInt));
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  PetscMalloc1(nrqs+1,&s_waits1);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);
  }

  /* Post receives to capture the row data from the other processes */
  PetscMalloc1(nrqs+1,&r_waits2);
  PetscMalloc1(nrqs+1,&rbuf2);
  for (i=0; i<nrqs; i++) {
    j     = pa[i];
    count = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
    PetscMalloc1(count+1,&rbuf2[i]);
    MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);
  }

  /* Receive the messages (row numbers) and then pack and send off the row values
     to the correct processes */

  PetscMalloc1(nrqr+1,&s_waits2);
  PetscMalloc1(nrqr+1,&r_status1);
  PetscMalloc1(nrqr+1,&sbuf2);

  {
    PetscScalar *sbuf2_i,*v_start;
    PetscInt    s_proc;
    for (i=0; i<nrqr; ++i) {
      MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);
      s_proc  = r_status1[i].MPI_SOURCE;         /* sending process */
      rbuf1_i = rbuf1[idex];         /* Actual message from s_proc */
      /* number of rows requested = end - start; start skips the message header,
         while end is the full message length obtained from MPI_Get_count() */
      start = 2*rbuf1_i[0] + 1;
      MPI_Get_count(r_status1+i,MPIU_INT,&end);
      /* allocate memory sufficient to hold all the row values */
      PetscMalloc1((end-start)*N,&sbuf2[idex]);
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start += C->rmap->n;
        }
      }
      /* Now send off the data */
      MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);
    }
  }
  /* End Send-Recv of IS + row_numbers */
  PetscFree(r_status1);
  PetscFree(r_waits1);
  PetscMalloc1(nrqs+1,&s_status1);
  if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status1);}
  PetscFree(s_status1);
  PetscFree(s_waits1);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong size");
      PetscMemzero(mat->v,submats[i]->rmap->n*submats[i]->cmap->n*sizeof(PetscScalar));

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (i=0; i<ismax; i++) {
      MatCreate(PETSC_COMM_SELF,submats+i);
      MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);
      MatSetType(submats[i],((PetscObject)A)->type_name);
      MatSeqDenseSetPreallocation(submats[i],NULL);
    }
  }

  /* Assemble the locally owned part of each submatrix */
  {
    PetscInt    col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*C->rmap->n];
          }
        }
      }
    }
  }

  /* Create row map: this maps a row of C to the corresponding row of each submatrix */
  /* this is a very expensive operation wrt memory usage */
  PetscMalloc1(ismax,&rmap);
  PetscMalloc1(ismax*C->rmap->N,&rmap[0]);
  PetscMemzero(rmap[0],ismax*C->rmap->N*sizeof(PetscInt));
  for (i=1; i<ismax; i++) rmap[i] = rmap[i-1] + C->rmap->N;
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }

  /* Now receive the row values and assemble the rest of the matrix */
  PetscMalloc1(nrqs+1,&r_status2);
  {
    PetscInt    is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
      MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);
      /* Now dig out the corresponding sbuf1, which contains the IS data_structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) {  /* For each row */
          row     = sbuf1_i[ct1]; ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* For each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  PetscFree(r_status2);
  PetscFree(r_waits2);
  PetscMalloc1(nrqr+1,&s_status2);
  if (nrqr) {MPI_Waitall(nrqr,s_waits2,s_status2);}
  PetscFree(s_status2);
  PetscFree(s_waits2);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ISRestoreIndices(isrow[i],irow+i);
    ISRestoreIndices(iscol[i],icol+i);
  }

  PetscFree5(*(PetscInt***)&irow,*(PetscInt***)&icol,nrow,ncol,rtable);
  PetscFree3(w1,w3,w4);
  PetscFree(pa);

  for (i=0; i<nrqs; ++i) {
    PetscFree(rbuf2[i]);
  }
  PetscFree(rbuf2);
  PetscFree4(sbuf1,ptr,tmp,ctr);
  PetscFree(rbuf1[0]);
  PetscFree(rbuf1);

  for (i=0; i<nrqr; ++i) {
    PetscFree(sbuf2[i]);
  }

  PetscFree(sbuf2);
  PetscFree(rmap[0]);
  PetscFree(rmap);

  for (i=0; i<ismax; i++) {
    MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
  }
  return(0);
}

PetscErrorCode MatScale_MPIDense(Mat inA,PetscScalar alpha)
{
  Mat_MPIDense   *A     = (Mat_MPIDense*)inA->data;
  Mat_SeqDense   *a     = (Mat_SeqDense*)A->A->data;
  PetscScalar    oalpha = alpha;
  PetscBLASInt   one = 1,nz;

  PetscBLASIntCast(inA->rmap->n*inA->cmap->N,&nz);
  PetscStackCallBLAS("BLASscal",BLASscal_(&nz,&oalpha,a->v,&one));
  PetscLogFlops(nz);
  return(0);
}
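For reference, this routine is reached through the generic MatScale() interface. A minimal usage sketch, assuming A is an assembled MPIDENSE matrix created elsewhere:

  Mat         A;       /* assumed: an assembled MATMPIDENSE matrix */
  PetscScalar alpha = 2.0;

  MatScale(A,alpha);   /* dispatches to MatScale_MPIDense and multiplies every stored entry by alpha */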