Actual source code: mmdense.c

petsc-3.14.6 2021-03-30

/*
   Support for the parallel dense matrix-vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>

PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;

  /* Create local vector that is used to scatter into */
  VecDestroy(&mdn->lvec);
  if (mdn->A) {
    MatCreateVecs(mdn->A,&mdn->lvec,NULL);
    PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->lvec);
  }
  if (!mdn->Mvctx) {
    PetscLayoutSetUp(mat->cmap);
    PetscSFCreate(PetscObjectComm((PetscObject)mat),&mdn->Mvctx);
    PetscSFSetGraphWithPattern(mdn->Mvctx,mat->cmap,PETSCSF_PATTERN_ALLGATHER);
    PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->Mvctx);
  }
  return(0);
}
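
/*
   For context, a minimal sketch (not part of mmdense.c; the function name and body are
   illustrative assumptions) of how the all-gather PetscSF and local vector created above
   are typically consumed: the distributed input vector is broadcast into mdn->lvec and
   the local dense block is then applied.
*/
static PetscErrorCode MatMultSketch_MPIDense(Mat mat,Vec xx,Vec yy)
{
  Mat_MPIDense      *mdn = (Mat_MPIDense*)mat->data;
  const PetscScalar *ax;
  PetscScalar       *ay;

  VecGetArrayRead(xx,&ax);
  VecGetArray(mdn->lvec,&ay);
  PetscSFBcastBegin(mdn->Mvctx,MPIU_SCALAR,ax,ay); /* every rank receives the full vector */
  PetscSFBcastEnd(mdn->Mvctx,MPIU_SCALAR,ax,ay);
  VecRestoreArray(mdn->lvec,&ay);
  VecRestoreArrayRead(xx,&ax);
  MatMult(mdn->A,mdn->lvec,yy);                    /* apply the local dense block */
  return(0);
}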

static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);

PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no;

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    PetscCalloc1(ismax+1,submat);
  }
  /* Determine the number of stages in which the submatrices are extracted */
  nmax = 20*1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through all nstages */
  MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    MatCreateSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);
    pos += max_no;
  }
  return(0);
}
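
/*
   A hedged usage sketch (the helper below is hypothetical, not part of mmdense.c) of the
   public MatCreateSubMatrices() entry point that dispatches to the routine above for
   MPIDENSE matrices: each rank requests one submatrix described by sorted index sets of
   global rows and columns and receives a sequential dense matrix.
*/
static PetscErrorCode ExtractBlockSketch(Mat C,PetscInt r0,PetscInt nr,PetscInt c0,PetscInt nc)
{
  IS  isrow,iscol;
  Mat *submat;

  ISCreateStride(PETSC_COMM_SELF,nr,r0,1,&isrow);   /* global rows r0 .. r0+nr-1, sorted as required */
  ISCreateStride(PETSC_COMM_SELF,nc,c0,1,&iscol);   /* global cols c0 .. c0+nc-1 */
  MatCreateSubMatrices(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submat);
  MatView(submat[0],PETSC_VIEWER_STDOUT_SELF);      /* submat[0] is a sequential dense matrix */
  MatDestroySubMatrices(1,&submat);
  ISDestroy(&isrow);
  ISDestroy(&iscol);
  return(0);
}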
/* -------------------------------------------------------------------------*/
PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIDense   *c = (Mat_MPIDense*)C->data;
  Mat            A  = c->A;
  Mat_SeqDense   *a = (Mat_SeqDense*)A->data,*mat;
  PetscMPIInt    rank,size,tag0,tag1,idex,end,i;
  PetscInt       N = C->cmap->N,rstart = C->rmap->rstart,count;
  const PetscInt **irow,**icol,*irow_i;
  PetscInt       *nrow,*ncol,*w1,*w3,*w4,*rtable,start;
  PetscInt       **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
  PetscInt       nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
  PetscInt       is_no,jmax,**rmap,*rmap_i;
  PetscInt       ctr_j,*sbuf1_j,*rbuf1_i;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm       comm;
  PetscScalar    **rbuf2,**sbuf2;
  PetscBool      sorted;

  PetscObjectGetComm((PetscObject)C,&comm);
  tag0 = ((PetscObject)C)->tag;
  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  m    = C->rmap->N;

  /* Get a new tag to keep the communication clean */
  PetscObjectGetNewTag((PetscObject)C,&tag1);

  /* Check that the row and column indices are sorted */
  for (i=0; i<ismax; i++) {
    ISSorted(isrow[i],&sorted);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ISSorted(iscol[i],&sorted);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  PetscMalloc5(ismax,(PetscInt***)&irow,ismax,(PetscInt***)&icol,ismax,&nrow,ismax,&ncol,m,&rtable);
  for (i=0; i<ismax; i++) {
    ISGetIndices(isrow[i],&irow[i]);
    ISGetIndices(iscol[i],&icol[i]);
    ISGetLocalSize(isrow[i],&nrow[i]);
    ISGetLocalSize(iscol[i],&ncol[i]);
  }

  /* Create a lookup table for the mapping row -> owning process */
  for (i=0,j=0; i<size; i++) {
    jmax = C->rmap->range[i+1];
    for (; j<jmax; j++) rtable[j] = i;
  }

  /* Evaluate the communication: to whom to send messages, the length of each message,
     and the buffer space required.  Based on this, buffers are allocated and data is
     copied into them. */
  PetscMalloc3(2*size,&w1,size,&w3,size,&w4);
  PetscArrayzero(w1,size*2); /* initialize work vector */
  PetscArrayzero(w3,size);   /* initialize work vector */
  for (i=0; i<ismax; i++) {
    PetscArrayzero(w4,size); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[2*j] += w4[j];  w3[j]++;}
    }
  }

  nrqs       = 0;              /* number of outgoing messages */
  msz        = 0;              /* total message length (over all procs) */
  w1[2*rank] = 0;              /* no message sent to self */
  w3[rank]   = 0;
  for (i=0; i<size; i++) {
    if (w1[2*i])  { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
  }
  PetscMalloc1(nrqs+1,&pa); /* array of destination procs */
  for (i=0,j=0; i<size; i++) {
    if (w1[2*i]) { pa[j] = i; j++; }
  }

  /* Each message has a header = 1 + 2*(number of IS) + data */
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    w1[2*j] += w1[2*j+1] + 2*w3[j];
    msz     += w1[2*j];
  }
  /* Do a global reduction to determine how many messages to expect */
  PetscMaxSum(comm,w1,&bsz,&nrqr);

  /* Allocate memory for the receive buffers.  Make sure rbuf1[0] exists by adding 1 to the buffer length */
  PetscMalloc1(nrqr+1,&rbuf1);
  PetscMalloc1(nrqr*bsz,&rbuf1[0]);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  PetscMalloc1(nrqr+1,&r_waits1);
  for (i=0; i<nrqr; ++i) {
    MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);
  }

  /* Allocate memory for the outgoing messages */
  PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);
  PetscArrayzero(sbuf1,size);
  PetscArrayzero(ptr,size);
  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[2*j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    PetscArrayzero(sbuf1[j]+1,2*w3[j]);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into the outgoing buffers */
  for (i=0; i<ismax; i++) {
    PetscArrayzero(ctr,size);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buffer */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Could optimize this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  PetscMalloc1(nrqs+1,&s_waits1);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);
  }

  /* Post receives to capture the row data from the other procs */
  PetscMalloc1(nrqs+1,&r_waits2);
  PetscMalloc1(nrqs+1,&rbuf2);
  for (i=0; i<nrqs; i++) {
    j     = pa[i];
    count = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
    PetscMalloc1(count+1,&rbuf2[i]);
    MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);
  }

  /* Receive the messages (row numbers), then pack and send off the row values
     to the correct processes */

  PetscMalloc1(nrqr+1,&s_waits2);
  PetscMalloc1(nrqr+1,&r_status1);
  PetscMalloc1(nrqr+1,&sbuf2);

  {
    PetscScalar *sbuf2_i,*v_start;
    PetscInt    s_proc;
    for (i=0; i<nrqr; ++i) {
      MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);
      s_proc  = r_status1[i].MPI_SOURCE;   /* sending process */
      rbuf1_i = rbuf1[idex];               /* actual message from s_proc */
      /* number of requested rows = end - start; start is the offset just past the
         header of rbuf1[idex], and end is the total length of the received buffer */
      start = 2*rbuf1_i[0] + 1;
      MPI_Get_count(r_status1+i,MPIU_INT,&end);
      /* allocate memory sufficient to hold all the row values */
      PetscMalloc1((end-start)*N,&sbuf2[idex]);
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start += a->lda;
        }
      }
      /* Now send off the data */
      MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);
    }
  }
  /* End send-recv of IS + row numbers */
  PetscFree(r_status1);
  PetscFree(r_waits1);
  PetscMalloc1(nrqs+1,&s_status1);
  if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status1);}
  PetscFree(s_status1);
  PetscFree(s_waits1);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
      PetscArrayzero(mat->v,submats[i]->rmap->n*submats[i]->cmap->n);

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (i=0; i<ismax; i++) {
      MatCreate(PETSC_COMM_SELF,submats+i);
      MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);
      MatSetType(submats[i],((PetscObject)A)->type_name);
      MatSeqDenseSetPreallocation(submats[i],NULL);
    }
  }

  /* Assemble the locally owned rows of the submatrices */
  {
    PetscInt    col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*a->lda];
          }
        }
      }
    }
  }

  /* Create the row map: this maps C's row numbers to the corresponding submat row numbers, for each submat */
  /* this is a very expensive operation with respect to memory usage */
  PetscMalloc1(ismax,&rmap);
  PetscCalloc1(ismax*C->rmap->N,&rmap[0]);
  for (i=1; i<ismax; i++) rmap[i] = rmap[i-1] + C->rmap->N;
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }

  /* Now receive the row values and assemble the rest of the matrix */
  PetscMalloc1(nrqs+1,&r_status2);
  {
    PetscInt    is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
      MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) {  /* For each row */
          row     = sbuf1_i[ct1]; ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* For each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End send-recv of row values */
  PetscFree(r_status2);
  PetscFree(r_waits2);
  PetscMalloc1(nrqr+1,&s_status2);
  if (nrqr) {MPI_Waitall(nrqr,s_waits2,s_status2);}
  PetscFree(s_status2);
  PetscFree(s_waits2);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ISRestoreIndices(isrow[i],irow+i);
    ISRestoreIndices(iscol[i],icol+i);
  }

  PetscFree5(*(PetscInt***)&irow,*(PetscInt***)&icol,nrow,ncol,rtable);
  PetscFree3(w1,w3,w4);
  PetscFree(pa);

  for (i=0; i<nrqs; ++i) {
    PetscFree(rbuf2[i]);
  }
  PetscFree(rbuf2);
  PetscFree4(sbuf1,ptr,tmp,ctr);
  PetscFree(rbuf1[0]);
  PetscFree(rbuf1);

  for (i=0; i<nrqr; ++i) {
    PetscFree(sbuf2[i]);
  }

  PetscFree(sbuf2);
  PetscFree(rmap[0]);
  PetscFree(rmap);

  for (i=0; i<ismax; i++) {
    MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
  }
  return(0);
}
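
/*
   The packing loop (filling sbuf2) and the assembly loops above rely on the column-major
   storage of MATSEQDENSE: entry (row,col) of a local block with leading dimension lda
   lives at v[row + col*lda].  A minimal sketch (hypothetical helper, not part of
   mmdense.c) of copying one local row into a contiguous buffer of length N:
*/
static void PackDenseRowSketch(const PetscScalar *v,PetscInt lda,PetscInt row,PetscInt N,PetscScalar *buf)
{
  PetscInt k;
  for (k=0; k<N; k++) buf[k] = v[row + k*lda]; /* stride of lda walks across the row */
}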

PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat inA,PetscScalar alpha)
{
  Mat_MPIDense   *A = (Mat_MPIDense*)inA->data;

  MatScale(A->A,alpha);
  return(0);
}