Actual source code: mpiptap.c

petsc-3.5.4 2015-05-23
/*
  Defines projective product routines where A is an MPIAIJ matrix:
          C = P^T * A * P
*/
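
/*
  Minimal usage sketch (illustrative only, not part of this file's code):
  the routines below are reached through the public MatPtAP() interface
  when A and P are both of type MPIAIJ, e.g.

      Mat C;
      MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);   symbolic + numeric product, fill estimate 2.0
      MatDestroy(&C);
*/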

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>

/* #define PTAP_PROFILE */

extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  Mat_MPIAIJ  *a   =(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI *ptap=a->ptap;

  if (ptap) {
    Mat_Merge_SeqsToMPI *merge=ptap->merge;
    PetscFree2(ptap->startsj_s,ptap->startsj_r);
    PetscFree(ptap->bufa);
    MatDestroy(&ptap->P_loc);
    MatDestroy(&ptap->P_oth);
    MatDestroy(&ptap->A_loc); /* used by MatTransposeMatMult() */
    if (ptap->api) {PetscFree(ptap->api);}
    if (ptap->apj) {PetscFree(ptap->apj);}
    if (ptap->apa) {PetscFree(ptap->apa);}
    if (merge) {
      PetscFree(merge->id_r);
      PetscFree(merge->len_s);
      PetscFree(merge->len_r);
      PetscFree(merge->bi);
      PetscFree(merge->bj);
      PetscFree(merge->buf_ri[0]);
      PetscFree(merge->buf_ri);
      PetscFree(merge->buf_rj[0]);
      PetscFree(merge->buf_rj);
      PetscFree(merge->coi);
      PetscFree(merge->coj);
      PetscFree(merge->owners_co);
      PetscLayoutDestroy(&merge->rowmap);
      merge->destroy(A);
      PetscFree(ptap->merge);
    }
    PetscFree(ptap);
  }
  return(0);
}

PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
{
  Mat_MPIAIJ          *a     = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI         *ptap  = a->ptap;
  Mat_Merge_SeqsToMPI *merge = ptap->merge;

  (*merge->duplicate)(A,op,M);

  (*M)->ops->destroy   = merge->destroy;
  (*M)->ops->duplicate = merge->duplicate;
  return(0);
}

PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  if (scall == MAT_INITIAL_MATRIX) {
    PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
    MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);
    PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
  }
  PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
  MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);
  PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
  return(0);
}
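
/*
  Reuse sketch (illustrative only): when A changes its numerical values but
  keeps its nonzero pattern, MAT_REUSE_MATRIX skips the symbolic phase above
  and re-runs only MatPtAPNumeric_MPIAIJ_MPIAIJ().
*/
#if 0
PetscErrorCode PtAPReuseSketch(Mat A,Mat P,PetscInt nsteps)  /* hypothetical helper */
{
  Mat      C;
  PetscInt step;
  MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);    /* symbolic + numeric */
  for (step=0; step<nsteps; step++) {
    /* ... update the entries of A, keeping the same sparsity ... */
    MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);    /* numeric only */
  }
  MatDestroy(&C);
  return(0);
}
#endif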

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  Mat                 Cmpi;
  Mat_PtAPMPI         *ptap;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_MPIAIJ          *a        =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  Mat_SeqAIJ          *ad       =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ          *p_loc,*p_oth;
  PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            *adi=ad->i,*aj,*aoi=ao->i,nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
  PetscBT             lnkbt;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*pti,*ptj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
  PetscReal           afill=1.0,afill_tmp;
  PetscInt            rmax;
#if defined(PTAP_PROFILE)
  PetscLogDouble t0,t1,t2,t3,t4;
#endif

  PetscObjectGetComm((PetscObject)A,&comm);
#if defined(PTAP_PROFILE)
  PetscTime(&t0);
#endif

  /* check that the matrix local sizes are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D,%D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  }
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D,%D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
  }
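
  /* Example of compatible layouts (illustrative): on 2 processes, with A of
     global size 8x8 and P of global size 8x4 and each process owning 4 rows
     of both matrices, Arow = Acol = Prow = (0,4) on rank 0 and (4,8) on
     rank 1, and each rank owns pn = 2 rows of the 4x4 product C = P^T*A*P. */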

  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);

  /* create struct Mat_PtAPMPI and attach it to C later */
  PetscNew(&ptap);
  PetscNew(&merge);
  ptap->merge = merge;
  ptap->reuse = MAT_INITIAL_MATRIX;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);

  /* get P_loc by taking all local rows of P */
  MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  pi_oth = p_oth->i; pj_oth = p_oth->j;
#if defined(PTAP_PROFILE)
  PetscTime(&t1);
#endif

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  PetscMalloc1((am+1),&api);
  api[0] = 0;

  /* create and initialize a linked list */
  PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);

  /* initial FreeSpace size is fill*(nnz(A) + nnz(P)); this caused OOM for ex56 with np=8k on Intrepid! */
  PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);

  current_space = free_space;

  for (i=0; i<am; i++) {
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    aj  = ad->j + adi[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    aj  = ao->j + aoi[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);
      nspacedouble++;
    }

    /* copy data into free space, then initialize lnk */
    PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* allocate space for apj and initialize it, then */
  /* destroy the list of free space and other temporary arrays */
  PetscMalloc1((api[am]+1),&apj);
  PetscFreeSpaceContiguous(&free_space,apj);
  afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
  if (afill_tmp > afill) afill = afill_tmp;
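
  /* Worked example of the row-pattern union above (illustrative): if row i of
     A has nonzeros in columns {0,2} of its diagonal block, and rows 0 and 2 of
     P_loc have column patterns {1,3} and {3,5}, the linked list accumulates
     the sorted union {1,3,5}, so apnz = 3 and row i of AP gets 3 column slots. */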

#if defined(PTAP_PROFILE)
  PetscTime(&t2);
#endif

  /* determine symbolic Co=(p->B)^T*AP - send to others */
  /*----------------------------------------------------*/
  MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);

  /* then, compute symbolic Co = (p->B)^T*AP */
  pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                               >= (num of nonzero rows of C_seq) - pn */
  PetscMalloc1((pon+1),&coi);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
  nnz           = (PetscInt)(fill*(poti[pon] + api[am]));
  PetscFreeSpaceGet(nnz,&free_space);
  current_space = free_space;

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pot */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);
    }
    nnz = lnk[0];

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);
      nspacedouble++;
    }

    /* copy data into free space, and zero out dense rows */
    PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }
  PetscMalloc1((coi[pon]+1),&coj);
  PetscFreeSpaceContiguous(&free_space,coj);
  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);

  /* send j-array (coj) of Co to other processors */
  /*----------------------------------------------*/
  /* determine row ownership */
  PetscLayoutCreate(comm,&merge->rowmap);
  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  PetscLayoutSetUp(merge->rowmap);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, and their lengths */
  PetscMalloc2(size,&len_si,size,&sstatus);
  PetscMemzero(len_si,size*sizeof(PetscMPIInt));
  PetscCalloc1(size,&merge->len_s);

  len_s        = merge->len_s;
  merge->nsend = 0;

  PetscMalloc1((size+2),&owners_co);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;  /* num of rows in Co(=Pt*AP) to be sent to [proc] -- could include empty rows! */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
  PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);

  /* post the Irecv and Isend of coj */
  PetscCommGetNewTag(comm,&tagj);
  PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
  PetscMalloc1((merge->nsend+1),&swaits);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
    k++;
  }

  /* receives and sends of coj are complete */
  for (i=0; i<merge->nrecv; i++) {
    MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
  }
  PetscFree(rwaits);
  if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}

  /* send and recv coi */
  /*-------------------*/
  PetscCommGetNewTag(comm,&tagi);
  PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
  PetscMalloc1((len+1),&buf_s);
  buf_si = buf_s;  /* points to the beginning of the k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure (see the worked example after this loop):
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];

      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
    k++;
    buf_si += len_si[proc];
  }
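
  /* Worked example of the i-structure message layout (illustrative): sending
     two Co rows with local indices 3 and 7 and nonzero counts 2 and 5 to
     [proc] gives len_si[proc] = 2*(2+1) = 6 and
         buf_si = {2, 3, 7, 0, 2, 7}
     i.e. nrows, then the row indices, then the CSR-style offsets {0,2,2+5}. */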
  i = merge->nrecv;
  while (i--) {
    MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
  }
  PetscFree(rwaits);
  if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}

  PetscFree2(len_si,sstatus);
  PetscFree(len_ri);
  PetscFree(swaits);
  PetscFree(buf_s);

#if defined(PTAP_PROFILE)
  PetscTime(&t3);
#endif

  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);

  /* allocate pti array and free space for accumulating nonzero column info */
  PetscMalloc1((pn+1),&pti);
  pti[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz           = (PetscInt)(fill*(pi_loc[pm] + api[am]));
  PetscFreeSpaceGet(nnz,&free_space);
  current_space = free_space;

  PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of the k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
  }
  MatPreallocateInitialize(comm,pn,pn,dnz,onz);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j];  /* row of AP == col of Pt */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);
    MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    pti[i+1] = pti[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);
  PetscFree3(buf_ri_k,nextrow,nextci);

  PetscMalloc1((pti[pn]+1),&ptj);
  PetscFreeSpaceContiguous(&free_space,ptj);
  afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  PetscLLDestroy(lnk,lnkbt);

  /* create symbolic parallel matrix Cmpi */
  /*--------------------------------------*/
  MatCreate(comm,&Cmpi);
  MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);
  MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));
  MatSetType(Cmpi,MATMPIAIJ);
  MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
  MatPreallocateFinalize(dnz,onz);

  merge->bi        = pti;      /* Cseq->i */
  merge->bj        = ptj;      /* Cseq->j */
  merge->coi       = coi;      /* Co->i   */
  merge->coj       = coj;      /* Co->j   */
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;
  merge->destroy   = Cmpi->ops->destroy;
  merge->duplicate = Cmpi->ops->duplicate;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled      = PETSC_FALSE;
  Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;

  /* attach the supporting struct to Cmpi for reuse */
  c           = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap     = ptap;
  ptap->api   = api;
  ptap->apj   = apj;
  ptap->rmax  = ap_rmax;
  *C          = Cmpi;

  /* the flag 'scalable' determines which implementation is used:
       PETSC_FALSE: do dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
       PETSC_TRUE:  do sparse axpy in MatPtAPNumeric() - might be slower, but uses a sparse array apa */
  /* set default to scalable */
  ptap->scalable = PETSC_TRUE;

  PetscOptionsGetBool(((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);
  if (!ptap->scalable) {  /* do dense axpy */
    PetscCalloc1(pN,&ptap->apa);
  } else {
    PetscCalloc1(ap_rmax+1,&ptap->apa);
  }

#if defined(PTAP_PROFILE)
  PetscTime(&t4);
  if (rank==1) PetscPrintf(MPI_COMM_SELF,"  [%d] PtAPSymbolic %g/P + %g/AP + %g/comm + %g/PtAP = %g\n",rank,t1-t0,t2-t1,t3-t2,t4-t3,t4-t0);
#endif

#if defined(PETSC_USE_INFO)
  if (pti[pn] != 0) {
    PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
    PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%g,&C) for best performance.\n",(double)afill);
  } else {
    PetscInfo(Cmpi,"Empty matrix product\n");
  }
#endif
  return(0);
}
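
/*
  Option sketch (illustrative): the scalable sparse-axpy path is the default;
  the dense-axpy path, which allocates a dense work array of length pN on
  every process, can be selected at run time with, e.g.,

      mpiexec -n 8 ./app -matptap_scalable 0

  (prepend the matrix options prefix, if any, to the option name).
*/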

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  Mat_MPIAIJ          *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
  Mat_SeqAIJ          *p_loc,*p_oth;
  Mat_PtAPMPI         *ptap;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
  PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
  PetscInt            i,j,k,anz,pnz,apnz,nextap,row,*cj;
  MatScalar           *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
  PetscInt            am  =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,taga,*len_s;
  PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
  PetscInt            **buf_ri,**buf_rj;
  PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local arrays of C (mpi mat) */
  MPI_Request         *s_waits,*r_waits;
  MPI_Status          *status;
  MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
  PetscInt            *api,*apj,*coi,*coj;
  PetscInt            *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
  PetscBool           scalable;
#if defined(PTAP_PROFILE)
  PetscLogDouble t0,t1,t2,t3,t4,et2_AP=0.0,et2_PtAP=0.0,t2_0,t2_1,t2_2;
#endif

  PetscObjectGetComm((PetscObject)C,&comm);
#if defined(PTAP_PROFILE)
  PetscTime(&t0);
#endif
  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);

  ptap = c->ptap;
  if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
  merge    = ptap->merge;
  apa      = ptap->apa;
  scalable = ptap->scalable;

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*----------------------------------------------------*/
  if (ptap->reuse == MAT_INITIAL_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic(); skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
    ptap->reuse = MAT_REUSE_MATRIX;
  } else { /* update numerical values of P_oth and P_loc */
    MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
    MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
  }
#if defined(PTAP_PROFILE)
  PetscTime(&t1);
#endif

  /* 2) compute numeric C_seq = P_loc^T*A_loc*P - the dominating part */
  /*------------------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
  pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;

  coi  = merge->coi; coj = merge->coj;
  PetscCalloc1(coi[pon]+1,&coa);

  bi     = merge->bi; bj = merge->bj;
  owners = merge->rowmap->range;
  PetscCalloc1(bi[cm]+1,&ba);  /* ba: Cseq->a */

  api = ptap->api; apj = ptap->apj;

  if (!scalable) { /* do dense axpy on apa (length pN, stores A[i,:]*P) - nonscalable, but fast */
    PetscInfo(C,"Using non-scalable dense axpy\n");
    /*---------------------------------------------------------------------------------------*/
    for (i=0; i<am; i++) {
#if defined(PTAP_PROFILE)
      PetscTime(&t2_0);
#endif
      /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
      /*------------------------------------------------------------*/
      apJ = apj + api[i];

      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row = adj[j];
        pnz = pi_loc[row+1] - pi_loc[row];
        pj  = pj_loc + pi_loc[row];
        pa  = pa_loc + pi_loc[row];

        /* perform dense axpy */
        valtmp = ada[j];
        for (k=0; k<pnz; k++) {
          apa[pj[k]] += valtmp*pa[k];
        }
        PetscLogFlops(2.0*pnz);
      }

      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row = aoj[j];
        pnz = pi_oth[row+1] - pi_oth[row];
        pj  = pj_oth + pi_oth[row];
        pa  = pa_oth + pi_oth[row];

        /* perform dense axpy */
        valtmp = aoa[j];
        for (k=0; k<pnz; k++) {
          apa[pj[k]] += valtmp*pa[k];
        }
        PetscLogFlops(2.0*pnz);
      }
#if defined(PTAP_PROFILE)
      PetscTime(&t2_1);
      et2_AP += t2_1 - t2_0;
#endif

      /* 2-b) compute Cseq = P_loc[i,:]^T*AP[i,:] using an outer product */
      /*-----------------------------------------------------------------*/
      apnz = api[i+1] - api[i];
      /* put the value into Co=(p->B)^T*AP (off-diagonal part, sent to others) */
      pnz = po->i[i+1] - po->i[i];
      poJ = po->j + po->i[i];
      pA  = po->a + po->i[i];
      for (j=0; j<pnz; j++) {
        row = poJ[j];
        cnz = coi[row+1] - coi[row];
        cj  = coj + coi[row];
        ca  = coa + coi[row];
        /* perform axpy, gathering from the dense apa */
        valtmp = pA[j];
        for (k=0; k<cnz; k++) {
          ca[k] += valtmp*apa[cj[k]];
        }
        PetscLogFlops(2.0*cnz);
      }

      /* put the value into Cd (diagonal part) */
      pnz = pd->i[i+1] - pd->i[i];
      pdJ = pd->j + pd->i[i];
      pA  = pd->a + pd->i[i];
      for (j=0; j<pnz; j++) {
        row = pdJ[j];
        cnz = bi[row+1] - bi[row];
        cj  = bj + bi[row];
        ca  = ba + bi[row];
        /* perform axpy, gathering from the dense apa */
        valtmp = pA[j];
        for (k=0; k<cnz; k++) {
          ca[k] += valtmp*apa[cj[k]];
        }
        PetscLogFlops(2.0*cnz);
      }

      /* zero the current row of A*P */
      for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
#if defined(PTAP_PROFILE)
      PetscTime(&t2_2);
      et2_PtAP += t2_2 - t2_1;
#endif
    }
  } else { /* do sparse axpy on apa (length ap_rmax, stores A[i,:]*P) - scalable, but slower */
    PetscInfo(C,"Using scalable sparse axpy\n");
    /*-----------------------------------------------------------------------------------*/
    pA=pa_loc;
    for (i=0; i<am; i++) {
#if defined(PTAP_PROFILE)
      PetscTime(&t2_0);
#endif
      /* form i-th sparse row of A*P */
      apnz = api[i+1] - api[i];
      apJ  = apj + api[i];
      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row    = adj[j];
        pnz    = pi_loc[row+1] - pi_loc[row];
        pj     = pj_loc + pi_loc[row];
        pa     = pa_loc + pi_loc[row];
        valtmp = ada[j];
        nextp  = 0;
        for (k=0; nextp<pnz; k++) {
          if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
            apa[k] += valtmp*pa[nextp++];
          }
        }
        PetscLogFlops(2.0*pnz);
      }
      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row    = aoj[j];
        pnz    = pi_oth[row+1] - pi_oth[row];
        pj     = pj_oth + pi_oth[row];
        pa     = pa_oth + pi_oth[row];
        valtmp = aoa[j];
        nextp  = 0;
        for (k=0; nextp<pnz; k++) {
          if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
            apa[k] += valtmp*pa[nextp++];
          }
        }
        PetscLogFlops(2.0*pnz);
      }
#if defined(PTAP_PROFILE)
      PetscTime(&t2_1);
      et2_AP += t2_1 - t2_0;
#endif

      /* 2-b) compute Cseq = P_loc[i,:]^T*AP[i,:] using an outer product */
      /*-----------------------------------------------------------------*/
      pnz = pi_loc[i+1] - pi_loc[i];
      pJ  = pj_loc + pi_loc[i];
      for (j=0; j<pnz; j++) {
        nextap = 0;
        row    = pJ[j]; /* global index */
        if (row < pcstart || row >= pcend) { /* put the value into Co */
          row = *poJ;
          cj  = coj + coi[row];
          ca  = coa + coi[row]; poJ++;
        } else {                             /* put the value into Cd */
          row = *pdJ;
          cj  = bj + bi[row];
          ca  = ba + bi[row]; pdJ++;
        }
        valtmp = pA[j];
        for (k=0; nextap<apnz; k++) {
          if (cj[k] == apJ[nextap]) ca[k] += valtmp*apa[nextap++];
        }
        PetscLogFlops(2.0*apnz);
      }
      pA += pnz;
      /* zero the current row info for A*P */
      PetscMemzero(apa,apnz*sizeof(MatScalar));
#if defined(PTAP_PROFILE)
      PetscTime(&t2_2);
      et2_PtAP += t2_2 - t2_1;
#endif
    }
  }
#if defined(PTAP_PROFILE)
  PetscTime(&t2);
#endif

  /* 3) send and recv matrix values coa */
  /*------------------------------------*/
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;
  len_s  = merge->len_s;
  PetscCommGetNewTag(comm,&taga);
  PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);

  PetscMalloc2(merge->nsend+1,&s_waits,size,&status);
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = merge->owners_co[proc];
    MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
    k++;
  }
  if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
  if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}

  PetscFree2(s_waits,status);
  PetscFree(r_waits);
  PetscFree(coa);
#if defined(PTAP_PROFILE)
  PetscTime(&t3);
#endif

  /* 4) insert local Cseq and received values into Cmpi */
  /*----------------------------------------------------*/
  PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k]+1;             /* next row number of the k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
  }

  for (i=0; i<cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];  /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i+1] - bi[i];
    /* add received vals into ba */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz    = *(nextci[k]+1) - *nextci[k];
        cj     = buf_rj[k] + *(nextci[k]);
        ca     = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j=0; nextcj<cnz; j++) {
          if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
        PetscLogFlops(2.0*cnz);
      }
    }
    MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
  }
  MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);

  PetscFree(ba);
  PetscFree(abuf_r[0]);
  PetscFree(abuf_r);
  PetscFree3(buf_ri_k,nextrow,nextci);
#if defined(PTAP_PROFILE)
  PetscTime(&t4);
  if (rank==1) PetscPrintf(MPI_COMM_SELF,"  [%d] PtAPNum %g/P + %g/PtAP( %g + %g ) + %g/comm + %g/Cloc = %g\n\n",rank,t1-t0,t2-t1,et2_AP,et2_PtAP,t3-t2,t4-t3,t4-t0);
#endif
  return(0);
}
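
/*
  Worked sketch of the two-pointer sparse axpy used in the scalable branch
  above (illustrative, self-contained): pj must be a sorted subset of apJ,
  which holds by construction of the symbolic product.
*/
#if 0
static void SparseAxpyDemo(void)  /* hypothetical helper */
{
  PetscInt  apJ[]  = {1,4,6,9};   /* column pattern of AP[i,:] */
  PetscInt  pj[]   = {4,9};       /* column pattern of P[row,:], a subset */
  MatScalar pa[]   = {2.0,3.0};   /* values of P[row,:] */
  MatScalar apa[4] = {0,0,0,0};   /* accumulator aligned with apJ */
  MatScalar valtmp = 5.0;         /* A[i,row] */
  PetscInt  k,nextp = 0,pnz = 2;

  for (k=0; nextp<pnz; k++) {
    if (apJ[k] == pj[nextp]) apa[k] += valtmp*pa[nextp++];
  }
  /* result: apa = {0, 10, 0, 15} */
}
#endif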