Actual source code: mpiptap.c
petsc-3.6.1 2015-08-06
2: /*
3: Defines projective product routines where A is an MPIAIJ matrix
4: C = P^T * A * P
5: */
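/*
   Usage sketch (illustrative): the routines in this file are reached through the public
   MatPtAP() interface when A and P are MPIAIJ matrices, e.g.

     Mat C;
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);    first call: symbolic + numeric
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);      later calls reuse the symbolic data
     ierr = MatDestroy(&C);CHKERRQ(ierr);

   where 2.0 is an example fill estimate and A, P, C are placeholder names.
*/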
7: #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
8: #include <../src/mat/utils/freespace.h>
9: #include <../src/mat/impls/aij/mpi/mpiaij.h>
10: #include <petscbt.h>
11: #include <petsctime.h>
13: /* #define PTAP_PROFILE */
15: extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
18: PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
19: {
21: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data;
22: Mat_PtAPMPI *ptap=a->ptap;
25: if (ptap) {
26: Mat_Merge_SeqsToMPI *merge=ptap->merge;
27: PetscFree2(ptap->startsj_s,ptap->startsj_r);
28: PetscFree(ptap->bufa);
29: MatDestroy(&ptap->P_loc);
30: MatDestroy(&ptap->P_oth);
31: MatDestroy(&ptap->A_loc); /* used by MatTransposeMatMult() */
32: if (ptap->api) {PetscFree(ptap->api);}
33: if (ptap->apj) {PetscFree(ptap->apj);}
34: if (ptap->apa) {PetscFree(ptap->apa);}
35: if (merge) {
36: PetscFree(merge->id_r);
37: PetscFree(merge->len_s);
38: PetscFree(merge->len_r);
39: PetscFree(merge->bi);
40: PetscFree(merge->bj);
41: PetscFree(merge->buf_ri[0]);
42: PetscFree(merge->buf_ri);
43: PetscFree(merge->buf_rj[0]);
44: PetscFree(merge->buf_rj);
45: PetscFree(merge->coi);
46: PetscFree(merge->coj);
47: PetscFree(merge->owners_co);
48: PetscLayoutDestroy(&merge->rowmap);
49: merge->destroy(A);
50: PetscFree(ptap->merge);
51: }
52: PetscFree(ptap);
53: }
54: return(0);
55: }
59: PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
60: {
61: PetscErrorCode ierr;
62: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
63: Mat_PtAPMPI *ptap = a->ptap;
64: Mat_Merge_SeqsToMPI *merge = ptap->merge;
67: (*merge->duplicate)(A,op,M);
69: (*M)->ops->destroy = merge->destroy;
70: (*M)->ops->duplicate = merge->duplicate;
71: return(0);
72: }
76: PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
77: {
81: if (scall == MAT_INITIAL_MATRIX) {
82: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
83: MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);
84: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
85: }
86: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
87: MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);
88: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
89: return(0);
90: }
94: PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
95: {
96: PetscErrorCode ierr;
97: Mat Cmpi;
98: Mat_PtAPMPI *ptap;
99: PetscFreeSpaceList free_space=NULL,current_space=NULL;
100: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
101: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
102: Mat_SeqAIJ *p_loc,*p_oth;
103: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
104: PetscInt *adi=ad->i,*aj,*aoi=ao->i,nnz;
105: PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
106: PetscInt am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
107: PetscBT lnkbt;
108: MPI_Comm comm;
109: PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
110: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
111: PetscInt len,proc,*dnz,*onz,*owners;
112: PetscInt nzi,*pti,*ptj;
113: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
114: MPI_Request *swaits,*rwaits;
115: MPI_Status *sstatus,rstatus;
116: Mat_Merge_SeqsToMPI *merge;
117: PetscInt *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
118: PetscReal afill=1.0,afill_tmp;
119: PetscInt rmax;
122: PetscObjectGetComm((PetscObject)A,&comm);
124: /* check if matrix local sizes are compatible */
125: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
126: SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
127: }
128: if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
129: SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
130: }
132: MPI_Comm_size(comm,&size);
133: MPI_Comm_rank(comm,&rank);
135: /* create struct Mat_PtAPMPI and attach it to C later */
136: PetscNew(&ptap);
137: PetscNew(&merge);
138: ptap->merge = merge;
139: ptap->reuse = MAT_INITIAL_MATRIX;
141: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
142: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
144: /* get P_loc by taking all local rows of P */
145: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
147: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
148: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
149: pi_loc = p_loc->i; pj_loc = p_loc->j;
150: pi_oth = p_oth->i; pj_oth = p_oth->j;
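  /* Overall strategy (see steps (1)-(6) below): locally A is stored as the diagonal block a->A
     and the off-diagonal block a->B, so the owned rows of A*P are obtained as
     AP = A_diag*P_loc + A_off*P_oth.  C = P^T*(A*P) then splits into (p->A)^T*AP, which stays
     on this process, and Co = (p->B)^T*AP, whose rows are sent to their owning processes and
     merged into the local rows of C. */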
152: /* (1) compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth (api,apj) */
153: /*--------------------------------------------------------------------------*/
154: PetscMalloc1(am+1,&api);
155: api[0] = 0;
157: /* create and initialize a linked list */
158: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
160: /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) -OOM for ex56, np=8k on Intrepid! */
161: PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);
163: current_space = free_space;
165: for (i=0; i<am; i++) {
166: /* diagonal portion of A */
167: nzi = adi[i+1] - adi[i];
168: aj = ad->j + adi[i];
169: for (j=0; j<nzi; j++) {
170: row = aj[j];
171: pnz = pi_loc[row+1] - pi_loc[row];
172: Jptr = pj_loc + pi_loc[row];
173: /* add non-zero cols of P into the sorted linked list lnk */
174: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
175: }
176: /* off-diagonal portion of A */
177: nzi = aoi[i+1] - aoi[i];
178: aj = ao->j + aoi[i];
179: for (j=0; j<nzi; j++) {
180: row = aj[j];
181: pnz = pi_oth[row+1] - pi_oth[row];
182: Jptr = pj_oth + pi_oth[row];
183: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
184: }
185: apnz = lnk[0];
186: api[i+1] = api[i] + apnz;
187: if (ap_rmax < apnz) ap_rmax = apnz;
189: /* if free space is not available, double the total space in the list */
190: if (current_space->local_remaining<apnz) {
191: PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);
192: nspacedouble++;
193: }
195: /* Copy data into free space, then initialize lnk */
196: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
198: current_space->array += apnz;
199: current_space->local_used += apnz;
200: current_space->local_remaining -= apnz;
201: }
203: /* Allocate space for apj, initialize apj, and */
204: /* destroy list of free space and other temporary array(s) */
205: PetscMalloc1(api[am]+1,&apj);
206: PetscFreeSpaceContiguous(&free_space,apj);
207: afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
208: if (afill_tmp > afill) afill = afill_tmp;
210: /* (2) determine symbolic Co=(p->B)^T*AP - send to others (coi,coj)*/
211: /*-----------------------------------------------------------------*/
212: MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);
214: /* then, compute symbolic Co = (p->B)^T*AP */
215: pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
216: >= (num of nonzero rows of C_seq) - pn */
217: PetscMalloc1(pon+1,&coi);
218: coi[0] = 0;
220: /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
221: nnz = fill*(poti[pon] + api[am]);
222: PetscFreeSpaceGet(nnz,&free_space);
223: current_space = free_space;
225: for (i=0; i<pon; i++) {
226: pnz = poti[i+1] - poti[i];
227: ptJ = potj + poti[i];
228: for (j=0; j<pnz; j++) {
229: row = ptJ[j]; /* row of AP == col of Pot */
230: apnz = api[row+1] - api[row];
231: Jptr = apj + api[row];
232: /* add non-zero cols of AP into the sorted linked list lnk */
233: PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);
234: }
235: nnz = lnk[0];
237: /* If free space is not available, double the total space in the list */
238: if (current_space->local_remaining<nnz) {
239: PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);
240: nspacedouble++;
241: }
243: /* Copy data into free space, then initialize lnk */
244: PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);
246: current_space->array += nnz;
247: current_space->local_used += nnz;
248: current_space->local_remaining -= nnz;
250: coi[i+1] = coi[i] + nnz;
251: }
252:
253: PetscMalloc1(coi[pon],&coj);
254: PetscFreeSpaceContiguous(&free_space,coj);
255: afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
256: if (afill_tmp > afill) afill = afill_tmp;
257: MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);
259: /* (3) send j-array (coj) of Co to other processors */
260: /*--------------------------------------------------*/
261: PetscCalloc1(size,&merge->len_s);
262: len_s = merge->len_s;
263: merge->nsend = 0;
266: /* determine row ownership */
267: PetscLayoutCreate(comm,&merge->rowmap);
268: merge->rowmap->n = pn;
269: merge->rowmap->bs = 1;
271: PetscLayoutSetUp(merge->rowmap);
272: owners = merge->rowmap->range;
274: /* determine the number of messages to send, their lengths */
275: PetscMalloc2(size,&len_si,size,&sstatus);
276: PetscMemzero(len_si,size*sizeof(PetscMPIInt));
277: PetscMalloc1(size+2,&owners_co);
279: proc = 0;
280: for (i=0; i<pon; i++) {
281: while (prmap[i] >= owners[proc+1]) proc++;
282: len_si[proc]++; /* num of rows in Co(=Pt*AP) to be sent to [proc] */
283: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
284: }
286: len = 0; /* total length of buf_s (concatenated buf_si messages), see (4) */
287: owners_co[0] = 0;
288: for (proc=0; proc<size; proc++) {
289: owners_co[proc+1] = owners_co[proc] + len_si[proc];
290: if (len_s[proc]) {
291: merge->nsend++;
292: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
293: len += len_si[proc];
294: }
295: }
297: /* determine the number and length of messages to receive for coi and coj */
298: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
299: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
301: /* post the Irecv and Isend of coj */
302: PetscCommGetNewTag(comm,&tagj);
303: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
304: PetscMalloc1(merge->nsend+1,&swaits);
305: for (proc=0, k=0; proc<size; proc++) {
306: if (!len_s[proc]) continue;
307: i = owners_co[proc];
308: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
309: k++;
310: }
312: /* receives and sends of coj are complete */
313: for (i=0; i<merge->nrecv; i++) {
314: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
315: }
316: PetscFree(rwaits);
317: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
319: /* (4) send and recv coi */
320: /*-----------------------*/
321: PetscCommGetNewTag(comm,&tagi);
322: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
323: PetscMalloc1(len+1,&buf_s);
324: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
325: for (proc=0,k=0; proc<size; proc++) {
326: if (!len_s[proc]) continue;
327: /* form outgoing message for i-structure:
328: buf_si[0]: nrows to be sent
329: [1:nrows]: row index (local to the destination process)
330: [nrows+1:2*nrows+1]: i-structure index
331: */
332: /*-------------------------------------------*/
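    /* e.g. (hypothetical values) two rows with local indices 4 and 7, carrying 3 and 5 nonzeros
       of Co respectively, give len_si[proc] = 2*(2+1) = 6 and buf_si = {2, 4, 7, 0, 3, 8} */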
333: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
334: buf_si_i = buf_si + nrows+1;
335: buf_si[0] = nrows;
336: buf_si_i[0] = 0;
337: nrows = 0;
338: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
339: nzi = coi[i+1] - coi[i];
340: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
341: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
342: nrows++;
343: }
344: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
345: k++;
346: buf_si += len_si[proc];
347: }
348: i = merge->nrecv;
349: while (i--) {
350: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
351: }
352: PetscFree(rwaits);
353: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
355: PetscFree2(len_si,sstatus);
356: PetscFree(len_ri);
357: PetscFree(swaits);
358: PetscFree(buf_s);
360: /* (5) compute the local portion of C (mpi mat) */
361: /*----------------------------------------------*/
362: MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);
364: /* allocate pti array and free space for accumulating nonzero column info */
365: PetscMalloc1(pn+1,&pti);
366: pti[0] = 0;
368: /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
369: nnz = fill*(pi_loc[pm] + api[am]);
370: PetscFreeSpaceGet(nnz,&free_space);
371: current_space = free_space;
373: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
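  /* each received buf_ri[k] mirrors the buf_si layout sent in (4):
     buf_ri[k][0] = nrows, [1:nrows] = local row indices, [nrows+1:2*nrows+1] = offsets into buf_rj[k] */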
374: for (k=0; k<merge->nrecv; k++) {
375: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
376: nrows = *buf_ri_k[k];
377: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
378: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
379: }
380: MatPreallocateInitialize(comm,pn,pn,dnz,onz);
381: rmax = 0;
382: for (i=0; i<pn; i++) {
383: /* add pdt[i,:]*AP into lnk */
384: pnz = pdti[i+1] - pdti[i];
385: ptJ = pdtj + pdti[i];
386: for (j=0; j<pnz; j++) {
387: row = ptJ[j]; /* row of AP == col of Pt */
388: apnz = api[row+1] - api[row];
389: Jptr = apj + api[row];
390: /* add non-zero cols of AP into the sorted linked list lnk */
391: PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);
392: }
394: /* add received col data into lnk */
395: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
396: if (i == *nextrow[k]) { /* i-th row */
397: nzi = *(nextci[k]+1) - *nextci[k];
398: Jptr = buf_rj[k] + *nextci[k];
399: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
400: nextrow[k]++; nextci[k]++;
401: }
402: }
403: nnz = lnk[0];
405: /* if free space is not available, make more free space */
406: if (current_space->local_remaining<nnz) {
407: PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);
408: nspacedouble++;
409: }
410: /* copy data into free space, then initialize lnk */
411: PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);
412: MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);
414: current_space->array += nnz;
415: current_space->local_used += nnz;
416: current_space->local_remaining -= nnz;
418: pti[i+1] = pti[i] + nnz;
419: if (nnz > rmax) rmax = nnz;
420: }
421: MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);
422: PetscFree3(buf_ri_k,nextrow,nextci);
424: PetscMalloc1(pti[pn]+1,&ptj);
425: PetscFreeSpaceContiguous(&free_space,ptj);
426: afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
427: if (afill_tmp > afill) afill = afill_tmp;
428: PetscLLDestroy(lnk,lnkbt);
430: /* (6) create symbolic parallel matrix Cmpi */
431: /*------------------------------------------*/
432: MatCreate(comm,&Cmpi);
433: MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);
434: MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));
435: MatSetType(Cmpi,MATMPIAIJ);
436: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
437: MatPreallocateFinalize(dnz,onz);
439: merge->bi = pti; /* Cseq->i */
440: merge->bj = ptj; /* Cseq->j */
441: merge->coi = coi; /* Co->i */
442: merge->coj = coj; /* Co->j */
443: merge->buf_ri = buf_ri;
444: merge->buf_rj = buf_rj;
445: merge->owners_co = owners_co;
446: merge->destroy = Cmpi->ops->destroy;
447: merge->duplicate = Cmpi->ops->duplicate;
449: /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
450: Cmpi->assembled = PETSC_FALSE;
451: Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
452: Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
454: /* attach the supporting struct to Cmpi for reuse */
455: c = (Mat_MPIAIJ*)Cmpi->data;
456: c->ptap = ptap;
457: ptap->api = api;
458: ptap->apj = apj;
459: ptap->rmax = ap_rmax;
460: *C = Cmpi;
462: /* flag 'scalable' determines which implementation is used:
463: 0: do dense axpy in MatPtAPNumeric()  - fast, but requires storage of a nonscalable dense array apa;
464: 1: do sparse axpy in MatPtAPNumeric() - might be slower, but uses a sparse array apa */
465: /* set default scalable */
466: ptap->scalable = PETSC_TRUE;
468: PetscOptionsGetBool(((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);
469: if (!ptap->scalable) { /* Do dense axpy */
470: PetscCalloc1(pN,&ptap->apa);
471: } else {
472: PetscCalloc1(ap_rmax+1,&ptap->apa);
473: }
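  /* the branch used at run time can be selected with the option read above, e.g. (illustrative)
       ./app -matptap_scalable 0
     forces the dense-axpy path, while the default (true) keeps the scalable sparse-axpy path */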
475: #if defined(PETSC_USE_INFO)
476: if (pti[pn] != 0) {
477: PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
478: PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%g,&C) for best performance.\n",(double)afill);
479: } else {
480: PetscInfo(Cmpi,"Empty matrix product\n");
481: }
482: #endif
483: return(0);
484: }
488: PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
489: {
490: PetscErrorCode ierr;
491: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
492: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
493: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
494: Mat_SeqAIJ *p_loc,*p_oth;
495: Mat_PtAPMPI *ptap;
496: Mat_Merge_SeqsToMPI *merge;
497: PetscInt *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
498: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
499: PetscInt i,j,k,anz,pnz,apnz,nextap,row,*cj;
500: MatScalar *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
501: PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
502: MPI_Comm comm;
503: PetscMPIInt size,rank,taga,*len_s;
504: PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
505: PetscInt **buf_ri,**buf_rj;
506: PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
507: MPI_Request *s_waits,*r_waits;
508: MPI_Status *status;
509: MatScalar **abuf_r,*ba_i,*pA,*coa,*ba;
510: PetscInt *api,*apj,*coi,*coj;
511: PetscInt *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
512: PetscBool scalable;
513: #if defined(PTAP_PROFILE)
514: PetscLogDouble t0,t1,t2,t3,t4,et2_AP=0.0,et2_PtAP=0.0,t2_0,t2_1,t2_2;
515: #endif
518: PetscObjectGetComm((PetscObject)C,&comm);
519: #if defined(PTAP_PROFILE)
520: PetscTime(&t0);
521: #endif
522: MPI_Comm_size(comm,&size);
523: MPI_Comm_rank(comm,&rank);
525: ptap = c->ptap;
526: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
527: merge = ptap->merge;
528: apa = ptap->apa;
529: scalable = ptap->scalable;
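  /* apa is the row workspace for A[i,:]*P set up in the symbolic phase: in the dense path it has
     length pN and is indexed by global column of P; in the sparse (scalable) path it has length
     ap_rmax+1 and is indexed by position within the row's nonzero pattern apJ */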
531: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
532: /*-----------------------------------------------------*/
533: if (ptap->reuse == MAT_INITIAL_MATRIX) {
534: /* P_oth and P_loc are obtained in MatPtAPSymbolic(), skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
535: ptap->reuse = MAT_REUSE_MATRIX;
536: } else { /* update numerical values of P_oth and P_loc */
537: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
538: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
539: }
540: #if defined(PTAP_PROFILE)
541: PetscTime(&t1);
542: #endif
544: /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
545: /*--------------------------------------------------------------*/
546: /* get data from symbolic products */
547: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
548: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
549: pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
550: pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
552: coi = merge->coi; coj = merge->coj;
553: PetscCalloc1(coi[pon]+1,&coa);
555: bi = merge->bi; bj = merge->bj;
556: owners = merge->rowmap->range;
557: PetscCalloc1(bi[cm]+1,&ba); /* ba: Cseq->a */
559: api = ptap->api; apj = ptap->apj;
561: if (!scalable) { /* Do dense axpy on apa (length of pN, stores A[i,:]*P) - nonscalable, but fast */
562: PetscInfo(C,"Using non-scalable dense axpy\n");
563: /*-----------------------------------------------------------------------------------------------------*/
564: for (i=0; i<am; i++) {
565: #if defined(PTAP_PROFILE)
566: PetscTime(&t2_0);
567: #endif
568: /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
569: /*------------------------------------------------------------*/
570: apJ = apj + api[i];
572: /* diagonal portion of A */
573: anz = adi[i+1] - adi[i];
574: adj = ad->j + adi[i];
575: ada = ad->a + adi[i];
576: for (j=0; j<anz; j++) {
577: row = adj[j];
578: pnz = pi_loc[row+1] - pi_loc[row];
579: pj = pj_loc + pi_loc[row];
580: pa = pa_loc + pi_loc[row];
582: /* perform dense axpy */
583: valtmp = ada[j];
584: for (k=0; k<pnz; k++) {
585: apa[pj[k]] += valtmp*pa[k];
586: }
587: PetscLogFlops(2.0*pnz);
588: }
590: /* off-diagonal portion of A */
591: anz = aoi[i+1] - aoi[i];
592: aoj = ao->j + aoi[i];
593: aoa = ao->a + aoi[i];
594: for (j=0; j<anz; j++) {
595: row = aoj[j];
596: pnz = pi_oth[row+1] - pi_oth[row];
597: pj = pj_oth + pi_oth[row];
598: pa = pa_oth + pi_oth[row];
600: /* perform dense axpy */
601: valtmp = aoa[j];
602: for (k=0; k<pnz; k++) {
603: apa[pj[k]] += valtmp*pa[k];
604: }
605: PetscLogFlops(2.0*pnz);
606: }
607: #if defined(PTAP_PROFILE)
608: PetscTime(&t2_1);
609: et2_AP += t2_1 - t2_0;
610: #endif
612: /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
613: /*--------------------------------------------------------------*/
614: apnz = api[i+1] - api[i];
615: /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
616: pnz = po->i[i+1] - po->i[i];
617: poJ = po->j + po->i[i];
618: pA = po->a + po->i[i];
619: for (j=0; j<pnz; j++) {
620: row = poJ[j];
621: cnz = coi[row+1] - coi[row];
622: cj = coj + coi[row];
623: ca = coa + coi[row];
624: /* perform dense axpy */
625: valtmp = pA[j];
626: for (k=0; k<cnz; k++) {
627: ca[k] += valtmp*apa[cj[k]];
628: }
629: PetscLogFlops(2.0*cnz);
630: }
632: /* put the value into Cd (diagonal part) */
633: pnz = pd->i[i+1] - pd->i[i];
634: pdJ = pd->j + pd->i[i];
635: pA = pd->a + pd->i[i];
636: for (j=0; j<pnz; j++) {
637: row = pdJ[j];
638: cnz = bi[row+1] - bi[row];
639: cj = bj + bi[row];
640: ca = ba + bi[row];
641: /* perform dense axpy */
642: valtmp = pA[j];
643: for (k=0; k<cnz; k++) {
644: ca[k] += valtmp*apa[cj[k]];
645: }
646: PetscLogFlops(2.0*cnz);
647: }
649: /* zero the current row of A*P */
650: for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
651: #if defined(PTAP_PROFILE)
652: PetscTime(&t2_2);
653: et2_PtAP += t2_2 - t2_1;
654: #endif
655: }
656: } else { /* Do sparse axpy on apa (length of ap_rmax, stores A[i,:]*P) - scalable, but slower */
657: PetscInfo(C,"Using scalable sparse axpy\n");
658: /*-----------------------------------------------------------------------------------------*/
659: pA=pa_loc;
660: for (i=0; i<am; i++) {
661: #if defined(PTAP_PROFILE)
662: PetscTime(&t2_0);
663: #endif
664: /* form i-th sparse row of A*P */
665: apnz = api[i+1] - api[i];
666: apJ = apj + api[i];
667: /* diagonal portion of A */
668: anz = adi[i+1] - adi[i];
669: adj = ad->j + adi[i];
670: ada = ad->a + adi[i];
671: for (j=0; j<anz; j++) {
672: row = adj[j];
673: pnz = pi_loc[row+1] - pi_loc[row];
674: pj = pj_loc + pi_loc[row];
675: pa = pa_loc + pi_loc[row];
676: valtmp = ada[j];
677: nextp = 0;
678: for (k=0; nextp<pnz; k++) {
679: if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
680: apa[k] += valtmp*pa[nextp++];
681: }
682: }
683: PetscLogFlops(2.0*pnz);
684: }
685: /* off-diagonal portion of A */
686: anz = aoi[i+1] - aoi[i];
687: aoj = ao->j + aoi[i];
688: aoa = ao->a + aoi[i];
689: for (j=0; j<anz; j++) {
690: row = aoj[j];
691: pnz = pi_oth[row+1] - pi_oth[row];
692: pj = pj_oth + pi_oth[row];
693: pa = pa_oth + pi_oth[row];
694: valtmp = aoa[j];
695: nextp = 0;
696: for (k=0; nextp<pnz; k++) {
697: if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
698: apa[k] += valtmp*pa[nextp++];
699: }
700: }
701: PetscLogFlops(2.0*pnz);
702: }
703: #if defined(PTAP_PROFILE)
704: PetscTime(&t2_1);
705: et2_AP += t2_1 - t2_0;
706: #endif
708: /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
709: /*--------------------------------------------------------------*/
710: pnz = pi_loc[i+1] - pi_loc[i];
711: pJ = pj_loc + pi_loc[i];
712: for (j=0; j<pnz; j++) {
713: nextap = 0;
714: row = pJ[j]; /* global index */
715: if (row < pcstart || row >=pcend) { /* put the value into Co */
716: row = *poJ;
717: cj = coj + coi[row];
718: ca = coa + coi[row]; poJ++;
719: } else { /* put the value into Cd */
720: row = *pdJ;
721: cj = bj + bi[row];
722: ca = ba + bi[row]; pdJ++;
723: }
724: valtmp = pA[j];
725: for (k=0; nextap<apnz; k++) {
726: if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
727: }
728: PetscLogFlops(2.0*apnz);
729: }
730: pA += pnz;
731: /* zero the current row info for A*P */
732: PetscMemzero(apa,apnz*sizeof(MatScalar));
733: #if defined(PTAP_PROFILE)
734: PetscTime(&t2_2);
735: et2_PtAP += t2_2 - t2_1;
736: #endif
737: }
738: }
739: #if defined(PTAP_PROFILE)
740: PetscTime(&t2);
741: #endif
743: /* 3) send and recv matrix values coa */
744: /*------------------------------------*/
745: buf_ri = merge->buf_ri;
746: buf_rj = merge->buf_rj;
747: len_s = merge->len_s;
748: PetscCommGetNewTag(comm,&taga);
749: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
751: PetscMalloc2(merge->nsend+1,&s_waits,size,&status);
752: for (proc=0,k=0; proc<size; proc++) {
753: if (!len_s[proc]) continue;
754: i = merge->owners_co[proc];
755: MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
756: k++;
757: }
758: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
759: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
761: PetscFree2(s_waits,status);
762: PetscFree(r_waits);
763: PetscFree(coa);
764: #if defined(PTAP_PROFILE)
765: PetscTime(&t3);
766: #endif
768: /* 4) insert local Cseq and received values into Cmpi */
769: /*------------------------------------------------------*/
770: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
771: for (k=0; k<merge->nrecv; k++) {
772: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
773: nrows = *(buf_ri_k[k]);
774: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */
775: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
776: }
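  /* merge loop: for each owned row i of C, add the received contributions (both bj_i and the
     received cj are sorted, so a single pass with nextcj matches columns) and insert the
     completed row into the MPI matrix C */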
778: for (i=0; i<cm; i++) {
779: row = owners[rank] + i; /* global row index of C_seq */
780: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
781: ba_i = ba + bi[i];
782: bnz = bi[i+1] - bi[i];
783: /* add received vals into ba */
784: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
785: /* i-th row */
786: if (i == *nextrow[k]) {
787: cnz = *(nextci[k]+1) - *nextci[k];
788: cj = buf_rj[k] + *(nextci[k]);
789: ca = abuf_r[k] + *(nextci[k]);
790: nextcj = 0;
791: for (j=0; nextcj<cnz; j++) {
792: if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
793: ba_i[j] += ca[nextcj++];
794: }
795: }
796: nextrow[k]++; nextci[k]++;
797: PetscLogFlops(2.0*cnz);
798: }
799: }
800: MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
801: }
802: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
803: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
805: PetscFree(ba);
806: PetscFree(abuf_r[0]);
807: PetscFree(abuf_r);
808: PetscFree3(buf_ri_k,nextrow,nextci);
809: #if defined(PTAP_PROFILE)
810: PetscTime(&t4);
811: if (rank==1) PetscPrintf(MPI_COMM_SELF," [%d] PtAPNum %g/P + %g/PtAP( %g + %g ) + %g/comm + %g/Cloc = %g\n\n",rank,t1-t0,t2-t1,et2_AP,et2_PtAP,t3-t2,t4-t3,t4-t0);
812: #endif
813: return(0);
814: }