Actual source code: mpimatmatmult.c
petsc-3.14.6 2021-03-30
2: /*
3: Defines matrix-matrix product routines for pairs of MPIAIJ matrices
4: C = A * B
5: */
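/*
   Usage sketch (not part of this file): these kernels are reached through the public
   MatProduct interface or the MatMatMult() convenience wrapper. A minimal example,
   assuming A and B are assembled MPIAIJ matrices and with error checking omitted:

     Mat C;
     MatProductCreate(A,B,NULL,&C);
     MatProductSetType(C,MATPRODUCT_AB);
     MatProductSetFill(C,PETSC_DEFAULT);
     MatProductSetFromOptions(C);
     MatProductSymbolic(C);
     MatProductNumeric(C);

   or, equivalently, MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C). For a pair of
   MPIAIJ matrices MatProductSymbolic() dispatches to MatProductSymbolic_AB_MPIAIJ_MPIAIJ()
   below.
*/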
6: #include <../src/mat/impls/aij/seq/aij.h>
7: #include <../src/mat/utils/freespace.h>
8: #include <../src/mat/impls/aij/mpi/mpiaij.h>
9: #include <petscbt.h>
10: #include <../src/mat/impls/dense/mpi/mpidense.h>
11: #include <petsc/private/vecimpl.h>
12: #include <petsc/private/vecscatterimpl.h>
14: #if defined(PETSC_HAVE_HYPRE)
15: PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat);
16: #endif
18: PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
19: {
20: PetscErrorCode ierr;
21: Mat_Product *product = C->product;
22: Mat A=product->A,B=product->B;
23: MatProductAlgorithm alg=product->alg;
24: PetscReal fill=product->fill;
25: PetscBool flg;
28: /* scalable */
29: PetscStrcmp(alg,"scalable",&flg);
30: if (flg) {
31: MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
32: return(0);
33: }
35: /* nonscalable */
36: PetscStrcmp(alg,"nonscalable",&flg);
37: if (flg) {
38: MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
39: return(0);
40: }
42: /* seqmpi */
43: PetscStrcmp(alg,"seqmpi",&flg);
44: if (flg) {
45: MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A,B,fill,C);
46: return(0);
47: }
49: #if defined(PETSC_HAVE_HYPRE)
50: PetscStrcmp(alg,"hypre",&flg);
51: if (flg) {
52: MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);
53: return(0);
54: }
55: #endif
56: SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_SUP,"Mat Product Algorithm is not supported");
57: }
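/*
   Note: product->alg is chosen with MatProductSetAlgorithm() (or through the options
   database via MatProductSetFromOptions()). For the A*B product of two MPIAIJ matrices
   handled here the recognized values are "scalable", "nonscalable", "seqmpi" and, when
   PETSc is configured with hypre, "hypre". A minimal sketch, assuming C was obtained
   from MatProductCreate(A,B,NULL,&C):

     MatProductSetType(C,MATPRODUCT_AB);
     MatProductSetAlgorithm(C,"scalable");
     MatProductSetFromOptions(C);
     MatProductSymbolic(C);
*/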
59: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *data)
60: {
62: Mat_APMPI *ptap = (Mat_APMPI*)data;
65: PetscFree2(ptap->startsj_s,ptap->startsj_r);
66: PetscFree(ptap->bufa);
67: MatDestroy(&ptap->P_loc);
68: MatDestroy(&ptap->P_oth);
69: MatDestroy(&ptap->Pt);
70: PetscFree(ptap->api);
71: PetscFree(ptap->apj);
72: PetscFree(ptap->apa);
73: PetscFree(ptap);
74: return(0);
75: }
77: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C)
78: {
80: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
81: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
82: Mat_SeqAIJ *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
83: PetscScalar *cda=cd->a,*coa=co->a;
84: Mat_SeqAIJ *p_loc,*p_oth;
85: PetscScalar *apa,*ca;
86: PetscInt cm =C->rmap->n;
87: Mat_APMPI *ptap;
88: PetscInt *api,*apj,*apJ,i,k;
89: PetscInt cstart=C->cmap->rstart;
90: PetscInt cdnz,conz,k0,k1;
91: MPI_Comm comm;
92: PetscMPIInt size;
95: MatCheckProduct(C,3);
96: ptap = (Mat_APMPI*)C->product->data;
97: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
98: PetscObjectGetComm((PetscObject)A,&comm);
99: MPI_Comm_size(comm,&size);
101: if (!ptap->P_oth && size>1) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatProductClear()");
103: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
104: /*-----------------------------------------------------*/
105: /* update numerical values of P_oth and P_loc */
106: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
107: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
109: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
110: /*----------------------------------------------------------*/
111: /* get data from symbolic products */
112: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
113: p_oth = NULL;
114: if (size >1) {
115: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
116: }
118: /* get apa for storing dense row A[i,:]*P */
119: apa = ptap->apa;
121: api = ptap->api;
122: apj = ptap->apj;
123: for (i=0; i<cm; i++) {
124: /* compute apa = A[i,:]*P */
125: AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
127: /* set values in C */
128: apJ = apj + api[i];
129: cdnz = cd->i[i+1] - cd->i[i];
130: conz = co->i[i+1] - co->i[i];
132: /* 1st off-diagonal part of C */
133: ca = coa + co->i[i];
134: k = 0;
135: for (k0=0; k0<conz; k0++) {
136: if (apJ[k] >= cstart) break;
137: ca[k0] = apa[apJ[k]];
138: apa[apJ[k++]] = 0.0;
139: }
141: /* diagonal part of C */
142: ca = cda + cd->i[i];
143: for (k1=0; k1<cdnz; k1++) {
144: ca[k1] = apa[apJ[k]];
145: apa[apJ[k++]] = 0.0;
146: }
148: /* 2nd off-diagonal part of C */
149: ca = coa + co->i[i];
150: for (; k0<conz; k0++) {
151: ca[k0] = apa[apJ[k]];
152: apa[apJ[k++]] = 0.0;
153: }
154: }
155: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
156: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
157: return(0);
158: }
160: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat C)
161: {
162: PetscErrorCode ierr;
163: MPI_Comm comm;
164: PetscMPIInt size;
165: Mat_APMPI *ptap;
166: PetscFreeSpaceList free_space=NULL,current_space=NULL;
167: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
168: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
169: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
170: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
171: PetscInt *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
172: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
173: PetscBT lnkbt;
174: PetscReal afill;
175: MatType mtype;
178: MatCheckProduct(C,4);
179: if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
180: PetscObjectGetComm((PetscObject)A,&comm);
181: MPI_Comm_size(comm,&size);
183: /* create struct Mat_APMPI and attach it to C later */
184: PetscNew(&ptap);
186: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
187: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
189: /* get P_loc by taking all local rows of P */
190: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
192: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
193: pi_loc = p_loc->i; pj_loc = p_loc->j;
194: if (size > 1) {
195: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
196: pi_oth = p_oth->i; pj_oth = p_oth->j;
197: } else {
198: p_oth = NULL;
199: pi_oth = NULL; pj_oth = NULL;
200: }
202: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
203: /*-------------------------------------------------------------------*/
204: PetscMalloc1(am+2,&api);
205: ptap->api = api;
206: api[0] = 0;
208: /* create and initialize a linked list */
209: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
211: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
212: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
213: current_space = free_space;
215: MatPreallocateInitialize(comm,am,pn,dnz,onz);
216: for (i=0; i<am; i++) {
217: /* diagonal portion of A */
218: nzi = adi[i+1] - adi[i];
219: for (j=0; j<nzi; j++) {
220: row = *adj++;
221: pnz = pi_loc[row+1] - pi_loc[row];
222: Jptr = pj_loc + pi_loc[row];
223: /* add non-zero cols of P into the sorted linked list lnk */
224: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
225: }
226: /* off-diagonal portion of A */
227: nzi = aoi[i+1] - aoi[i];
228: for (j=0; j<nzi; j++) {
229: row = *aoj++;
230: pnz = pi_oth[row+1] - pi_oth[row];
231: Jptr = pj_oth + pi_oth[row];
232: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
233: }
235: apnz = lnk[0];
236: api[i+1] = api[i] + apnz;
238: /* if free space is not available, double the total space in the list */
239: if (current_space->local_remaining<apnz) {
240: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
241: nspacedouble++;
242: }
244: /* Copy data into free space, then initialize lnk */
245: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
246: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
248: current_space->array += apnz;
249: current_space->local_used += apnz;
250: current_space->local_remaining -= apnz;
251: }
253: /* Allocate space for apj, initialize apj, and */
254: /* destroy list of free space and other temporary array(s) */
255: PetscMalloc1(api[am]+1,&ptap->apj);
256: apj = ptap->apj;
257: PetscFreeSpaceContiguous(&free_space,ptap->apj);
258: PetscLLDestroy(lnk,lnkbt);
260: /* malloc apa to store dense row A[i,:]*P */
261: PetscCalloc1(pN,&ptap->apa);
263: /* set and assemble symbolic parallel matrix C */
264: /*---------------------------------------------*/
265: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
266: MatSetBlockSizesFromMats(C,A,P);
268: MatGetType(A,&mtype);
269: MatSetType(C,mtype);
270: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
271: MatPreallocateFinalize(dnz,onz);
273: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
274: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
275: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
276: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
278: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
279: C->ops->productnumeric = MatProductNumeric_AB;
281: /* attach the supporting struct to C for reuse */
282: C->product->data = ptap;
283: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
285: /* set MatInfo */
286: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
287: if (afill < 1.0) afill = 1.0;
288: C->info.mallocs = nspacedouble;
289: C->info.fill_ratio_given = fill;
290: C->info.fill_ratio_needed = afill;
292: #if defined(PETSC_USE_INFO)
293: if (api[am]) {
294: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
295: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);
296: } else {
297: PetscInfo(C,"Empty matrix product\n");
298: }
299: #endif
300: return(0);
301: }
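/*
   Memory note: the "nonscalable" symbolic/numeric pair above uses a linked list created
   with length pN = P->cmap->N and a dense accumulator apa of pN scalars, i.e. workspace
   proportional to the global number of columns of P. That is what makes it fast for
   moderate pN but "nonscalable"; the "scalable" variant further below (see
   MatMatMultSymbolic_MPIAIJ_MPIAIJ()) sizes both structures by local row lengths instead.
*/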
303: /* ------------------------------------------------------- */
304: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat,Mat,PetscReal,Mat);
305: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat,Mat,Mat);
307: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
308: {
309: Mat_Product *product = C->product;
310: Mat A = product->A,B=product->B;
313: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
314: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
316: C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
317: C->ops->productsymbolic = MatProductSymbolic_AB;
318: return(0);
319: }
320: /* -------------------------------------------------------------------- */
321: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
322: {
323: Mat_Product *product = C->product;
324: Mat A = product->A,B=product->B;
327: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
328: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
330: C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
331: C->ops->productsymbolic = MatProductSymbolic_AtB;
332: return(0);
333: }
335: /* --------------------------------------------------------------------- */
336: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
337: {
339: Mat_Product *product = C->product;
342: switch (product->type) {
343: case MATPRODUCT_AB:
344: MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C);
345: break;
346: case MATPRODUCT_AtB:
347: MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C);
348: break;
349: default:
350: break;
351: }
352: return(0);
353: }
354: /* ------------------------------------------------------- */
356: typedef struct {
357: Mat workB,workB1;
358: MPI_Request *rwaits,*swaits;
359: PetscInt nsends,nrecvs;
360: MPI_Datatype *stype,*rtype;
361: PetscInt blda;
362: } MPIAIJ_MPIDense;
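/*
   Data cached by the MPIAIJ*MPIDense symbolic phase and reused on every numeric call:
     workB, workB1  - sequential dense matrices holding the off-process rows of B for a full
                      column block (Bbn columns) and for the remainder block (Bbn1 columns)
     stype, rtype   - one MPI derived datatype per send/receive, so rows of B are sent out of
                      (and received into) the dense arrays without intermediate packing
     blda           - leading dimension of B recorded at symbolic time; numeric calls with a
                      different lda are rejected
*/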
364: PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
365: {
366: MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
367: PetscErrorCode ierr;
368: PetscInt i;
371: MatDestroy(&contents->workB);
372: MatDestroy(&contents->workB1);
373: for (i=0; i<contents->nsends; i++) {
374: MPI_Type_free(&contents->stype[i]);
375: }
376: for (i=0; i<contents->nrecvs; i++) {
377: MPI_Type_free(&contents->rtype[i]);
378: }
379: PetscFree4(contents->stype,contents->rtype,contents->rwaits,contents->swaits);
380: PetscFree(contents);
381: return(0);
382: }
384: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat C)
385: {
386: PetscErrorCode ierr;
387: Mat_MPIAIJ *aij=(Mat_MPIAIJ*)A->data;
388: PetscInt nz=aij->B->cmap->n,nsends,nrecvs,i,nrows_to,j,blda,clda;
389: MPIAIJ_MPIDense *contents;
390: VecScatter ctx=aij->Mvctx;
391: PetscInt Am=A->rmap->n,Bm=B->rmap->n,BN=B->cmap->N,Bbn,Bbn1,bs,nrows_from,numBb;
392: MPI_Comm comm;
393: MPI_Datatype type1,*stype,*rtype;
394: const PetscInt *sindices,*sstarts,*rstarts;
395: PetscMPIInt *disp;
396: PetscBool cisdense;
399: MatCheckProduct(C,4);
400: if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
401: PetscObjectGetComm((PetscObject)A,&comm);
402: PetscObjectBaseTypeCompare((PetscObject)C,MATMPIDENSE,&cisdense);
403: if (!cisdense) {
404: MatSetType(C,((PetscObject)B)->type_name);
405: }
406: MatSetSizes(C,Am,B->cmap->n,A->rmap->N,BN);
407: MatSetBlockSizesFromMats(C,A,B);
408: MatSetUp(C);
409: MatDenseGetLDA(B,&blda);
410: MatDenseGetLDA(C,&clda);
411: PetscNew(&contents);
413: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);
414: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);
416: /* Create column block of B and C for memory scalability when BN is too large */
417: /* Estimate Bbn, column size of Bb */
418: if (nz) {
419: Bbn1 = 2*Am*BN/nz;
420: if (!Bbn1) Bbn1 = 1;
421: } else Bbn1 = BN;
423: bs = PetscAbs(B->cmap->bs);
424: Bbn1 = Bbn1/bs *bs; /* Bbn1 is a multiple of bs */
425: if (Bbn1 > BN) Bbn1 = BN;
426: MPI_Allreduce(&Bbn1,&Bbn,1,MPIU_INT,MPI_MAX,comm);
428: /* Enable runtime option for Bbn */
429: PetscOptionsBegin(comm,((PetscObject)C)->prefix,"MatMatMult","Mat");
430: PetscOptionsInt("-matmatmult_Bbn","Number of columns in Bb","MatMatMult",Bbn,&Bbn,NULL);
431: PetscOptionsEnd();
432: Bbn = PetscMin(Bbn,BN);
434: if (Bbn > 0 && Bbn < BN) {
435: numBb = BN/Bbn;
436: Bbn1 = BN - numBb*Bbn;
437: } else numBb = 0;
439: if (numBb) {
440: PetscInfo3(C,"use Bb, BN=%D, Bbn=%D; numBb=%D\n",BN,Bbn,numBb);
441: if (Bbn1) { /* Create workB1 for the remaining columns */
442: PetscInfo2(C,"use Bb1, BN=%D, Bbn1=%D\n",BN,Bbn1);
443: /* Create work matrix used to store off processor rows of B needed for local product */
444: MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn1,NULL,&contents->workB1);
445: } else contents->workB1 = NULL;
446: }
448: /* Create work matrix used to store off processor rows of B needed for local product */
449: MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn,NULL,&contents->workB);
451: /* Use MPI derived data type to reduce memory required by the send/recv buffers */
452: PetscMalloc4(nsends,&stype,nrecvs,&rtype,nrecvs,&contents->rwaits,nsends,&contents->swaits);
453: contents->stype = stype;
454: contents->nsends = nsends;
456: contents->rtype = rtype;
457: contents->nrecvs = nrecvs;
458: contents->blda = blda;
460: PetscMalloc1(Bm+1,&disp);
461: for (i=0; i<nsends; i++) {
462: nrows_to = sstarts[i+1]-sstarts[i];
463: for (j=0; j<nrows_to; j++){
464: disp[j] = sindices[sstarts[i]+j]; /* rowB to be sent */
465: }
466: MPI_Type_create_indexed_block(nrows_to,1,(const PetscMPIInt *)disp,MPIU_SCALAR,&type1);
468: MPI_Type_create_resized(type1,0,blda*sizeof(PetscScalar),&stype[i]);
469: MPI_Type_commit(&stype[i]);
470: MPI_Type_free(&type1);
471: }
473: for (i=0; i<nrecvs; i++) {
474: /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
475: nrows_from = rstarts[i+1]-rstarts[i];
476: disp[0] = 0;
477: MPI_Type_create_indexed_block(1, nrows_from, (const PetscMPIInt *)disp, MPIU_SCALAR, &type1);
478: MPI_Type_create_resized(type1, 0, nz*sizeof(PetscScalar), &rtype[i]);
479: MPI_Type_commit(&rtype[i]);
480: MPI_Type_free(&type1);
481: }
483: PetscFree(disp);
484: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);
485: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);
486: MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
487: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
488: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
489: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
491: C->product->data = contents;
492: C->product->destroy = MatMPIAIJ_MPIDenseDestroy;
493: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
494: return(0);
495: }
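/*
   Column-blocking note: when B has many global columns BN, the routine above limits the
   width of the communicated work matrix to roughly Bbn = 2*Am*BN/nz columns (rounded to a
   multiple of the block size), and the numeric routine then sweeps over B and C in blocks
   of Bbn columns. The estimate can be overridden through the option registered above,
   e.g. -matmatmult_Bbn 128 (prefixed with the product's options prefix, if any).
*/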
497: PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat,const PetscBool);
498: /*
499: Performs an efficient scatter on the rows of B needed by this process; this is
500: a modification of the VecScatterBegin_() routines.
502: Input: Bbidx = 0: B = Bb
503: = 1: B = Bb1, see MatMatMultSymbolic_MPIAIJ_MPIDense()
504: */
505: PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,PetscInt Bbidx,Mat C,Mat *outworkB)
506: {
507: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
508: PetscErrorCode ierr;
509: const PetscScalar *b;
510: PetscScalar *rvalues;
511: VecScatter ctx = aij->Mvctx;
512: const PetscInt *sindices,*sstarts,*rstarts;
513: const PetscMPIInt *sprocs,*rprocs;
514: PetscInt i,nsends,nrecvs;
515: MPI_Request *swaits,*rwaits;
516: MPI_Comm comm;
517: PetscMPIInt tag=((PetscObject)ctx)->tag,ncols=B->cmap->N,nrows=aij->B->cmap->n,nsends_mpi,nrecvs_mpi;
518: MPIAIJ_MPIDense *contents;
519: Mat workB;
520: MPI_Datatype *stype,*rtype;
521: PetscInt blda;
524: MatCheckProduct(C,4);
525: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
526: contents = (MPIAIJ_MPIDense*)C->product->data;
527: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL/*bs*/);
528: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL/*bs*/);
529: PetscMPIIntCast(nsends,&nsends_mpi);
530: PetscMPIIntCast(nrecvs,&nrecvs_mpi);
531: if (Bbidx == 0) {
532: workB = *outworkB = contents->workB;
533: } else {
534: workB = *outworkB = contents->workB1;
535: }
536: if (nrows != workB->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
537: swaits = contents->swaits;
538: rwaits = contents->rwaits;
540: MatDenseGetArrayRead(B,&b);
541: MatDenseGetLDA(B,&blda);
542: if (blda != contents->blda) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot reuse an input matrix with lda %D != %D",blda,contents->blda);
543: MatDenseGetArray(workB,&rvalues);
545: /* Post recv, use MPI derived data type to save memory */
546: PetscObjectGetComm((PetscObject)C,&comm);
547: rtype = contents->rtype;
548: for (i=0; i<nrecvs; i++) {
549: MPI_Irecv(rvalues+(rstarts[i]-rstarts[0]),ncols,rtype[i],rprocs[i],tag,comm,rwaits+i);
550: }
552: stype = contents->stype;
553: for (i=0; i<nsends; i++) {
554: MPI_Isend(b,ncols,stype[i],sprocs[i],tag,comm,swaits+i);
555: }
557: if (nrecvs) {MPI_Waitall(nrecvs_mpi,rwaits,MPI_STATUSES_IGNORE);}
558: if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
560: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL);
561: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL);
562: MatDenseRestoreArrayRead(B,&b);
563: MatDenseRestoreArray(workB,&rvalues);
564: return(0);
565: }
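/*
   Datatype note: each send type stype[i] (built in the symbolic phase) is an indexed block
   selecting the rows of the local dense array of B that process i needs, resized to an
   extent of blda scalars so that sending ncols copies walks through the columns of B in
   place. Each receive type rtype[i] is a contiguous row block resized to an extent of nz
   scalars, so the ncols copies land directly in consecutive columns of workB. This is why
   the routine above can post a single MPI_Isend/MPI_Irecv per neighbor with no packing
   buffers.
*/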
567: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
568: {
569: PetscErrorCode ierr;
570: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
571: Mat_MPIDense *bdense = (Mat_MPIDense*)B->data;
572: Mat_MPIDense *cdense = (Mat_MPIDense*)C->data;
573: Mat workB;
574: MPIAIJ_MPIDense *contents;
577: MatCheckProduct(C,3);
578: if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
579: contents = (MPIAIJ_MPIDense*)C->product->data;
580: /* diagonal block of A times all local rows of B */
581: /* TODO: this calls a symbolic multiplication every time, which could be avoided */
582: MatMatMult(aij->A,bdense->A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&cdense->A);
583: if (contents->workB->cmap->n == B->cmap->N) {
584: /* get off processor parts of B needed to complete C=A*B */
585: MatMPIDenseScatter(A,B,0,C,&workB);
587: /* off-diagonal block of A times nonlocal rows of B */
588: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE);
589: } else {
590: Mat Bb,Cb;
591: PetscInt BN=B->cmap->N,n=contents->workB->cmap->n,i;
592: if (n <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Column block size %D must be positive",n);
594: for (i=0; i<BN; i+=n) {
595: MatDenseGetSubMatrix(B,i,PetscMin(i+n,BN),&Bb);
596: MatDenseGetSubMatrix(C,i,PetscMin(i+n,BN),&Cb);
598: /* get off processor parts of B needed to complete C=A*B */
599: MatMPIDenseScatter(A,Bb,i+n>BN,C,&workB);
601: /* off-diagonal block of A times nonlocal rows of B */
602: cdense = (Mat_MPIDense*)Cb->data;
603: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE);
605: MatDenseRestoreSubMatrix(B,&Bb);
606: MatDenseRestoreSubMatrix(C,&Cb);
607: }
608: }
609: return(0);
610: }
612: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
613: {
615: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
616: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
617: Mat_SeqAIJ *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
618: PetscInt *adi = ad->i,*adj,*aoi=ao->i,*aoj;
619: PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a;
620: Mat_SeqAIJ *p_loc,*p_oth;
621: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
622: PetscScalar *pa_loc,*pa_oth,*pa,valtmp,*ca;
623: PetscInt cm = C->rmap->n,anz,pnz;
624: Mat_APMPI *ptap;
625: PetscScalar *apa_sparse;
626: PetscInt *api,*apj,*apJ,i,j,k,row;
627: PetscInt cstart = C->cmap->rstart;
628: PetscInt cdnz,conz,k0,k1,nextp;
629: MPI_Comm comm;
630: PetscMPIInt size;
633: MatCheckProduct(C,3);
634: ptap = (Mat_APMPI*)C->product->data;
635: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
636: PetscObjectGetComm((PetscObject)C,&comm);
637: MPI_Comm_size(comm,&size);
638: if (!ptap->P_oth && size>1) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatProductClear()");
640: apa_sparse = ptap->apa;
642: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
643: /*-----------------------------------------------------*/
644: /* update numerical values of P_oth and P_loc */
645: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
646: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
648: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
649: /*----------------------------------------------------------*/
650: /* get data from symbolic products */
651: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
652: pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
653: if (size >1) {
654: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
655: pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;
656: } else {
657: p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL;
658: }
660: api = ptap->api;
661: apj = ptap->apj;
662: for (i=0; i<cm; i++) {
663: apJ = apj + api[i];
665: /* diagonal portion of A */
666: anz = adi[i+1] - adi[i];
667: adj = ad->j + adi[i];
668: ada = ad->a + adi[i];
669: for (j=0; j<anz; j++) {
670: row = adj[j];
671: pnz = pi_loc[row+1] - pi_loc[row];
672: pj = pj_loc + pi_loc[row];
673: pa = pa_loc + pi_loc[row];
674: /* perform sparse axpy */
675: valtmp = ada[j];
676: nextp = 0;
677: for (k=0; nextp<pnz; k++) {
678: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
679: apa_sparse[k] += valtmp*pa[nextp++];
680: }
681: }
682: PetscLogFlops(2.0*pnz);
683: }
685: /* off-diagonal portion of A */
686: anz = aoi[i+1] - aoi[i];
687: aoj = ao->j + aoi[i];
688: aoa = ao->a + aoi[i];
689: for (j=0; j<anz; j++) {
690: row = aoj[j];
691: pnz = pi_oth[row+1] - pi_oth[row];
692: pj = pj_oth + pi_oth[row];
693: pa = pa_oth + pi_oth[row];
694: /* perform sparse axpy */
695: valtmp = aoa[j];
696: nextp = 0;
697: for (k=0; nextp<pnz; k++) {
698: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
699: apa_sparse[k] += valtmp*pa[nextp++];
700: }
701: }
702: PetscLogFlops(2.0*pnz);
703: }
705: /* set values in C */
706: cdnz = cd->i[i+1] - cd->i[i];
707: conz = co->i[i+1] - co->i[i];
709: /* 1st off-diagonal part of C */
710: ca = coa + co->i[i];
711: k = 0;
712: for (k0=0; k0<conz; k0++) {
713: if (apJ[k] >= cstart) break;
714: ca[k0] = apa_sparse[k];
715: apa_sparse[k] = 0.0;
716: k++;
717: }
719: /* diagonal part of C */
720: ca = cda + cd->i[i];
721: for (k1=0; k1<cdnz; k1++) {
722: ca[k1] = apa_sparse[k];
723: apa_sparse[k] = 0.0;
724: k++;
725: }
727: /* 2nd off-diagonal part of C */
728: ca = coa + co->i[i];
729: for (; k0<conz; k0++) {
730: ca[k0] = apa_sparse[k];
731: apa_sparse[k] = 0.0;
732: k++;
733: }
734: }
735: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
736: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
737: return(0);
738: }
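/*
   The "sparse axpy" loops above rely on apJ (the symbolic column pattern of row i of A*P,
   a superset of every contributing P row pattern) and pj both being sorted: the index k
   only moves forward, so each P row is merged in a single scan and the accumulator
   apa_sparse never needs more entries than the row of C itself.
*/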
740: /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */
741: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat C)
742: {
743: PetscErrorCode ierr;
744: MPI_Comm comm;
745: PetscMPIInt size;
746: Mat_APMPI *ptap;
747: PetscFreeSpaceList free_space = NULL,current_space=NULL;
748: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
749: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
750: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
751: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
752: PetscInt i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0;
753: PetscInt am=A->rmap->n,pn=P->cmap->n,pm=P->rmap->n,lsize=pn+20;
754: PetscReal afill;
755: MatType mtype;
758: MatCheckProduct(C,4);
759: if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
760: PetscObjectGetComm((PetscObject)A,&comm);
761: MPI_Comm_size(comm,&size);
763: /* create struct Mat_APMPI and attach it to C later */
764: PetscNew(&ptap);
766: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
767: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
769: /* get P_loc by taking all local rows of P */
770: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
772: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
773: pi_loc = p_loc->i; pj_loc = p_loc->j;
774: if (size > 1) {
775: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
776: pi_oth = p_oth->i; pj_oth = p_oth->j;
777: } else {
778: p_oth = NULL;
779: pi_oth = NULL; pj_oth = NULL;
780: }
782: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
783: /*-------------------------------------------------------------------*/
784: PetscMalloc1(am+2,&api);
785: ptap->api = api;
786: api[0] = 0;
788: PetscLLCondensedCreate_Scalable(lsize,&lnk);
790: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
791: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
792: current_space = free_space;
793: MatPreallocateInitialize(comm,am,pn,dnz,onz);
794: for (i=0; i<am; i++) {
795: /* diagonal portion of A */
796: nzi = adi[i+1] - adi[i];
797: for (j=0; j<nzi; j++) {
798: row = *adj++;
799: pnz = pi_loc[row+1] - pi_loc[row];
800: Jptr = pj_loc + pi_loc[row];
801: /* Expand list if it is not long enough */
802: if (pnz+apnz_max > lsize) {
803: lsize = pnz+apnz_max;
804: PetscLLCondensedExpand_Scalable(lsize, &lnk);
805: }
806: /* add non-zero cols of P into the sorted linked list lnk */
807: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
808: apnz = *lnk; /* The first element in the list is the number of items in the list */
809: api[i+1] = api[i] + apnz;
810: if (apnz > apnz_max) apnz_max = apnz;
811: }
812: /* off-diagonal portion of A */
813: nzi = aoi[i+1] - aoi[i];
814: for (j=0; j<nzi; j++) {
815: row = *aoj++;
816: pnz = pi_oth[row+1] - pi_oth[row];
817: Jptr = pj_oth + pi_oth[row];
818: /* Expand list if it is not long enough */
819: if (pnz+apnz_max > lsize) {
820: lsize = pnz + apnz_max;
821: PetscLLCondensedExpand_Scalable(lsize, &lnk);
822: }
823: /* add non-zero cols of P into the sorted linked list lnk */
824: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
825: apnz = *lnk; /* The first element in the list is the number of items in the list */
826: api[i+1] = api[i] + apnz;
827: if (apnz > apnz_max) apnz_max = apnz;
828: }
829: apnz = *lnk;
830: api[i+1] = api[i] + apnz;
831: if (apnz > apnz_max) apnz_max = apnz;
833: /* if free space is not available, double the total space in the list */
834: if (current_space->local_remaining<apnz) {
835: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
836: nspacedouble++;
837: }
839: /* Copy data into free space, then initialize lnk */
840: PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);
841: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
843: current_space->array += apnz;
844: current_space->local_used += apnz;
845: current_space->local_remaining -= apnz;
846: }
848: /* Allocate space for apj, initialize apj, and */
849: /* destroy list of free space and other temporary array(s) */
850: PetscMalloc1(api[am]+1,&ptap->apj);
851: apj = ptap->apj;
852: PetscFreeSpaceContiguous(&free_space,ptap->apj);
853: PetscLLCondensedDestroy_Scalable(lnk);
855: /* create and assemble symbolic parallel matrix C */
856: /*----------------------------------------------------*/
857: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
858: MatSetBlockSizesFromMats(C,A,P);
859: MatGetType(A,&mtype);
860: MatSetType(C,mtype);
861: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
862: MatPreallocateFinalize(dnz,onz);
864: /* malloc apa for assembling C */
865: PetscCalloc1(apnz_max,&ptap->apa);
867: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
868: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
869: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
870: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
872: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
873: C->ops->productnumeric = MatProductNumeric_AB;
875: /* attach the supporting struct to C for reuse */
876: C->product->data = ptap;
877: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
879: /* set MatInfo */
880: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
881: if (afill < 1.0) afill = 1.0;
882: C->info.mallocs = nspacedouble;
883: C->info.fill_ratio_given = fill;
884: C->info.fill_ratio_needed = afill;
886: #if defined(PETSC_USE_INFO)
887: if (api[am]) {
888: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
889: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);
890: } else {
891: PetscInfo(C,"Empty matrix product\n");
892: }
893: #endif
894: return(0);
895: }
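/*
   In contrast to the nonscalable routine, the symbolic pass above keeps the condensed
   linked list at an initial length of pn+20 (local columns of P), growing it only on
   demand with PetscLLCondensedExpand_Scalable(), and allocates the numeric accumulator
   apa with apnz_max entries (the longest computed row of A*P) rather than P->cmap->N.
*/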
897: /* This function is needed for the seqMPI matrix-matrix multiplication. */
898: /* Three input arrays are merged into one output array. The size of the */
899: /* output array is also output. Duplicate entries only show up once. */
900: static void Merge3SortedArrays(PetscInt size1, PetscInt *in1,
901: PetscInt size2, PetscInt *in2,
902: PetscInt size3, PetscInt *in3,
903: PetscInt *size4, PetscInt *out)
904: {
905: int i = 0, j = 0, k = 0, l = 0;
907: /* Traverse all three arrays */
908: while (i<size1 && j<size2 && k<size3) {
909: if (in1[i] < in2[j] && in1[i] < in3[k]) {
910: out[l++] = in1[i++];
911: }
912: else if (in2[j] < in1[i] && in2[j] < in3[k]) {
913: out[l++] = in2[j++];
914: }
915: else if (in3[k] < in1[i] && in3[k] < in2[j]) {
916: out[l++] = in3[k++];
917: }
918: else if (in1[i] == in2[j] && in1[i] < in3[k]) {
919: out[l++] = in1[i];
920: i++, j++;
921: }
922: else if (in1[i] == in3[k] && in1[i] < in2[j]) {
923: out[l++] = in1[i];
924: i++, k++;
925: }
926: else if (in3[k] == in2[j] && in2[j] < in1[i]) {
927: out[l++] = in2[j];
928: k++, j++;
929: }
930: else if (in1[i] == in2[j] && in1[i] == in3[k]) {
931: out[l++] = in1[i];
932: i++, j++, k++;
933: }
934: }
936: /* Traverse two remaining arrays */
937: while (i<size1 && j<size2) {
938: if (in1[i] < in2[j]) {
939: out[l++] = in1[i++];
940: }
941: else if (in1[i] > in2[j]) {
942: out[l++] = in2[j++];
943: }
944: else {
945: out[l++] = in1[i];
946: i++, j++;
947: }
948: }
950: while (i<size1 && k<size3) {
951: if (in1[i] < in3[k]) {
952: out[l++] = in1[i++];
953: }
954: else if (in1[i] > in3[k]) {
955: out[l++] = in3[k++];
956: }
957: else {
958: out[l++] = in1[i];
959: i++, k++;
960: }
961: }
963: while (k<size3 && j<size2) {
964: if (in3[k] < in2[j]) {
965: out[l++] = in3[k++];
966: }
967: else if (in3[k] > in2[j]) {
968: out[l++] = in2[j++];
969: }
970: else {
971: out[l++] = in3[k];
972: k++, j++;
973: }
974: }
976: /* Traverse one remaining array */
977: while (i<size1) out[l++] = in1[i++];
978: while (j<size2) out[l++] = in2[j++];
979: while (k<size3) out[l++] = in3[k++];
981: *size4 = l;
982: }
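/*
   Worked example for Merge3SortedArrays(), assuming sorted inputs and an output buffer of
   at least size1+size2+size3 entries:

     PetscInt in1[] = {1,3}, in2[] = {2,3,6}, in3[] = {3,7};
     PetscInt out[7], n;
     Merge3SortedArrays(2,in1,3,in2,2,in3,&n,out);

   afterwards n == 5 and out holds {1,2,3,6,7}; the entry 3, present in all three inputs,
   appears only once.
*/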
984: /* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and */
985: /* adds up the products. Two of these three multiplications are performed with existing (sequential) */
986: /* matrix-matrix multiplications. */
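/* In terms of the local blocks the split used below is
      C = A*P = A_diag*P_loc_diag + A_diag*P_loc_off + A_off*P_oth,
   where A_diag*P_loc_diag is computed with a sequential MatProduct (adpd), A_off*P_oth with
   MatMatMultSymbolic_SeqAIJ_SeqAIJ() (aopoth), and A_diag*P_loc_off with the linked-list
   pass inside this routine; the three sorted column patterns are then combined row by row
   with Merge3SortedArrays(). */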
987: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
988: {
989: PetscErrorCode ierr;
990: MPI_Comm comm;
991: PetscMPIInt size;
992: Mat_APMPI *ptap;
993: PetscFreeSpaceList free_space_diag=NULL, current_space=NULL;
994: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data;
995: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc;
996: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data;
997: Mat_SeqAIJ *adpd_seq, *p_off, *aopoth_seq;
998: PetscInt adponz, adpdnz;
999: PetscInt *pi_loc,*dnz,*onz;
1000: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,rstart=A->rmap->rstart;
1001: PetscInt *lnk,i, i1=0,pnz,row,*adpoi,*adpoj, *api, *adpoJ, *aopJ, *apJ,*Jptr, aopnz, nspacedouble=0,j,nzi,
1002: *apj,apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
1003: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n, p_colstart, p_colend;
1004: PetscBT lnkbt;
1005: PetscReal afill;
1006: PetscMPIInt rank;
1007: Mat adpd, aopoth;
1008: MatType mtype;
1009: const char *prefix;
1012: MatCheckProduct(C,4);
1013: if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
1014: PetscObjectGetComm((PetscObject)A,&comm);
1015: MPI_Comm_size(comm,&size);
1016: MPI_Comm_rank(comm, &rank);
1017: MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend);
1019: /* create struct Mat_APMPI and attach it to C later */
1020: PetscNew(&ptap);
1022: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
1023: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
1025: /* get P_loc by taking all local rows of P */
1026: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
1029: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1030: pi_loc = p_loc->i;
1032: /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
1033: PetscMalloc1(am+2,&api);
1034: PetscMalloc1(am+2,&adpoi);
1036: adpoi[0] = 0;
1037: ptap->api = api;
1038: api[0] = 0;
1040: /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
1041: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
1042: MatPreallocateInitialize(comm,am,pn,dnz,onz);
1044: /* Symbolic calc of A_loc_diag * P_loc_diag */
1045: MatGetOptionsPrefix(A,&prefix);
1046: MatProductCreate(a->A,p->A,NULL,&adpd);
1047: MatGetOptionsPrefix(A,&prefix);
1048: MatSetOptionsPrefix(adpd,prefix);
1049: MatAppendOptionsPrefix(adpd,"inner_diag_");
1051: MatProductSetType(adpd,MATPRODUCT_AB);
1052: MatProductSetAlgorithm(adpd,"sorted");
1053: MatProductSetFill(adpd,fill);
1054: MatProductSetFromOptions(adpd);
1055: MatProductSymbolic(adpd);
1057: adpd_seq = (Mat_SeqAIJ*)((adpd)->data);
1058: adpdi = adpd_seq->i; adpdj = adpd_seq->j;
1059: p_off = (Mat_SeqAIJ*)((p->B)->data);
1060: poff_i = p_off->i; poff_j = p_off->j;
1062: /* j_temp stores indices of a result row before they are added to the linked list */
1063: PetscMalloc1(pN+2,&j_temp);
1066: /* Symbolic calc of the A_diag * p_loc_off */
1067: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
1068: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space_diag);
1069: current_space = free_space_diag;
1071: for (i=0; i<am; i++) {
1072: /* A_diag * P_loc_off */
1073: nzi = adi[i+1] - adi[i];
1074: for (j=0; j<nzi; j++) {
1075: row = *adj++;
1076: pnz = poff_i[row+1] - poff_i[row];
1077: Jptr = poff_j + poff_i[row];
1078: for (i1 = 0; i1 < pnz; i1++) {
1079: j_temp[i1] = p->garray[Jptr[i1]];
1080: }
1081: /* add non-zero cols of P into the sorted linked list lnk */
1082: PetscLLCondensedAddSorted(pnz,j_temp,lnk,lnkbt);
1083: }
1085: adponz = lnk[0];
1086: adpoi[i+1] = adpoi[i] + adponz;
1088: /* if free space is not available, double the total space in the list */
1089: if (current_space->local_remaining<adponz) {
1090: PetscFreeSpaceGet(PetscIntSumTruncate(adponz,current_space->total_array_size),&current_space);
1091: nspacedouble++;
1092: }
1094: /* Copy data into free space, then initialize lnk */
1095: PetscLLCondensedClean(pN,adponz,current_space->array,lnk,lnkbt);
1097: current_space->array += adponz;
1098: current_space->local_used += adponz;
1099: current_space->local_remaining -= adponz;
1100: }
1102: /* Symbolic calc of A_off * P_oth */
1103: MatSetOptionsPrefix(a->B,prefix);
1104: MatAppendOptionsPrefix(a->B,"inner_offdiag_");
1105: MatCreate(PETSC_COMM_SELF,&aopoth);
1106: MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth);
1107: aopoth_seq = (Mat_SeqAIJ*)((aopoth)->data);
1108: aopothi = aopoth_seq->i; aopothj = aopoth_seq->j;
1110: /* Allocate space for apj, adpj, aopj, ... */
1111: /* destroy lists of free space and other temporary array(s) */
1113: PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am]+2, &ptap->apj);
1114: PetscMalloc1(adpoi[am]+2, &adpoj);
1116: /* Copy from linked list to j-array */
1117: PetscFreeSpaceContiguous(&free_space_diag,adpoj);
1118: PetscLLDestroy(lnk,lnkbt);
1120: adpoJ = adpoj;
1121: adpdJ = adpdj;
1122: aopJ = aopothj;
1123: apj = ptap->apj;
1124: apJ = apj; /* still empty */
1126: /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
1127: /* A_diag * P_loc_diag to get A*P */
1128: for (i = 0; i < am; i++) {
1129: aopnz = aopothi[i+1] - aopothi[i];
1130: adponz = adpoi[i+1] - adpoi[i];
1131: adpdnz = adpdi[i+1] - adpdi[i];
1133: /* Correct indices from A_diag*P_diag */
1134: for (i1 = 0; i1 < adpdnz; i1++) {
1135: adpdJ[i1] += p_colstart;
1136: }
1137: /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
1138: Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
1139: MatPreallocateSet(i+rstart, apnz, apJ, dnz, onz);
1141: aopJ += aopnz;
1142: adpoJ += adponz;
1143: adpdJ += adpdnz;
1144: apJ += apnz;
1145: api[i+1] = api[i] + apnz;
1146: }
1148: /* malloc apa to store dense row A[i,:]*P */
1149: PetscCalloc1(pN+2,&ptap->apa);
1151: /* create and assemble symbolic parallel matrix C */
1152: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
1153: MatSetBlockSizesFromMats(C,A,P);
1154: MatGetType(A,&mtype);
1155: MatSetType(C,mtype);
1156: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1157: MatPreallocateFinalize(dnz,onz);
1159: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
1160: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1161: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1162: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
1164: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1165: C->ops->productnumeric = MatProductNumeric_AB;
1167: /* attach the supporting struct to C for reuse */
1168: C->product->data = ptap;
1169: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
1171: /* set MatInfo */
1172: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
1173: if (afill < 1.0) afill = 1.0;
1174: C->info.mallocs = nspacedouble;
1175: C->info.fill_ratio_given = fill;
1176: C->info.fill_ratio_needed = afill;
1178: #if defined(PETSC_USE_INFO)
1179: if (api[am]) {
1180: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
1181: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);
1182: } else {
1183: PetscInfo(C,"Empty matrix product\n");
1184: }
1185: #endif
1187: MatDestroy(&aopoth);
1188: MatDestroy(&adpd);
1189: PetscFree(j_temp);
1190: PetscFree(adpoj);
1191: PetscFree(adpoi);
1192: return(0);
1193: }
1195: /*-------------------------------------------------------------------------*/
1196: /* This routine only works when scall=MAT_REUSE_MATRIX! */
1197: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C)
1198: {
1200: Mat_APMPI *ptap;
1201: Mat Pt;
1204: MatCheckProduct(C,3);
1205: ptap = (Mat_APMPI*)C->product->data;
1206: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
1207: if (!ptap->Pt) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1209: Pt = ptap->Pt;
1210: MatTranspose(P,MAT_REUSE_MATRIX,&Pt);
1211: MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt,A,C);
1212: return(0);
1213: }
1215: /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
1216: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat C)
1217: {
1218: PetscErrorCode ierr;
1219: Mat_APMPI *ptap;
1220: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data;
1221: MPI_Comm comm;
1222: PetscMPIInt size,rank;
1223: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1224: PetscInt pn=P->cmap->n,aN=A->cmap->N,an=A->cmap->n;
1225: PetscInt *lnk,i,k,nsend,rstart;
1226: PetscBT lnkbt;
1227: PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,nrecv;
1228: PETSC_UNUSED PetscMPIInt icompleted=0;
1229: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1230: PetscInt len,proc,*dnz,*onz,*owners,nzi;
1231: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1232: MPI_Request *swaits,*rwaits;
1233: MPI_Status *sstatus,rstatus;
1234: PetscLayout rowmap;
1235: PetscInt *owners_co,*coi,*coj; /* i and j arrays of (p->B)^T*A - used in the communication */
1236: PetscMPIInt *len_r,*id_r; /* lengths and source ids of the messages to be received */
1237: PetscInt *Jptr,*prmap=p->garray,con,j,Crmax;
1238: Mat_SeqAIJ *a_loc,*c_loc,*c_oth;
1239: PetscTable ta;
1240: MatType mtype;
1241: const char *prefix;
1244: PetscObjectGetComm((PetscObject)A,&comm);
1245: MPI_Comm_size(comm,&size);
1246: MPI_Comm_rank(comm,&rank);
1248: /* create symbolic parallel matrix C */
1249: MatGetType(A,&mtype);
1250: MatSetType(C,mtype);
1252: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1254: /* create struct Mat_APMPI and attach it to C later */
1255: PetscNew(&ptap);
1256: ptap->reuse = MAT_INITIAL_MATRIX;
1258: /* (0) compute Rd = Pd^T, Ro = Po^T */
1259: /* --------------------------------- */
1260: MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);
1261: MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);
1263: /* (1) compute symbolic A_loc */
1264: /* ---------------------------*/
1265: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&ptap->A_loc);
1267: /* (2-1) compute symbolic C_oth = Ro*A_loc */
1268: /* ------------------------------------ */
1269: MatGetOptionsPrefix(A,&prefix);
1270: MatSetOptionsPrefix(ptap->Ro,prefix);
1271: MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");
1272: MatCreate(PETSC_COMM_SELF,&ptap->C_oth);
1273: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->A_loc,fill,ptap->C_oth);
1275: /* (3) send coj of C_oth to other processors */
1276: /* ------------------------------------------ */
1277: /* determine row ownership */
1278: PetscLayoutCreate(comm,&rowmap);
1279: rowmap->n = pn;
1280: rowmap->bs = 1;
1281: PetscLayoutSetUp(rowmap);
1282: owners = rowmap->range;
1284: /* determine the number of messages to send, their lengths */
1285: PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);
1286: PetscArrayzero(len_s,size);
1287: PetscArrayzero(len_si,size);
1289: c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
1290: coi = c_oth->i; coj = c_oth->j;
1291: con = ptap->C_oth->rmap->n;
1292: proc = 0;
1293: for (i=0; i<con; i++) {
1294: while (prmap[i] >= owners[proc+1]) proc++;
1295: len_si[proc]++; /* num of rows in Co(=Pt*A) to be sent to [proc] */
1296: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1297: }
1299: len = 0; /* max length of buf_si[], see (4) */
1300: owners_co[0] = 0;
1301: nsend = 0;
1302: for (proc=0; proc<size; proc++) {
1303: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1304: if (len_s[proc]) {
1305: nsend++;
1306: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1307: len += len_si[proc];
1308: }
1309: }
1311: /* determine the number and length of messages to receive for coi and coj */
1312: PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);
1313: PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);
1315: /* post the Irecv and Isend of coj */
1316: PetscCommGetNewTag(comm,&tagj);
1317: PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);
1318: PetscMalloc1(nsend+1,&swaits);
1319: for (proc=0, k=0; proc<size; proc++) {
1320: if (!len_s[proc]) continue;
1321: i = owners_co[proc];
1322: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1323: k++;
1324: }
1326: /* (2-2) compute symbolic C_loc = Rd*A_loc */
1327: /* ---------------------------------------- */
1328: MatSetOptionsPrefix(ptap->Rd,prefix);
1329: MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");
1330: MatCreate(PETSC_COMM_SELF,&ptap->C_loc);
1331: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->A_loc,fill,ptap->C_loc);
1332: c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
1334: /* receives of coj are complete */
1335: for (i=0; i<nrecv; i++) {
1336: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
1337: }
1338: PetscFree(rwaits);
1339: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
1341: /* add received column indices into ta to update Crmax */
1342: a_loc = (Mat_SeqAIJ*)(ptap->A_loc)->data;
1344: /* create and initialize a table for computing Crmax */
1345: PetscTableCreate(an,aN,&ta);
1346: MatRowMergeMax_SeqAIJ(a_loc,ptap->A_loc->rmap->N,ta);
1348: for (k=0; k<nrecv; k++) {/* k-th received message */
1349: Jptr = buf_rj[k];
1350: for (j=0; j<len_r[k]; j++) {
1351: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
1352: }
1353: }
1354: PetscTableGetCount(ta,&Crmax);
1355: PetscTableDestroy(&ta);
1357: /* (4) send and recv coi */
1358: /*-----------------------*/
1359: PetscCommGetNewTag(comm,&tagi);
1360: PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);
1361: PetscMalloc1(len+1,&buf_s);
1362: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1363: for (proc=0,k=0; proc<size; proc++) {
1364: if (!len_s[proc]) continue;
1365: /* form outgoing message for i-structure:
1366: buf_si[0]: nrows to be sent
1367: [1:nrows]: row index (global)
1368: [nrows+1:2*nrows+1]: i-structure index
1369: */
1370: /*-------------------------------------------*/
1371: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
1372: buf_si_i = buf_si + nrows+1;
1373: buf_si[0] = nrows;
1374: buf_si_i[0] = 0;
1375: nrows = 0;
1376: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1377: nzi = coi[i+1] - coi[i];
1378: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1379: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1380: nrows++;
1381: }
1382: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1383: k++;
1384: buf_si += len_si[proc];
1385: }
1386: for (i=0; i<nrecv; i++) {
1387: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
1388: }
1389: PetscFree(rwaits);
1390: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
1392: PetscFree4(len_s,len_si,sstatus,owners_co);
1393: PetscFree(len_ri);
1394: PetscFree(swaits);
1395: PetscFree(buf_s);
1397: /* (5) compute the local portion of C */
1398: /* ------------------------------------------ */
1399: /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of C */
1400: PetscFreeSpaceGet(Crmax,&free_space);
1401: current_space = free_space;
1403: PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);
1404: for (k=0; k<nrecv; k++) {
1405: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1406: nrows = *buf_ri_k[k];
1407: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
1408: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
1409: }
1411: MatPreallocateInitialize(comm,pn,an,dnz,onz);
1412: PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);
1413: for (i=0; i<pn; i++) {
1414: /* add C_loc into C */
1415: nzi = c_loc->i[i+1] - c_loc->i[i];
1416: Jptr = c_loc->j + c_loc->i[i];
1417: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1419: /* add received col data into lnk */
1420: for (k=0; k<nrecv; k++) { /* k-th received message */
1421: if (i == *nextrow[k]) { /* i-th row */
1422: nzi = *(nextci[k]+1) - *nextci[k];
1423: Jptr = buf_rj[k] + *nextci[k];
1424: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1425: nextrow[k]++; nextci[k]++;
1426: }
1427: }
1428: nzi = lnk[0];
1430: /* copy data into free space, then initialize lnk */
1431: PetscLLCondensedClean(aN,nzi,current_space->array,lnk,lnkbt);
1432: MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);
1433: }
1434: PetscFree3(buf_ri_k,nextrow,nextci);
1435: PetscLLDestroy(lnk,lnkbt);
1436: PetscFreeSpaceDestroy(free_space);
1438: /* local sizes and preallocation */
1439: MatSetSizes(C,pn,an,PETSC_DETERMINE,PETSC_DETERMINE);
1440: if (P->cmap->bs > 0) {PetscLayoutSetBlockSize(C->rmap,P->cmap->bs);}
1441: if (A->cmap->bs > 0) {PetscLayoutSetBlockSize(C->cmap,A->cmap->bs);}
1442: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1443: MatPreallocateFinalize(dnz,onz);
1445: /* add C_loc and C_oth to C */
1446: MatGetOwnershipRange(C,&rstart,NULL);
1447: for (i=0; i<pn; i++) {
1448: const PetscInt ncols = c_loc->i[i+1] - c_loc->i[i];
1449: const PetscInt *cols = c_loc->j + c_loc->i[i];
1450: const PetscInt row = rstart + i;
1451: MatSetValues(C,1,&row,ncols,cols,NULL,INSERT_VALUES);
1452: }
1453: for (i=0; i<con; i++) {
1454: const PetscInt ncols = c_oth->i[i+1] - c_oth->i[i];
1455: const PetscInt *cols = c_oth->j + c_oth->i[i];
1456: const PetscInt row = prmap[i];
1457: MatSetValues(C,1,&row,ncols,cols,NULL,INSERT_VALUES);
1458: }
1459: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1460: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1461: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
1463: /* members in merge */
1464: PetscFree(id_r);
1465: PetscFree(len_r);
1466: PetscFree(buf_ri[0]);
1467: PetscFree(buf_ri);
1468: PetscFree(buf_rj[0]);
1469: PetscFree(buf_rj);
1470: PetscLayoutDestroy(&rowmap);
1472: /* attach the supporting struct to C for reuse */
1473: C->product->data = ptap;
1474: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
1475: return(0);
1476: }
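/*
   Structure note: for C = P^T*A the routine above forms Rd = Pd^T and Ro = Po^T, takes
   A_loc = all local rows of A, and computes the patterns of C_loc = Rd*A_loc (rows of C
   owned by this process) and C_oth = Ro*A_loc (rows owned by other processes). The i/j
   structure of C_oth is exchanged (the coi/coj messages) only to preallocate C; the
   matching numeric routine below recomputes C_loc and C_oth and adds both into C with
   MatSetValues()/ADD_VALUES, letting MatAssemblyBegin()/MatAssemblyEnd() deliver the
   off-process rows.
*/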
1478: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C)
1479: {
1480: PetscErrorCode ierr;
1481: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data;
1482: Mat_SeqAIJ *c_seq;
1483: Mat_APMPI *ptap;
1484: Mat A_loc,C_loc,C_oth;
1485: PetscInt i,rstart,rend,cm,ncols,row;
1486: const PetscInt *cols;
1487: const PetscScalar *vals;
1490: MatCheckProduct(C,3);
1491: ptap = (Mat_APMPI*)C->product->data;
1492: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
1493: if (!ptap->A_loc) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1494: MatZeroEntries(C);
1496: if (ptap->reuse == MAT_REUSE_MATRIX) {
1497: /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1498: /* 1) get R = Pd^T, Ro = Po^T */
1499: /*----------------------------*/
1500: MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);
1501: MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);
1503: /* 2) compute numeric A_loc */
1504: /*--------------------------*/
1505: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&ptap->A_loc);
1506: }
1508: /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1509: A_loc = ptap->A_loc;
1510: ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,A_loc,ptap->C_loc);
1511: ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,A_loc,ptap->C_oth);
1512: C_loc = ptap->C_loc;
1513: C_oth = ptap->C_oth;
1515: /* add C_loc and C_oth to C */
1516: MatGetOwnershipRange(C,&rstart,&rend);
1518: /* C_loc -> C */
1519: cm = C_loc->rmap->N;
1520: c_seq = (Mat_SeqAIJ*)C_loc->data;
1521: cols = c_seq->j;
1522: vals = c_seq->a;
1523: for (i=0; i<cm; i++) {
1524: ncols = c_seq->i[i+1] - c_seq->i[i];
1525: row = rstart + i;
1526: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1527: cols += ncols; vals += ncols;
1528: }
1530: /* Co -> C, off-processor part */
1531: cm = C_oth->rmap->N;
1532: c_seq = (Mat_SeqAIJ*)C_oth->data;
1533: cols = c_seq->j;
1534: vals = c_seq->a;
1535: for (i=0; i<cm; i++) {
1536: ncols = c_seq->i[i+1] - c_seq->i[i];
1537: row = p->garray[i];
1538: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1539: cols += ncols; vals += ncols;
1540: }
1541: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1542: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1543: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
1545: ptap->reuse = MAT_REUSE_MATRIX;
1546: return(0);
1547: }
1549: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C)
1550: {
1551: PetscErrorCode ierr;
1552: Mat_Merge_SeqsToMPI *merge;
1553: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data;
1554: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1555: Mat_APMPI *ptap;
1556: PetscInt *adj;
1557: PetscInt i,j,k,anz,pnz,row,*cj,nexta;
1558: MatScalar *ada,*ca,valtmp;
1559: PetscInt am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1560: MPI_Comm comm;
1561: PetscMPIInt size,rank,taga,*len_s;
1562: PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1563: PetscInt **buf_ri,**buf_rj;
1564: PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1565: MPI_Request *s_waits,*r_waits;
1566: MPI_Status *status;
1567: MatScalar **abuf_r,*ba_i,*pA,*coa,*ba;
1568: PetscInt *ai,*aj,*coi,*coj,*poJ,*pdJ;
1569: Mat A_loc;
1570: Mat_SeqAIJ *a_loc;
1573: MatCheckProduct(C,3);
1574: ptap = (Mat_APMPI*)C->product->data;
1575: if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
1576: if (!ptap->A_loc) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1577: PetscObjectGetComm((PetscObject)C,&comm);
1578: MPI_Comm_size(comm,&size);
1579: MPI_Comm_rank(comm,&rank);
1581: merge = ptap->merge;
1583: /* 2) compute numeric C_seq = P_loc^T*A_loc */
1584: /*------------------------------------------*/
1585: /* get data from symbolic products */
1586: coi = merge->coi; coj = merge->coj;
1587: PetscCalloc1(coi[pon]+1,&coa);
1588: bi = merge->bi; bj = merge->bj;
1589: owners = merge->rowmap->range;
1590: PetscCalloc1(bi[cm]+1,&ba);
1592: /* get A_loc by taking all local rows of A */
1593: A_loc = ptap->A_loc;
1594: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
1595: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1596: ai = a_loc->i;
1597: aj = a_loc->j;
1599: for (i=0; i<am; i++) {
1600: anz = ai[i+1] - ai[i];
1601: adj = aj + ai[i];
1602: ada = a_loc->a + ai[i];
1604: /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1605: /*-------------------------------------------------------------*/
1606: /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1607: pnz = po->i[i+1] - po->i[i];
1608: poJ = po->j + po->i[i];
1609: pA = po->a + po->i[i];
1610: for (j=0; j<pnz; j++) {
1611: row = poJ[j];
1612: cj = coj + coi[row];
1613: ca = coa + coi[row];
1614: /* perform sparse axpy */
1615: nexta = 0;
1616: valtmp = pA[j];
1617: for (k=0; nexta<anz; k++) {
1618: if (cj[k] == adj[nexta]) {
1619: ca[k] += valtmp*ada[nexta];
1620: nexta++;
1621: }
1622: }
1623: PetscLogFlops(2.0*anz);
1624: }
1626: /* put the value into Cd (diagonal part) */
1627: pnz = pd->i[i+1] - pd->i[i];
1628: pdJ = pd->j + pd->i[i];
1629: pA = pd->a + pd->i[i];
1630: for (j=0; j<pnz; j++) {
1631: row = pdJ[j];
1632: cj = bj + bi[row];
1633: ca = ba + bi[row];
1634: /* perform sparse axpy */
1635: nexta = 0;
1636: valtmp = pA[j];
1637: for (k=0; nexta<anz; k++) {
1638: if (cj[k] == adj[nexta]) {
1639: ca[k] += valtmp*ada[nexta];
1640: nexta++;
1641: }
1642: }
1643: PetscLogFlops(2.0*anz);
1644: }
1645: }
1647: /* 3) send and recv matrix values coa */
1648: /*------------------------------------*/
1649: buf_ri = merge->buf_ri;
1650: buf_rj = merge->buf_rj;
1651: len_s = merge->len_s;
1652: PetscCommGetNewTag(comm,&taga);
1653: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
1655: PetscMalloc2(merge->nsend+1,&s_waits,size,&status);
1656: for (proc=0,k=0; proc<size; proc++) {
1657: if (!len_s[proc]) continue;
1658: i = merge->owners_co[proc];
1659: MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
1660: k++;
1661: }
1662: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
1663: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
1665: PetscFree2(s_waits,status);
1666: PetscFree(r_waits);
1667: PetscFree(coa);
1669: /* 4) insert local Cseq and received values into Cmpi */
1670: /*----------------------------------------------------*/
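/*
   abuf_r[k] holds the numeric values just received from the k-th sender; they are laid out
   in the same order as the column indices buf_rj[k] exchanged in the symbolic phase, so for
   a given row the values and columns are paired through the common offset *nextci[k].
*/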
1671: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1672: for (k=0; k<merge->nrecv; k++) {
1673: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1674: nrows = *(buf_ri_k[k]);
1675: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th received i-structure */
1676: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1677: }
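/*
   Each received i-structure buf_ri[k] is packed as [nrows, nrows local row indices,
   nrows+1 row offsets] (see the packing loop in the symbolic routine); hence nextci[k]
   starts at offset nrows+1, and *(nextci[k]+1) - *nextci[k] below is the number of values
   received for the current row.
*/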
1679: for (i=0; i<cm; i++) {
1680: row = owners[rank] + i; /* global row index of C_seq */
1681: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
1682: ba_i = ba + bi[i];
1683: bnz = bi[i+1] - bi[i];
1684: /* add received vals into ba */
1685: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1686: /* i-th row */
1687: if (i == *nextrow[k]) {
1688: cnz = *(nextci[k]+1) - *nextci[k];
1689: cj = buf_rj[k] + *(nextci[k]);
1690: ca = abuf_r[k] + *(nextci[k]);
1691: nextcj = 0;
1692: for (j=0; nextcj<cnz; j++) {
1693: if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1694: ba_i[j] += ca[nextcj++];
1695: }
1696: }
1697: nextrow[k]++; nextci[k]++;
1698: PetscLogFlops(2.0*cnz);
1699: }
1700: }
1701: MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
1702: }
1703: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1704: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1706: PetscFree(ba);
1707: PetscFree(abuf_r[0]);
1708: PetscFree(abuf_r);
1709: PetscFree3(buf_ri_k,nextrow,nextci);
1710: return(0);
1711: }
1713: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat C)
1714: {
1715: PetscErrorCode ierr;
1716: Mat A_loc,POt,PDt;
1717: Mat_APMPI *ptap;
1718: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1719: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data;
1720: PetscInt *pdti,*pdtj,*poti,*potj,*ptJ;
1721: PetscInt nnz;
1722: PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
1723: PetscInt am =A->rmap->n,pn=P->cmap->n;
1724: MPI_Comm comm;
1725: PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
1726: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1727: PetscInt len,proc,*dnz,*onz,*owners;
1728: PetscInt nzi,*bi,*bj;
1729: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1730: MPI_Request *swaits,*rwaits;
1731: MPI_Status *sstatus,rstatus;
1732: Mat_Merge_SeqsToMPI *merge;
1733: PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
1734: PetscReal afill =1.0,afill_tmp;
1735: PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Armax;
1736: Mat_SeqAIJ *a_loc,*pdt,*pot;
1737: PetscTable ta;
1738: MatType mtype;
1741: PetscObjectGetComm((PetscObject)A,&comm);
1742: /* check if matrix local sizes are compatible */
1743: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
1745: MPI_Comm_size(comm,&size);
1746: MPI_Comm_rank(comm,&rank);
1748: /* create struct Mat_APMPI and attach it to C later */
1749: PetscNew(&ptap);
1751: /* get A_loc by taking all local rows of A */
1752: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
1754: ptap->A_loc = A_loc;
1755: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1756: ai = a_loc->i;
1757: aj = a_loc->j;
1759: /* determine symbolic Co=(p->B)^T*A - send to others */
1760: /*----------------------------------------------------*/
1761: MatTransposeSymbolic_SeqAIJ(p->A,&PDt);
1762: pdt = (Mat_SeqAIJ*)PDt->data;
1763: pdti = pdt->i; pdtj = pdt->j;
1765: MatTransposeSymbolic_SeqAIJ(p->B,&POt);
1766: pot = (Mat_SeqAIJ*)POt->data;
1767: poti = pot->i; potj = pot->j;
1769: /* then, compute symbolic Co = (p->B)^T*A */
1770: pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1771: >= (num of nonzero rows of C_seq) - pn */
1772: PetscMalloc1(pon+1,&coi);
1773: coi[0] = 0;
1775: /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1776: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
1777: PetscFreeSpaceGet(nnz,&free_space);
1778: current_space = free_space;
1780: /* create and initialize a linked list */
1781: PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);
1782: MatRowMergeMax_SeqAIJ(a_loc,am,ta);
1783: PetscTableGetCount(ta,&Armax);
1785: PetscLLCondensedCreate_Scalable(Armax,&lnk);
1787: for (i=0; i<pon; i++) {
1788: pnz = poti[i+1] - poti[i];
1789: ptJ = potj + poti[i];
1790: for (j=0; j<pnz; j++) {
1791: row = ptJ[j]; /* row of A_loc == col of Pot */
1792: anz = ai[row+1] - ai[row];
1793: Jptr = aj + ai[row];
1794: /* add non-zero cols of AP into the sorted linked list lnk */
1795: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1796: }
1797: nnz = lnk[0];
1799: /* If free space is not available, double the total space in the list */
1800: if (current_space->local_remaining<nnz) {
1801: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1802: nspacedouble++;
1803: }
1805: /* Copy data into free space, then reinitialize lnk */
1806: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
1808: current_space->array += nnz;
1809: current_space->local_used += nnz;
1810: current_space->local_remaining -= nnz;
1812: coi[i+1] = coi[i] + nnz;
1813: }
1815: PetscMalloc1(coi[pon]+1,&coj);
1816: PetscFreeSpaceContiguous(&free_space,coj);
1817: PetscLLCondensedDestroy_Scalable(lnk); /* must destroy to get a new one for C */
1819: afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
1820: if (afill_tmp > afill) afill = afill_tmp;
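/*
   afill_tmp is the fill that was actually needed for Co, i.e. nnz(Co)/(nnz(Po^T) + nnz(A_loc));
   the maximum over Co and the local part of C (computed further below) is reported through
   PetscInfo at the end of this routine, together with nspacedouble, so that the fill argument
   can be tuned for subsequent calls.
*/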
1822: /* send j-array (coj) of Co to other processors */
1823: /*----------------------------------------------*/
1824: /* determine row ownership */
1825: PetscNew(&merge);
1826: PetscLayoutCreate(comm,&merge->rowmap);
1828: merge->rowmap->n = pn;
1829: merge->rowmap->bs = 1;
1831: PetscLayoutSetUp(merge->rowmap);
1832: owners = merge->rowmap->range;
1834: /* determine the number of messages to send, their lengths */
1835: PetscCalloc1(size,&len_si);
1836: PetscCalloc1(size,&merge->len_s);
1838: len_s = merge->len_s;
1839: merge->nsend = 0;
1841: PetscMalloc1(size+2,&owners_co);
1843: proc = 0;
1844: for (i=0; i<pon; i++) {
1845: while (prmap[i] >= owners[proc+1]) proc++;
1846: len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1847: len_s[proc] += coi[i+1] - coi[i];
1848: }
1850: len = 0; /* total length of buf_s[], i.e. sum of len_si[] over all sends */
1851: owners_co[0] = 0;
1852: for (proc=0; proc<size; proc++) {
1853: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1854: if (len_si[proc]) {
1855: merge->nsend++;
1856: len_si[proc] = 2*(len_si[proc] + 1);
1857: len += len_si[proc];
1858: }
1859: }
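/*
   For each destination, len_s[proc] counts the column indices (and, in the numeric phase, the
   matrix values) to be sent, while len_si[proc] = 2*(nrows+1) is the length of the accompanying
   i-structure message: one row count, nrows local row indices and nrows+1 row offsets (see the
   packing loop further below).
*/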
1861: /* determine the number and length of messages to receive for coi and coj */
1862: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
1863: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
1865: /* post the Irecv and Isend of coj */
1866: PetscCommGetNewTag(comm,&tagj);
1867: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
1868: PetscMalloc1(merge->nsend+1,&swaits);
1869: for (proc=0, k=0; proc<size; proc++) {
1870: if (!len_s[proc]) continue;
1871: i = owners_co[proc];
1872: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1873: k++;
1874: }
1876: /* wait until receives and sends of coj are complete */
1877: PetscMalloc1(size,&sstatus);
1878: for (i=0; i<merge->nrecv; i++) {
1879: PETSC_UNUSED PetscMPIInt icompleted;
1880: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1881: }
1882: PetscFree(rwaits);
1883: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
1885: /* add received column indices into table to update Armax */
1886: /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1887: for (k=0; k<merge->nrecv; k++) {/* k-th received message */
1888: Jptr = buf_rj[k];
1889: for (j=0; j<merge->len_r[k]; j++) {
1890: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
1891: }
1892: }
1893: PetscTableGetCount(ta,&Armax);
1894: /* printf("Armax %d, an %d + Bn %d = %d, aN %d\n",Armax,A->cmap->n,a->B->cmap->N,A->cmap->n+a->B->cmap->N,aN); */
1896: /* send and recv coi */
1897: /*-------------------*/
1898: PetscCommGetNewTag(comm,&tagi);
1899: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
1900: PetscMalloc1(len+1,&buf_s);
1901: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1902: for (proc=0,k=0; proc<size; proc++) {
1903: if (!len_s[proc]) continue;
1904: /* form outgoing message for i-structure:
1905: buf_si[0]: nrows to be sent
1906: [1:nrows]: row index (local to the receiving process)
1907: [nrows+1:2*nrows+1]: i-structure index
1908: */
1909: /*-------------------------------------------*/
1910: nrows = len_si[proc]/2 - 1;
1911: buf_si_i = buf_si + nrows+1;
1912: buf_si[0] = nrows;
1913: buf_si_i[0] = 0;
1914: nrows = 0;
1915: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1916: nzi = coi[i+1] - coi[i];
1917: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1918: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1919: nrows++;
1920: }
1921: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1922: k++;
1923: buf_si += len_si[proc];
1924: }
1925: i = merge->nrecv;
1926: while (i--) {
1927: PETSC_UNUSED PetscMPIInt icompleted;
1928: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1929: }
1930: PetscFree(rwaits);
1931: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
1932: PetscFree(len_si);
1933: PetscFree(len_ri);
1934: PetscFree(swaits);
1935: PetscFree(sstatus);
1936: PetscFree(buf_s);
1938: /* compute the local portion of C (mpi mat) */
1939: /*------------------------------------------*/
1940: /* allocate bi array and free space for accumulating nonzero column info */
1941: PetscMalloc1(pn+1,&bi);
1942: bi[0] = 0;
1944: /* set initial free space to be fill*(nnz(P) + nnz(A_loc)) */
1945: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
1946: PetscFreeSpaceGet(nnz,&free_space);
1947: current_space = free_space;
1949: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1950: for (k=0; k<merge->nrecv; k++) {
1951: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1952: nrows = *buf_ri_k[k];
1953: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */
1954: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1955: }
1957: PetscLLCondensedCreate_Scalable(Armax,&lnk);
1958: MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);
1959: rmax = 0;
1960: for (i=0; i<pn; i++) {
1961: /* add pdt[i,:]*AP into lnk */
1962: pnz = pdti[i+1] - pdti[i];
1963: ptJ = pdtj + pdti[i];
1964: for (j=0; j<pnz; j++) {
1965: row = ptJ[j]; /* row of A_loc == col of Pdt */
1966: anz = ai[row+1] - ai[row];
1967: Jptr = aj + ai[row];
1968: /* add non-zero cols of AP into the sorted linked list lnk */
1969: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1970: }
1972: /* add received col data into lnk */
1973: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1974: if (i == *nextrow[k]) { /* i-th row */
1975: nzi = *(nextci[k]+1) - *nextci[k];
1976: Jptr = buf_rj[k] + *nextci[k];
1977: PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);
1978: nextrow[k]++; nextci[k]++;
1979: }
1980: }
1981: nnz = lnk[0];
1983: /* if free space is not available, make more free space */
1984: if (current_space->local_remaining<nnz) {
1985: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1986: nspacedouble++;
1987: }
1988: /* copy data into free space, then initialize lnk */
1989: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
1990: MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);
1992: current_space->array += nnz;
1993: current_space->local_used += nnz;
1994: current_space->local_remaining -= nnz;
1996: bi[i+1] = bi[i] + nnz;
1997: if (nnz > rmax) rmax = nnz;
1998: }
1999: PetscFree3(buf_ri_k,nextrow,nextci);
2001: PetscMalloc1(bi[pn]+1,&bj);
2002: PetscFreeSpaceContiguous(&free_space,bj);
2003: afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
2004: if (afill_tmp > afill) afill = afill_tmp;
2005: PetscLLCondensedDestroy_Scalable(lnk);
2006: PetscTableDestroy(&ta);
2008: MatDestroy(&POt);
2009: MatDestroy(&PDt);
2011: /* create symbolic parallel matrix C - why can this not be assembled in the Numeric part? */
2012: /*-------------------------------------------------------------------------------*/
2013: MatSetSizes(C,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);
2014: MatSetBlockSizes(C,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));
2015: MatGetType(A,&mtype);
2016: MatSetType(C,mtype);
2017: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
2018: MatPreallocateFinalize(dnz,onz);
2019: MatSetBlockSize(C,1);
2020: MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
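/*
   The loop below inserts only the column pattern of each local row (the values argument of
   MatSetValues is NULL), so C leaves this routine assembled with its final nonzero structure,
   preallocated from the dnz/onz counts accumulated by MatPreallocateSet above. The numeric
   routine later fills the values with INSERT_VALUES, and MAT_NEW_NONZERO_LOCATION_ERR, set
   after assembly, guards against any change of that pattern.
*/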
2021: for (i=0; i<pn; i++) {
2022: row = i + rstart;
2023: nnz = bi[i+1] - bi[i];
2024: Jptr = bj + bi[i];
2025: MatSetValues(C,1,&row,nnz,Jptr,NULL,INSERT_VALUES);
2026: }
2027: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
2028: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
2029: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
2030: merge->bi = bi;
2031: merge->bj = bj;
2032: merge->coi = coi;
2033: merge->coj = coj;
2034: merge->buf_ri = buf_ri;
2035: merge->buf_rj = buf_rj;
2036: merge->owners_co = owners_co;
2038: /* attach the supporting struct to C for reuse */
2039: C->product->data = ptap;
2040: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2041: ptap->merge = merge;
2043: C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
2045: #if defined(PETSC_USE_INFO)
2046: if (bi[pn] != 0) {
2047: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
2048: PetscInfo1(C,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
2049: } else {
2050: PetscInfo(C,"Empty matrix product\n");
2051: }
2052: #endif
2053: return(0);
2054: }
2056: /* ---------------------------------------------------------------- */
2057: static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2058: {
2060: Mat_Product *product = C->product;
2061: Mat A=product->A,B=product->B;
2062: PetscReal fill=product->fill;
2063: PetscBool flg;
2066: /* scalable */
2067: PetscStrcmp(product->alg,"scalable",&flg);
2068: if (flg) {
2069: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
2070: goto next;
2071: }
2073: /* nonscalable */
2074: PetscStrcmp(product->alg,"nonscalable",&flg);
2075: if (flg) {
2076: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
2077: goto next;
2078: }
2080: /* matmatmult */
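/*
   The "at*b" variant forms an explicit transpose At = A^T and reuses the AB symbolic kernel
   on (At,B); At is stashed in ptap->Pt so that it can be released together with the product
   data by the destroy callback set below.
*/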
2081: PetscStrcmp(product->alg,"at*b",&flg);
2082: if (flg) {
2083: Mat At;
2084: Mat_APMPI *ptap;
2086: MatTranspose(A,MAT_INITIAL_MATRIX,&At);
2087: MatMatMultSymbolic_MPIAIJ_MPIAIJ(At,B,fill,C);
2088: ptap = (Mat_APMPI*)C->product->data;
2089: if (ptap) {
2090: ptap->Pt = At;
2091: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2092: }
2093: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2094: goto next;
2095: }
2097: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct type is not supported");
2099: next:
2100: C->ops->productnumeric = MatProductNumeric_AtB;
2101: return(0);
2102: }
2104: /* ---------------------------------------------------------------- */
2105: /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2106: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2107: {
2109: Mat_Product *product = C->product;
2110: Mat A=product->A,B=product->B;
2111: #if defined(PETSC_HAVE_HYPRE)
2112: const char *algTypes[4] = {"scalable","nonscalable","seqmpi","hypre"};
2113: PetscInt nalg = 4;
2114: #else
2115: const char *algTypes[3] = {"scalable","nonscalable","seqmpi"};
2116: PetscInt nalg = 3;
2117: #endif
2118: PetscInt alg = 1; /* set nonscalable algorithm as default */
2119: PetscBool flg;
2120: MPI_Comm comm;
2123: /* Check matrix local sizes */
2124: PetscObjectGetComm((PetscObject)C,&comm);
2125: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
2127: /* Set "nonscalable" as default algorithm */
2128: PetscStrcmp(C->product->alg,"default",&flg);
2129: if (flg) {
2130: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2132: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2133: if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2134: MatInfo Ainfo,Binfo;
2135: PetscInt nz_local;
2136: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2138: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2139: MatGetInfo(B,MAT_LOCAL,&Binfo);
2140: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2142: if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2143: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2145: if (alg_scalable) {
2146: alg = 0; /* scalable algorithm can be ~50% slower than the nonscalable algorithm */
2147: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2148: PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,product->fill*nz_local);
2149: }
2150: }
2151: }
2153: /* Get runtime option */
2154: if (product->api_user) {
2155: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMult","Mat");
2156: PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2157: PetscOptionsEnd();
2158: } else {
2159: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AB","Mat");
2160: PetscOptionsEList("-matproduct_ab_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2161: PetscOptionsEnd();
2162: }
2163: if (flg) {
2164: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2165: }
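/* The algorithm can also be selected at run time, e.g. -matmatmult_via scalable when C is
   obtained through MatMatMult(), or -matproduct_ab_via scalable when the MatProduct API is
   used (see the option handling above). */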
2167: C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2168: return(0);
2169: }
2171: /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2172: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2173: {
2175: Mat_Product *product = C->product;
2176: Mat A=product->A,B=product->B;
2177: const char *algTypes[3] = {"scalable","nonscalable","at*b"};
2178: PetscInt nalg = 3;
2179: PetscInt alg = 1; /* set default algorithm */
2180: PetscBool flg;
2181: MPI_Comm comm;
2184: /* Check matrix local sizes */
2185: PetscObjectGetComm((PetscObject)C,&comm);
2186: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != B (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
2188: /* Set default algorithm */
2189: PetscStrcmp(C->product->alg,"default",&flg);
2190: if (flg) {
2191: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2192: }
2194: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2195: if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2196: MatInfo Ainfo,Binfo;
2197: PetscInt nz_local;
2198: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2200: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2201: MatGetInfo(B,MAT_LOCAL,&Binfo);
2202: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2204: if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2205: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2207: if (alg_scalable) {
2208: alg = 0; /* scalable algorithm can be ~50% slower than the nonscalable algorithm */
2209: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2210: PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,product->fill*nz_local);
2211: }
2212: }
2214: /* Get runtime option */
2215: if (product->api_user) {
2216: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatTransposeMatMult","Mat");
2217: PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2218: PetscOptionsEnd();
2219: } else {
2220: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AtB","Mat");
2221: PetscOptionsEList("-matproduct_atb_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2222: PetscOptionsEnd();
2223: }
2224: if (flg) {
2225: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2226: }
2228: C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2229: return(0);
2230: }
2232: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2233: {
2235: Mat_Product *product = C->product;
2236: Mat A=product->A,P=product->B;
2237: MPI_Comm comm;
2238: PetscBool flg;
2239: PetscInt alg=1; /* set default algorithm */
2240: #if !defined(PETSC_HAVE_HYPRE)
2241: const char *algTypes[4] = {"scalable","nonscalable","allatonce","allatonce_merged"};
2242: PetscInt nalg=4;
2243: #else
2244: const char *algTypes[5] = {"scalable","nonscalable","allatonce","allatonce_merged","hypre"};
2245: PetscInt nalg=5;
2246: #endif
2247: PetscInt pN=P->cmap->N;
2250: /* Check matrix local sizes */
2251: PetscObjectGetComm((PetscObject)C,&comm);
2252: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
2253: if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
2255: /* Set "nonscalable" as default algorithm */
2256: PetscStrcmp(C->product->alg,"default",&flg);
2257: if (flg) {
2258: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2260: /* Set "scalable" as default if PN and local nonzeros of A and P are large */
2261: if (pN > 100000) {
2262: MatInfo Ainfo,Pinfo;
2263: PetscInt nz_local;
2264: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2266: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2267: MatGetInfo(P,MAT_LOCAL,&Pinfo);
2268: nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
2270: if (pN > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2271: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2273: if (alg_scalable) {
2274: alg = 0; /* scalable algorithm can be ~50% slower than the nonscalable algorithm */
2275: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2276: }
2277: }
2278: }
2280: /* Get runtime option */
2281: if (product->api_user) {
2282: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatPtAP","Mat");
2283: PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
2284: PetscOptionsEnd();
2285: } else {
2286: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_PtAP","Mat");
2287: PetscOptionsEList("-matproduct_ptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
2288: PetscOptionsEnd();
2289: }
2290: if (flg) {
2291: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2292: }
2294: C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2295: return(0);
2296: }
2298: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2299: {
2300: Mat_Product *product = C->product;
2301: Mat A = product->A,R=product->B;
2304: /* Check matrix local sizes */
2305: if (A->cmap->n != R->cmap->n || A->rmap->n != R->cmap->n) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A local (%D, %D), R local (%D,%D)",A->rmap->n,A->cmap->n,R->rmap->n,R->cmap->n);
2307: C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2308: return(0);
2309: }
2311: /*
2312: Set options for ABC = A*B*C = A*(B*C); ABC's algorithm must be chosen from AB's algorithms
2313: */
2314: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2315: {
2317: Mat_Product *product = C->product;
2318: PetscBool flg = PETSC_FALSE;
2319: PetscInt alg = 1; /* default algorithm */
2320: const char *algTypes[3] = {"scalable","nonscalable","seqmpi"};
2321: PetscInt nalg = 3;
2324: /* Set default algorithm */
2325: PetscStrcmp(C->product->alg,"default",&flg);
2326: if (flg) {
2327: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2328: }
2330: /* Get runtime option */
2331: if (product->api_user) {
2332: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMatMult","Mat");
2333: PetscOptionsEList("-matmatmatmult_via","Algorithmic approach","MatMatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2334: PetscOptionsEnd();
2335: } else {
2336: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_ABC","Mat");
2337: PetscOptionsEList("-matproduct_abc_via","Algorithmic approach","MatProduct_ABC",algTypes,nalg,algTypes[alg],&alg,&flg);
2338: PetscOptionsEnd();
2339: }
2340: if (flg) {
2341: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2342: }
2344: C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2345: C->ops->productsymbolic = MatProductSymbolic_ABC;
2346: return(0);
2347: }
2349: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2350: {
2352: Mat_Product *product = C->product;
2355: switch (product->type) {
2356: case MATPRODUCT_AB:
2357: MatProductSetFromOptions_MPIAIJ_AB(C);
2358: break;
2359: case MATPRODUCT_AtB:
2360: MatProductSetFromOptions_MPIAIJ_AtB(C);
2361: break;
2362: case MATPRODUCT_PtAP:
2363: MatProductSetFromOptions_MPIAIJ_PtAP(C);
2364: break;
2365: case MATPRODUCT_RARt:
2366: MatProductSetFromOptions_MPIAIJ_RARt(C);
2367: break;
2368: case MATPRODUCT_ABC:
2369: MatProductSetFromOptions_MPIAIJ_ABC(C);
2370: break;
2371: default:
2372: break;
2373: }
2374: return(0);
2375: }
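/*
   For illustration only, a sketch of a typical user-level call sequence that reaches the
   routines above through the MatProduct API (the convenience interfaces MatMatMult(),
   MatTransposeMatMult() and MatPtAP() set product->api_user and arrive here as well):

     MatProductCreate(A,B,NULL,&C);
     MatProductSetType(C,MATPRODUCT_AB);       .. or MATPRODUCT_AtB, MATPRODUCT_PtAP, ...
     MatProductSetAlgorithm(C,"scalable");     .. optional; "default" triggers the heuristics above
     MatProductSetFromOptions(C);              .. dispatches to MatProductSetFromOptions_MPIAIJ()
     MatProductSymbolic(C);
     MatProductNumeric(C);
     MatDestroy(&C);
*/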