Actual source code: mpimatmatmult.c
2: /*
3: Defines matrix-matrix product routines for pairs of MPIAIJ matrices
4: C = A * B
5: */
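/*
   A minimal usage sketch (not part of this file; A, B, C are hypothetical MATMPIAIJ matrices):
   these routines are normally reached through the MatProduct interface,

       MatProductCreate(A,B,NULL,&C);
       MatProductSetType(C,MATPRODUCT_AB);        (C = A*B)
       MatProductSetAlgorithm(C,"scalable");      (or "nonscalable", "seqmpi", "backend", ...)
       MatProductSetFromOptions(C);
       MatProductSymbolic(C);                     (dispatches to MatProductSymbolic_AB_MPIAIJ_MPIAIJ() below)
       MatProductNumeric(C);

   or through the convenience wrapper MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C).
*/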
6: #include <../src/mat/impls/aij/seq/aij.h>
7: #include <../src/mat/utils/freespace.h>
8: #include <../src/mat/impls/aij/mpi/mpiaij.h>
9: #include <petscbt.h>
10: #include <../src/mat/impls/dense/mpi/mpidense.h>
11: #include <petsc/private/vecimpl.h>
12: #include <petsc/private/sfimpl.h>
14: #if defined(PETSC_HAVE_HYPRE)
15: PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat);
16: #endif
18: PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
19: {
20: Mat_Product *product = C->product;
21: Mat A=product->A,B=product->B;
22: MatProductAlgorithm alg=product->alg;
23: PetscReal fill=product->fill;
24: PetscBool flg;
26: /* scalable */
27: PetscStrcmp(alg,"scalable",&flg);
28: if (flg) {
29: MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
30: return 0;
31: }
33: /* nonscalable */
34: PetscStrcmp(alg,"nonscalable",&flg);
35: if (flg) {
36: MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
37: return 0;
38: }
40: /* seqmpi */
41: PetscStrcmp(alg,"seqmpi",&flg);
42: if (flg) {
43: MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A,B,fill,C);
44: return 0;
45: }
47: /* backend general code */
48: PetscStrcmp(alg,"backend",&flg);
49: if (flg) {
50: MatProductSymbolic_MPIAIJBACKEND(C);
51: return 0;
52: }
54: #if defined(PETSC_HAVE_HYPRE)
55: PetscStrcmp(alg,"hypre",&flg);
56: if (flg) {
57: MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);
58: return 0;
59: }
60: #endif
61: SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_SUP,"Mat Product Algorithm is not supported");
62: }
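/*
   Note on the variants dispatched above (summarizing the implementations in this file):
   "nonscalable" accumulates each row of A*B in a dense work array of length N (the global number
   of columns of B), which is fast but needs O(N) memory per process; "scalable" keeps only a
   condensed sparse accumulator; "seqmpi" forms the product from three sequential products whose
   column indices are then merged; "backend" and "hypre" delegate to device-backend and hypre
   library code, respectively.
*/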
64: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *data)
65: {
66: Mat_APMPI *ptap = (Mat_APMPI*)data;
68: PetscFree2(ptap->startsj_s,ptap->startsj_r);
69: PetscFree(ptap->bufa);
70: MatDestroy(&ptap->P_loc);
71: MatDestroy(&ptap->P_oth);
72: MatDestroy(&ptap->Pt);
73: PetscFree(ptap->api);
74: PetscFree(ptap->apj);
75: PetscFree(ptap->apa);
76: PetscFree(ptap);
77: return 0;
78: }
80: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C)
81: {
82: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
83: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
84: Mat_SeqAIJ *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
85: PetscScalar *cda=cd->a,*coa=co->a;
86: Mat_SeqAIJ *p_loc,*p_oth;
87: PetscScalar *apa,*ca;
88: PetscInt cm =C->rmap->n;
89: Mat_APMPI *ptap;
90: PetscInt *api,*apj,*apJ,i,k;
91: PetscInt cstart=C->cmap->rstart;
92: PetscInt cdnz,conz,k0,k1;
93: const PetscScalar *dummy;
94: MPI_Comm comm;
95: PetscMPIInt size;
97: MatCheckProduct(C,3);
98: ptap = (Mat_APMPI*)C->product->data;
100: PetscObjectGetComm((PetscObject)A,&comm);
101: MPI_Comm_size(comm,&size);
105: /* flag CPU mask for C */
106: #if defined(PETSC_HAVE_DEVICE)
107: if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
108: if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
109: if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
110: #endif
112: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
113: /*-----------------------------------------------------*/
114: /* update numerical values of P_oth and P_loc */
115: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
116: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
118: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
119: /*----------------------------------------------------------*/
120: /* get data from symbolic products */
121: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
122: p_oth = NULL;
123: if (size >1) {
124: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
125: }
127: /* get apa for storing dense row A[i,:]*P */
128: apa = ptap->apa;
130: api = ptap->api;
131: apj = ptap->apj;
132: /* trigger copy to CPU */
133: MatSeqAIJGetArrayRead(a->A,&dummy);
134: MatSeqAIJRestoreArrayRead(a->A,&dummy);
135: MatSeqAIJGetArrayRead(a->B,&dummy);
136: MatSeqAIJRestoreArrayRead(a->B,&dummy);
137: for (i=0; i<cm; i++) {
138: /* compute apa = A[i,:]*P */
139: AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
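/* apa now holds the dense row A[i,:]*P; the loops below scatter it into the leading off-diagonal
   (columns < cstart), diagonal, and trailing off-diagonal parts of C, zeroing each entry as it is
   consumed so apa is clean for the next row */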
141: /* set values in C */
142: apJ = apj + api[i];
143: cdnz = cd->i[i+1] - cd->i[i];
144: conz = co->i[i+1] - co->i[i];
146: /* 1st off-diagonal part of C */
147: ca = coa + co->i[i];
148: k = 0;
149: for (k0=0; k0<conz; k0++) {
150: if (apJ[k] >= cstart) break;
151: ca[k0] = apa[apJ[k]];
152: apa[apJ[k++]] = 0.0;
153: }
155: /* diagonal part of C */
156: ca = cda + cd->i[i];
157: for (k1=0; k1<cdnz; k1++) {
158: ca[k1] = apa[apJ[k]];
159: apa[apJ[k++]] = 0.0;
160: }
162: /* 2nd off-diagonal part of C */
163: ca = coa + co->i[i];
164: for (; k0<conz; k0++) {
165: ca[k0] = apa[apJ[k]];
166: apa[apJ[k++]] = 0.0;
167: }
168: }
169: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
170: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
171: return 0;
172: }
174: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat C)
175: {
177: MPI_Comm comm;
178: PetscMPIInt size;
179: Mat_APMPI *ptap;
180: PetscFreeSpaceList free_space=NULL,current_space=NULL;
181: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
182: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
183: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
184: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
185: PetscInt *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
186: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
187: PetscBT lnkbt;
188: PetscReal afill;
189: MatType mtype;
191: MatCheckProduct(C,4);
193: PetscObjectGetComm((PetscObject)A,&comm);
194: MPI_Comm_size(comm,&size);
196: /* create struct Mat_APMPI and attach it to C later */
197: PetscNew(&ptap);
199: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
200: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
202: /* get P_loc by taking all local rows of P */
203: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
205: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
206: pi_loc = p_loc->i; pj_loc = p_loc->j;
207: if (size > 1) {
208: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
209: pi_oth = p_oth->i; pj_oth = p_oth->j;
210: } else {
211: p_oth = NULL;
212: pi_oth = NULL; pj_oth = NULL;
213: }
215: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
216: /*-------------------------------------------------------------------*/
217: PetscMalloc1(am+2,&api);
218: ptap->api = api;
219: api[0] = 0;
221: /* create and initialize a linked list */
222: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
224: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
225: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
226: current_space = free_space;
228: MatPreallocateInitialize(comm,am,pn,dnz,onz);
229: for (i=0; i<am; i++) {
230: /* diagonal portion of A */
231: nzi = adi[i+1] - adi[i];
232: for (j=0; j<nzi; j++) {
233: row = *adj++;
234: pnz = pi_loc[row+1] - pi_loc[row];
235: Jptr = pj_loc + pi_loc[row];
236: /* add non-zero cols of P into the sorted linked list lnk */
237: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
238: }
239: /* off-diagonal portion of A */
240: nzi = aoi[i+1] - aoi[i];
241: for (j=0; j<nzi; j++) {
242: row = *aoj++;
243: pnz = pi_oth[row+1] - pi_oth[row];
244: Jptr = pj_oth + pi_oth[row];
245: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
246: }
247: /* add possible missing diagonal entry */
248: if (C->force_diagonals) {
249: j = i + rstart; /* column index */
250: PetscLLCondensedAddSorted(1,&j,lnk,lnkbt);
251: }
253: apnz = lnk[0];
254: api[i+1] = api[i] + apnz;
256: /* if free space is not available, double the total space in the list */
257: if (current_space->local_remaining<apnz) {
258: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
259: nspacedouble++;
260: }
262: /* Copy data into free space, then initialize lnk */
263: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
264: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
266: current_space->array += apnz;
267: current_space->local_used += apnz;
268: current_space->local_remaining -= apnz;
269: }
271: /* Allocate space for apj, initialize apj, and */
272: /* destroy list of free space and other temporary array(s) */
273: PetscMalloc1(api[am]+1,&ptap->apj);
274: apj = ptap->apj;
275: PetscFreeSpaceContiguous(&free_space,ptap->apj);
276: PetscLLDestroy(lnk,lnkbt);
278: /* malloc apa to store dense row A[i,:]*P */
279: PetscCalloc1(pN,&ptap->apa);
281: /* set and assemble symbolic parallel matrix C */
282: /*---------------------------------------------*/
283: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
284: MatSetBlockSizesFromMats(C,A,P);
286: MatGetType(A,&mtype);
287: MatSetType(C,mtype);
288: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
289: MatPreallocateFinalize(dnz,onz);
291: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
292: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
293: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
294: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
296: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
297: C->ops->productnumeric = MatProductNumeric_AB;
299: /* attach the supporting struct to C for reuse */
300: C->product->data = ptap;
301: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
303: /* set MatInfo */
304: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
305: if (afill < 1.0) afill = 1.0;
306: C->info.mallocs = nspacedouble;
307: C->info.fill_ratio_given = fill;
308: C->info.fill_ratio_needed = afill;
310: #if defined(PETSC_USE_INFO)
311: if (api[am]) {
312: PetscInfo(C,"Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
313: PetscInfo(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);
314: } else {
315: PetscInfo(C,"Empty matrix product\n");
316: }
317: #endif
318: return 0;
319: }
321: /* ------------------------------------------------------- */
322: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat,Mat,PetscReal,Mat);
323: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat,Mat,Mat);
325: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
326: {
327: Mat_Product *product = C->product;
328: Mat A = product->A,B=product->B;
330: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
331: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
333: C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
334: C->ops->productsymbolic = MatProductSymbolic_AB;
335: return 0;
336: }
337: /* -------------------------------------------------------------------- */
338: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
339: {
340: Mat_Product *product = C->product;
341: Mat A = product->A,B=product->B;
343: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
344: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
346: C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
347: C->ops->productsymbolic = MatProductSymbolic_AtB;
348: return 0;
349: }
351: /* --------------------------------------------------------------------- */
352: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
353: {
354: Mat_Product *product = C->product;
356: switch (product->type) {
357: case MATPRODUCT_AB:
358: MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C);
359: break;
360: case MATPRODUCT_AtB:
361: MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C);
362: break;
363: default:
364: break;
365: }
366: return 0;
367: }
368: /* ------------------------------------------------------- */
370: typedef struct {
371: Mat workB,workB1;
372: MPI_Request *rwaits,*swaits;
373: PetscInt nsends,nrecvs;
374: MPI_Datatype *stype,*rtype;
375: PetscInt blda;
376: } MPIAIJ_MPIDense;
378: PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
379: {
380: MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
381: PetscInt i;
383: MatDestroy(&contents->workB);
384: MatDestroy(&contents->workB1);
385: for (i=0; i<contents->nsends; i++) {
386: MPI_Type_free(&contents->stype[i]);
387: }
388: for (i=0; i<contents->nrecvs; i++) {
389: MPI_Type_free(&contents->rtype[i]);
390: }
391: PetscFree4(contents->stype,contents->rtype,contents->rwaits,contents->swaits);
392: PetscFree(contents);
393: return 0;
394: }
396: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat C)
397: {
399: Mat_MPIAIJ *aij=(Mat_MPIAIJ*)A->data;
400: PetscInt nz=aij->B->cmap->n,nsends,nrecvs,i,nrows_to,j,blda,m,M,n,N;
401: MPIAIJ_MPIDense *contents;
402: VecScatter ctx=aij->Mvctx;
403: PetscInt Am=A->rmap->n,Bm=B->rmap->n,BN=B->cmap->N,Bbn,Bbn1,bs,nrows_from,numBb;
404: MPI_Comm comm;
405: MPI_Datatype type1,*stype,*rtype;
406: const PetscInt *sindices,*sstarts,*rstarts;
407: PetscMPIInt *disp;
408: PetscBool cisdense;
410: MatCheckProduct(C,4);
412: PetscObjectGetComm((PetscObject)A,&comm);
413: PetscObjectBaseTypeCompare((PetscObject)C,MATMPIDENSE,&cisdense);
414: if (!cisdense) {
415: MatSetType(C,((PetscObject)B)->type_name);
416: }
417: MatGetLocalSize(C,&m,&n);
418: MatGetSize(C,&M,&N);
419: if (m == PETSC_DECIDE || n == PETSC_DECIDE || M == PETSC_DECIDE || N == PETSC_DECIDE) {
420: MatSetSizes(C,Am,B->cmap->n,A->rmap->N,BN);
421: }
422: MatSetBlockSizesFromMats(C,A,B);
423: MatSetUp(C);
424: MatDenseGetLDA(B,&blda);
425: PetscNew(&contents);
427: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);
428: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);
430: /* Create column block of B and C for memory scalability when BN is too large */
431: /* Estimate Bbn, column size of Bb */
432: if (nz) {
433: Bbn1 = 2*Am*BN/nz;
434: if (!Bbn1) Bbn1 = 1;
435: } else Bbn1 = BN;
437: bs = PetscAbs(B->cmap->bs);
438: Bbn1 = Bbn1/bs *bs; /* Bbn1 is a multiple of bs */
439: if (Bbn1 > BN) Bbn1 = BN;
440: MPI_Allreduce(&Bbn1,&Bbn,1,MPIU_INT,MPI_MAX,comm);
442: /* Enable runtime option for Bbn */
443: PetscOptionsBegin(comm,((PetscObject)C)->prefix,"MatMatMult","Mat");
444: PetscOptionsInt("-matmatmult_Bbn","Number of columns in Bb","MatMatMult",Bbn,&Bbn,NULL);
445: PetscOptionsEnd();
446: Bbn = PetscMin(Bbn,BN);
448: if (Bbn > 0 && Bbn < BN) {
449: numBb = BN/Bbn;
450: Bbn1 = BN - numBb*Bbn;
451: } else numBb = 0;
453: if (numBb) {
454: PetscInfo(C,"use Bb, BN=%" PetscInt_FMT ", Bbn=%" PetscInt_FMT "; numBb=%" PetscInt_FMT "\n",BN,Bbn,numBb);
455: if (Bbn1) { /* Create workB1 for the remaining columns */
456: PetscInfo(C,"use Bb1, BN=%" PetscInt_FMT ", Bbn1=%" PetscInt_FMT "\n",BN,Bbn1);
457: /* Create work matrix used to store off processor rows of B needed for local product */
458: MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn1,NULL,&contents->workB1);
459: } else contents->workB1 = NULL;
460: }
462: /* Create work matrix used to store off processor rows of B needed for local product */
463: MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn,NULL,&contents->workB);
465: /* Use MPI derived data type to reduce memory required by the send/recv buffers */
466: PetscMalloc4(nsends,&stype,nrecvs,&rtype,nrecvs,&contents->rwaits,nsends,&contents->swaits);
467: contents->stype = stype;
468: contents->nsends = nsends;
470: contents->rtype = rtype;
471: contents->nrecvs = nrecvs;
472: contents->blda = blda;
474: PetscMalloc1(Bm+1,&disp);
475: for (i=0; i<nsends; i++) {
476: nrows_to = sstarts[i+1]-sstarts[i];
477: for (j=0; j<nrows_to; j++) disp[j] = sindices[sstarts[i]+j]; /* rowB to be sent */
478: MPI_Type_create_indexed_block(nrows_to,1,disp,MPIU_SCALAR,&type1);
479: MPI_Type_create_resized(type1,0,blda*sizeof(PetscScalar),&stype[i]);
480: MPI_Type_commit(&stype[i]);
481: MPI_Type_free(&type1);
482: }
484: for (i=0; i<nrecvs; i++) {
485: /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
486: nrows_from = rstarts[i+1]-rstarts[i];
487: disp[0] = 0;
488: MPI_Type_create_indexed_block(1,nrows_from,disp,MPIU_SCALAR,&type1);
489: MPI_Type_create_resized(type1,0,nz*sizeof(PetscScalar),&rtype[i]);
490: MPI_Type_commit(&rtype[i]);
491: MPI_Type_free(&type1);
492: }
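/* How these types are used in MatMPIDenseScatter(): MPI_Isend(b,ncols,stype[i],...) sends, for each of
   the ncols columns of B, only the rows listed in disp[]; the resized extent blda*sizeof(PetscScalar)
   makes the k-th copy of the type start at column k of the column-major array b. Likewise rtype[i] has
   extent nz*sizeof(PetscScalar), so the received row block lands column by column into workB (whose
   leading dimension is nz) */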
494: PetscFree(disp);
495: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);
496: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);
497: MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
498: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
499: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
500: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
502: C->product->data = contents;
503: C->product->destroy = MatMPIAIJ_MPIDenseDestroy;
504: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
505: return 0;
506: }
508: PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat,const PetscBool);
509: /*
510: Performs an efficient scatter on the rows of B needed by this process; this is
511: a modification of the VecScatterBegin_() routines.
513: Input: Bbidx = 0: B = Bb
514: = 1: B = Bb1, see MatMatMultSymbolic_MPIAIJ_MPIDense()
515: */
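/*
   Output (a sketch of the contract as used by the callers below, not authoritative documentation):
     *outworkB points to contents->workB (Bbidx==0) or contents->workB1 (Bbidx==1), a sequential
     dense matrix with aij->B->cmap->n rows holding the off-processor rows of B needed for the
     local product with aij->B.
*/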
516: PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,PetscInt Bbidx,Mat C,Mat *outworkB)
517: {
518: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
519: const PetscScalar *b;
520: PetscScalar *rvalues;
521: VecScatter ctx = aij->Mvctx;
522: const PetscInt *sindices,*sstarts,*rstarts;
523: const PetscMPIInt *sprocs,*rprocs;
524: PetscInt i,nsends,nrecvs;
525: MPI_Request *swaits,*rwaits;
526: MPI_Comm comm;
527: PetscMPIInt tag=((PetscObject)ctx)->tag,ncols=B->cmap->N,nrows=aij->B->cmap->n,nsends_mpi,nrecvs_mpi;
528: MPIAIJ_MPIDense *contents;
529: Mat workB;
530: MPI_Datatype *stype,*rtype;
531: PetscInt blda;
533: MatCheckProduct(C,4);
535: contents = (MPIAIJ_MPIDense*)C->product->data;
536: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL/*bs*/);
537: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL/*bs*/);
538: PetscMPIIntCast(nsends,&nsends_mpi);
539: PetscMPIIntCast(nrecvs,&nrecvs_mpi);
540: if (Bbidx == 0) workB = *outworkB = contents->workB;
541: else workB = *outworkB = contents->workB1;
543: swaits = contents->swaits;
544: rwaits = contents->rwaits;
546: MatDenseGetArrayRead(B,&b);
547: MatDenseGetLDA(B,&blda);
549: MatDenseGetArray(workB,&rvalues);
551: /* Post recv, use MPI derived data type to save memory */
552: PetscObjectGetComm((PetscObject)C,&comm);
553: rtype = contents->rtype;
554: for (i=0; i<nrecvs; i++) {
555: MPI_Irecv(rvalues+(rstarts[i]-rstarts[0]),ncols,rtype[i],rprocs[i],tag,comm,rwaits+i);
556: }
558: stype = contents->stype;
559: for (i=0; i<nsends; i++) {
560: MPI_Isend(b,ncols,stype[i],sprocs[i],tag,comm,swaits+i);
561: }
563: if (nrecvs) MPI_Waitall(nrecvs_mpi,rwaits,MPI_STATUSES_IGNORE);
564: if (nsends) MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);
566: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL);
567: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL);
568: MatDenseRestoreArrayRead(B,&b);
569: MatDenseRestoreArray(workB,&rvalues);
570: return 0;
571: }
573: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
574: {
575: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
576: Mat_MPIDense *bdense = (Mat_MPIDense*)B->data;
577: Mat_MPIDense *cdense = (Mat_MPIDense*)C->data;
578: Mat workB;
579: MPIAIJ_MPIDense *contents;
581: MatCheckProduct(C,3);
583: contents = (MPIAIJ_MPIDense*)C->product->data;
584: /* diagonal block of A times all local rows of B */
585: /* TODO: this calls a symbolic multiplication every time, which could be avoided */
586: MatMatMult(aij->A,bdense->A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&cdense->A);
587: if (contents->workB->cmap->n == B->cmap->N) {
588: /* get off processor parts of B needed to complete C=A*B */
589: MatMPIDenseScatter(A,B,0,C,&workB);
591: /* off-diagonal block of A times nonlocal rows of B */
592: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE);
593: } else {
594: Mat Bb,Cb;
595: PetscInt BN=B->cmap->N,n=contents->workB->cmap->n,i;
596: PetscBool ccpu;
599: /* Prevent unneeded copies back and forth from the GPU
600: when getting and restoring the submatrix
601: We need a proper GPU code for AIJ * dense in parallel */
602: MatBoundToCPU(C,&ccpu);
603: MatBindToCPU(C,PETSC_TRUE);
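/* work through B and C in column blocks of width n (the number of columns of workB) so the scatter
   buffer stays bounded; MatMPIDenseScatter() picks workB1 for the final, possibly narrower, block
   via Bbidx = (i+n)>BN */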
604: for (i=0; i<BN; i+=n) {
605: MatDenseGetSubMatrix(B,i,PetscMin(i+n,BN),&Bb);
606: MatDenseGetSubMatrix(C,i,PetscMin(i+n,BN),&Cb);
608: /* get off processor parts of B needed to complete C=A*B */
609: MatMPIDenseScatter(A,Bb,(i+n)>BN,C,&workB);
611: /* off-diagonal block of A times nonlocal rows of B */
612: cdense = (Mat_MPIDense*)Cb->data;
613: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE);
614: MatDenseRestoreSubMatrix(B,&Bb);
615: MatDenseRestoreSubMatrix(C,&Cb);
616: }
617: MatBindToCPU(C,ccpu);
618: }
619: return 0;
620: }
622: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
623: {
624: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
625: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
626: Mat_SeqAIJ *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
627: PetscInt *adi = ad->i,*adj,*aoi=ao->i,*aoj;
628: PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a;
629: Mat_SeqAIJ *p_loc,*p_oth;
630: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
631: PetscScalar *pa_loc,*pa_oth,*pa,valtmp,*ca;
632: PetscInt cm = C->rmap->n,anz,pnz;
633: Mat_APMPI *ptap;
634: PetscScalar *apa_sparse;
635: const PetscScalar *dummy;
636: PetscInt *api,*apj,*apJ,i,j,k,row;
637: PetscInt cstart = C->cmap->rstart;
638: PetscInt cdnz,conz,k0,k1,nextp;
639: MPI_Comm comm;
640: PetscMPIInt size;
642: MatCheckProduct(C,3);
643: ptap = (Mat_APMPI*)C->product->data;
645: PetscObjectGetComm((PetscObject)C,&comm);
646: MPI_Comm_size(comm,&size);
649: /* flag CPU mask for C */
650: #if defined(PETSC_HAVE_DEVICE)
651: if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
652: if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
653: if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
654: #endif
655: apa_sparse = ptap->apa;
657: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
658: /*-----------------------------------------------------*/
659: /* update numerical values of P_oth and P_loc */
660: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
661: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
663: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
664: /*----------------------------------------------------------*/
665: /* get data from symbolic products */
666: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
667: pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
668: if (size >1) {
669: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
670: pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;
671: } else {
672: p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL;
673: }
675: /* trigger copy to CPU */
676: MatSeqAIJGetArrayRead(a->A,&dummy);
677: MatSeqAIJRestoreArrayRead(a->A,&dummy);
678: MatSeqAIJGetArrayRead(a->B,&dummy);
679: MatSeqAIJRestoreArrayRead(a->B,&dummy);
680: api = ptap->api;
681: apj = ptap->apj;
682: for (i=0; i<cm; i++) {
683: apJ = apj + api[i];
685: /* diagonal portion of A */
686: anz = adi[i+1] - adi[i];
687: adj = ad->j + adi[i];
688: ada = ad->a + adi[i];
689: for (j=0; j<anz; j++) {
690: row = adj[j];
691: pnz = pi_loc[row+1] - pi_loc[row];
692: pj = pj_loc + pi_loc[row];
693: pa = pa_loc + pi_loc[row];
694: /* perform sparse axpy */
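/* apJ, the symbolic pattern of row i of A*P, is a sorted superset of this row of P_loc, so the two
   index lists can be merged in the single pass below */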
695: valtmp = ada[j];
696: nextp = 0;
697: for (k=0; nextp<pnz; k++) {
698: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
699: apa_sparse[k] += valtmp*pa[nextp++];
700: }
701: }
702: PetscLogFlops(2.0*pnz);
703: }
705: /* off-diagonal portion of A */
706: anz = aoi[i+1] - aoi[i];
707: aoj = ao->j + aoi[i];
708: aoa = ao->a + aoi[i];
709: for (j=0; j<anz; j++) {
710: row = aoj[j];
711: pnz = pi_oth[row+1] - pi_oth[row];
712: pj = pj_oth + pi_oth[row];
713: pa = pa_oth + pi_oth[row];
714: /* perform sparse axpy */
715: valtmp = aoa[j];
716: nextp = 0;
717: for (k=0; nextp<pnz; k++) {
718: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
719: apa_sparse[k] += valtmp*pa[nextp++];
720: }
721: }
722: PetscLogFlops(2.0*pnz);
723: }
725: /* set values in C */
726: cdnz = cd->i[i+1] - cd->i[i];
727: conz = co->i[i+1] - co->i[i];
729: /* 1st off-diagonal part of C */
730: ca = coa + co->i[i];
731: k = 0;
732: for (k0=0; k0<conz; k0++) {
733: if (apJ[k] >= cstart) break;
734: ca[k0] = apa_sparse[k];
735: apa_sparse[k] = 0.0;
736: k++;
737: }
739: /* diagonal part of C */
740: ca = cda + cd->i[i];
741: for (k1=0; k1<cdnz; k1++) {
742: ca[k1] = apa_sparse[k];
743: apa_sparse[k] = 0.0;
744: k++;
745: }
747: /* 2nd off-diagonal part of C */
748: ca = coa + co->i[i];
749: for (; k0<conz; k0++) {
750: ca[k0] = apa_sparse[k];
751: apa_sparse[k] = 0.0;
752: k++;
753: }
754: }
755: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
756: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
757: return 0;
758: }
760: /* Same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except it uses the scalable condensed linked list and a sparse accumulator to avoid the O(N) dense work array */
761: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat C)
762: {
764: MPI_Comm comm;
765: PetscMPIInt size;
766: Mat_APMPI *ptap;
767: PetscFreeSpaceList free_space = NULL,current_space=NULL;
768: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
769: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
770: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
771: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
772: PetscInt i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=1;
773: PetscInt am=A->rmap->n,pn=P->cmap->n,pm=P->rmap->n,lsize=pn+20;
774: PetscReal afill;
775: MatType mtype;
777: MatCheckProduct(C,4);
779: PetscObjectGetComm((PetscObject)A,&comm);
780: MPI_Comm_size(comm,&size);
782: /* create struct Mat_APMPI and attach it to C later */
783: PetscNew(&ptap);
785: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
786: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
788: /* get P_loc by taking all local rows of P */
789: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
791: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
792: pi_loc = p_loc->i; pj_loc = p_loc->j;
793: if (size > 1) {
794: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
795: pi_oth = p_oth->i; pj_oth = p_oth->j;
796: } else {
797: p_oth = NULL;
798: pi_oth = NULL; pj_oth = NULL;
799: }
801: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
802: /*-------------------------------------------------------------------*/
803: PetscMalloc1(am+2,&api);
804: ptap->api = api;
805: api[0] = 0;
807: PetscLLCondensedCreate_Scalable(lsize,&lnk);
809: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
810: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
811: current_space = free_space;
812: MatPreallocateInitialize(comm,am,pn,dnz,onz);
813: for (i=0; i<am; i++) {
814: /* diagonal portion of A */
815: nzi = adi[i+1] - adi[i];
816: for (j=0; j<nzi; j++) {
817: row = *adj++;
818: pnz = pi_loc[row+1] - pi_loc[row];
819: Jptr = pj_loc + pi_loc[row];
820: /* Expand list if it is not long enough */
821: if (pnz+apnz_max > lsize) {
822: lsize = pnz+apnz_max;
823: PetscLLCondensedExpand_Scalable(lsize, &lnk);
824: }
825: /* add non-zero cols of P into the sorted linked list lnk */
826: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
827: apnz = *lnk; /* The first element in the list is the number of items in the list */
828: api[i+1] = api[i] + apnz;
829: if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
830: }
831: /* off-diagonal portion of A */
832: nzi = aoi[i+1] - aoi[i];
833: for (j=0; j<nzi; j++) {
834: row = *aoj++;
835: pnz = pi_oth[row+1] - pi_oth[row];
836: Jptr = pj_oth + pi_oth[row];
837: /* Expand list if it is not long enough */
838: if (pnz+apnz_max > lsize) {
839: lsize = pnz + apnz_max;
840: PetscLLCondensedExpand_Scalable(lsize, &lnk);
841: }
842: /* add non-zero cols of P into the sorted linked list lnk */
843: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
844: apnz = *lnk; /* The first element in the list is the number of items in the list */
845: api[i+1] = api[i] + apnz;
846: if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
847: }
849: /* add missing diagonal entry */
850: if (C->force_diagonals) {
851: j = i + rstart; /* column index */
852: PetscLLCondensedAddSorted_Scalable(1,&j,lnk);
853: }
855: apnz = *lnk;
856: api[i+1] = api[i] + apnz;
857: if (apnz > apnz_max) apnz_max = apnz;
859: /* if free space is not available, double the total space in the list */
860: if (current_space->local_remaining<apnz) {
861: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
862: nspacedouble++;
863: }
865: /* Copy data into free space, then initialize lnk */
866: PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);
867: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
869: current_space->array += apnz;
870: current_space->local_used += apnz;
871: current_space->local_remaining -= apnz;
872: }
874: /* Allocate space for apj, initialize apj, and */
875: /* destroy list of free space and other temporary array(s) */
876: PetscMalloc1(api[am]+1,&ptap->apj);
877: apj = ptap->apj;
878: PetscFreeSpaceContiguous(&free_space,ptap->apj);
879: PetscLLCondensedDestroy_Scalable(lnk);
881: /* create and assemble symbolic parallel matrix C */
882: /*----------------------------------------------------*/
883: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
884: MatSetBlockSizesFromMats(C,A,P);
885: MatGetType(A,&mtype);
886: MatSetType(C,mtype);
887: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
888: MatPreallocateFinalize(dnz,onz);
890: /* malloc apa for assembly C */
891: PetscCalloc1(apnz_max,&ptap->apa);
893: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
894: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
895: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
896: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
898: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
899: C->ops->productnumeric = MatProductNumeric_AB;
901: /* attach the supporting struct to C for reuse */
902: C->product->data = ptap;
903: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
905: /* set MatInfo */
906: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
907: if (afill < 1.0) afill = 1.0;
908: C->info.mallocs = nspacedouble;
909: C->info.fill_ratio_given = fill;
910: C->info.fill_ratio_needed = afill;
912: #if defined(PETSC_USE_INFO)
913: if (api[am]) {
914: PetscInfo(C,"Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
915: PetscInfo(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);
916: } else {
917: PetscInfo(C,"Empty matrix product\n");
918: }
919: #endif
920: return 0;
921: }
923: /* This function is needed for the seqMPI matrix-matrix multiplication. */
924: /* Three input arrays are merged into one output array. The size of the */
925: /* output array is also returned. Duplicate entries only appear once.   */
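/* Worked example (an illustration added here, not taken from the callers):            */
/*   in1 = {1,3,7}, in2 = {3,4}, in3 = {4,9} gives out = {1,3,4,7,9} and *size4 = 5,    */
/*   since values shared between the inputs are emitted only once.                     */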
926: static void Merge3SortedArrays(PetscInt size1, PetscInt *in1,
927: PetscInt size2, PetscInt *in2,
928: PetscInt size3, PetscInt *in3,
929: PetscInt *size4, PetscInt *out)
930: {
931: PetscInt i = 0, j = 0, k = 0, l = 0;
933: /* Traverse all three arrays */
934: while (i<size1 && j<size2 && k<size3) {
935: if (in1[i] < in2[j] && in1[i] < in3[k]) {
936: out[l++] = in1[i++];
937: }
938: else if (in2[j] < in1[i] && in2[j] < in3[k]) {
939: out[l++] = in2[j++];
940: }
941: else if (in3[k] < in1[i] && in3[k] < in2[j]) {
942: out[l++] = in3[k++];
943: }
944: else if (in1[i] == in2[j] && in1[i] < in3[k]) {
945: out[l++] = in1[i];
946: i++, j++;
947: }
948: else if (in1[i] == in3[k] && in1[i] < in2[j]) {
949: out[l++] = in1[i];
950: i++, k++;
951: }
952: else if (in3[k] == in2[j] && in2[j] < in1[i]) {
953: out[l++] = in2[j];
954: k++, j++;
955: }
956: else if (in1[i] == in2[j] && in1[i] == in3[k]) {
957: out[l++] = in1[i];
958: i++, j++, k++;
959: }
960: }
962: /* Traverse two remaining arrays */
963: while (i<size1 && j<size2) {
964: if (in1[i] < in2[j]) {
965: out[l++] = in1[i++];
966: }
967: else if (in1[i] > in2[j]) {
968: out[l++] = in2[j++];
969: }
970: else {
971: out[l++] = in1[i];
972: i++, j++;
973: }
974: }
976: while (i<size1 && k<size3) {
977: if (in1[i] < in3[k]) {
978: out[l++] = in1[i++];
979: }
980: else if (in1[i] > in3[k]) {
981: out[l++] = in3[k++];
982: }
983: else {
984: out[l++] = in1[i];
985: i++, k++;
986: }
987: }
989: while (k<size3 && j<size2) {
990: if (in3[k] < in2[j]) {
991: out[l++] = in3[k++];
992: }
993: else if (in3[k] > in2[j]) {
994: out[l++] = in2[j++];
995: }
996: else {
997: out[l++] = in3[k];
998: k++, j++;
999: }
1000: }
1002: /* Traverse one remaining array */
1003: while (i<size1) out[l++] = in1[i++];
1004: while (j<size2) out[l++] = in2[j++];
1005: while (k<size3) out[l++] = in3[k++];
1007: *size4 = l;
1008: }
1010: /* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and */
1011: /* adds up the products. Two of these three multiplications are performed with existing (sequential) */
1012: /* matrix-matrix multiplications. */
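/* In terms of the local blocks this computes C = A_diag*P_loc_diag + A_diag*P_loc_off + A_off*P_oth: */
/* A_diag*P_loc_diag via a sequential MatProduct, A_off*P_oth via MatMatMultSymbolic_SeqAIJ_SeqAIJ(),  */
/* and the symbolic pattern of A_diag*P_loc_off via the condensed linked list built below.             */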
1013: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
1014: {
1016: MPI_Comm comm;
1017: PetscMPIInt size;
1018: Mat_APMPI *ptap;
1019: PetscFreeSpaceList free_space_diag=NULL, current_space=NULL;
1020: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data;
1021: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc;
1022: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data;
1023: Mat_SeqAIJ *adpd_seq, *p_off, *aopoth_seq;
1024: PetscInt adponz, adpdnz;
1025: PetscInt *pi_loc,*dnz,*onz;
1026: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,rstart=A->rmap->rstart;
1027: PetscInt *lnk,i, i1=0,pnz,row,*adpoi,*adpoj, *api, *adpoJ, *aopJ, *apJ,*Jptr, aopnz, nspacedouble=0,j,nzi,
1028: *apj,apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
1029: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n, p_colstart, p_colend;
1030: PetscBT lnkbt;
1031: PetscReal afill;
1032: PetscMPIInt rank;
1033: Mat adpd, aopoth;
1034: MatType mtype;
1035: const char *prefix;
1037: MatCheckProduct(C,4);
1039: PetscObjectGetComm((PetscObject)A,&comm);
1040: MPI_Comm_size(comm,&size);
1041: MPI_Comm_rank(comm, &rank);
1042: MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend);
1044: /* create struct Mat_APMPI and attach it to C later */
1045: PetscNew(&ptap);
1047: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
1048: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
1050: /* get P_loc by taking all local rows of P */
1051: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
1053: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1054: pi_loc = p_loc->i;
1056: /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
1057: PetscMalloc1(am+2,&api);
1058: PetscMalloc1(am+2,&adpoi);
1060: adpoi[0] = 0;
1061: ptap->api = api;
1062: api[0] = 0;
1064: /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
1065: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
1066: MatPreallocateInitialize(comm,am,pn,dnz,onz);
1068: /* Symbolic calc of A_loc_diag * P_loc_diag */
1069: MatGetOptionsPrefix(A,&prefix);
1070: MatProductCreate(a->A,p->A,NULL,&adpd);
1071: MatGetOptionsPrefix(A,&prefix);
1072: MatSetOptionsPrefix(adpd,prefix);
1073: MatAppendOptionsPrefix(adpd,"inner_diag_");
1075: MatProductSetType(adpd,MATPRODUCT_AB);
1076: MatProductSetAlgorithm(adpd,"sorted");
1077: MatProductSetFill(adpd,fill);
1078: MatProductSetFromOptions(adpd);
1080: adpd->force_diagonals = C->force_diagonals;
1081: MatProductSymbolic(adpd);
1083: adpd_seq = (Mat_SeqAIJ*)((adpd)->data);
1084: adpdi = adpd_seq->i; adpdj = adpd_seq->j;
1085: p_off = (Mat_SeqAIJ*)((p->B)->data);
1086: poff_i = p_off->i; poff_j = p_off->j;
1088: /* j_temp stores indices of a result row before they are added to the linked list */
1089: PetscMalloc1(pN+2,&j_temp);
1091: /* Symbolic calc of the A_diag * p_loc_off */
1092: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
1093: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space_diag);
1094: current_space = free_space_diag;
1096: for (i=0; i<am; i++) {
1097: /* A_diag * P_loc_off */
1098: nzi = adi[i+1] - adi[i];
1099: for (j=0; j<nzi; j++) {
1100: row = *adj++;
1101: pnz = poff_i[row+1] - poff_i[row];
1102: Jptr = poff_j + poff_i[row];
1103: for (i1 = 0; i1 < pnz; i1++) {
1104: j_temp[i1] = p->garray[Jptr[i1]];
1105: }
1106: /* add non-zero cols of P into the sorted linked list lnk */
1107: PetscLLCondensedAddSorted(pnz,j_temp,lnk,lnkbt);
1108: }
1110: adponz = lnk[0];
1111: adpoi[i+1] = adpoi[i] + adponz;
1113: /* if free space is not available, double the total space in the list */
1114: if (current_space->local_remaining<adponz) {
1115: PetscFreeSpaceGet(PetscIntSumTruncate(adponz,current_space->total_array_size),&current_space);
1116: nspacedouble++;
1117: }
1119: /* Copy data into free space, then initialize lnk */
1120: PetscLLCondensedClean(pN,adponz,current_space->array,lnk,lnkbt);
1122: current_space->array += adponz;
1123: current_space->local_used += adponz;
1124: current_space->local_remaining -= adponz;
1125: }
1127: /* Symbolic calc of A_off * P_oth */
1128: MatSetOptionsPrefix(a->B,prefix);
1129: MatAppendOptionsPrefix(a->B,"inner_offdiag_");
1130: MatCreate(PETSC_COMM_SELF,&aopoth);
1131: MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth);
1132: aopoth_seq = (Mat_SeqAIJ*)((aopoth)->data);
1133: aopothi = aopoth_seq->i; aopothj = aopoth_seq->j;
1135: /* Allocate space for apj, adpj, aopj, ... */
1136: /* destroy lists of free space and other temporary array(s) */
1138: PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am]+2, &ptap->apj);
1139: PetscMalloc1(adpoi[am]+2, &adpoj);
1141: /* Copy from linked list to j-array */
1142: PetscFreeSpaceContiguous(&free_space_diag,adpoj);
1143: PetscLLDestroy(lnk,lnkbt);
1145: adpoJ = adpoj;
1146: adpdJ = adpdj;
1147: aopJ = aopothj;
1148: apj = ptap->apj;
1149: apJ = apj; /* still empty */
1151: /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
1152: /* A_diag * P_loc_diag to get A*P */
1153: for (i = 0; i < am; i++) {
1154: aopnz = aopothi[i+1] - aopothi[i];
1155: adponz = adpoi[i+1] - adpoi[i];
1156: adpdnz = adpdi[i+1] - adpdi[i];
1158: /* Correct indices from A_diag*P_diag */
1159: for (i1 = 0; i1 < adpdnz; i1++) {
1160: adpdJ[i1] += p_colstart;
1161: }
1162: /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
1163: Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
1164: MatPreallocateSet(i+rstart, apnz, apJ, dnz, onz);
1166: aopJ += aopnz;
1167: adpoJ += adponz;
1168: adpdJ += adpdnz;
1169: apJ += apnz;
1170: api[i+1] = api[i] + apnz;
1171: }
1173: /* malloc apa to store dense row A[i,:]*P */
1174: PetscCalloc1(pN+2,&ptap->apa);
1176: /* create and assemble symbolic parallel matrix C */
1177: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
1178: MatSetBlockSizesFromMats(C,A,P);
1179: MatGetType(A,&mtype);
1180: MatSetType(C,mtype);
1181: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1182: MatPreallocateFinalize(dnz,onz);
1184: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
1185: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1186: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1187: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
1189: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1190: C->ops->productnumeric = MatProductNumeric_AB;
1192: /* attach the supporting struct to C for reuse */
1193: C->product->data = ptap;
1194: C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
1196: /* set MatInfo */
1197: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
1198: if (afill < 1.0) afill = 1.0;
1199: C->info.mallocs = nspacedouble;
1200: C->info.fill_ratio_given = fill;
1201: C->info.fill_ratio_needed = afill;
1203: #if defined(PETSC_USE_INFO)
1204: if (api[am]) {
1205: PetscInfo(C,"Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
1206: PetscInfo(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);
1207: } else {
1208: PetscInfo(C,"Empty matrix product\n");
1209: }
1210: #endif
1212: MatDestroy(&aopoth);
1213: MatDestroy(&adpd);
1214: PetscFree(j_temp);
1215: PetscFree(adpoj);
1216: PetscFree(adpoi);
1217: return 0;
1218: }
1220: /*-------------------------------------------------------------------------*/
1221: /* This routine only works when scall=MAT_REUSE_MATRIX! */
1222: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C)
1223: {
1224: Mat_APMPI *ptap;
1225: Mat Pt;
1227: MatCheckProduct(C,3);
1228: ptap = (Mat_APMPI*)C->product->data;
1232: Pt = ptap->Pt;
1233: MatTranspose(P,MAT_REUSE_MATRIX,&Pt);
1234: MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt,A,C);
1235: return 0;
1236: }
1238: /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
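/* Overall scheme, as implemented below: with Rd = Pd^T and Ro = Po^T, the owned rows of C = P^T*A  */
/* are assembled from C_loc = Rd*A_loc plus the rows of C_oth = Ro*A_loc, which belong to other     */
/* processes and are therefore communicated in steps (3)-(4).                                       */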
1239: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat C)
1240: {
1242: Mat_APMPI *ptap;
1243: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data;
1244: MPI_Comm comm;
1245: PetscMPIInt size,rank;
1246: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1247: PetscInt pn=P->cmap->n,aN=A->cmap->N,an=A->cmap->n;
1248: PetscInt *lnk,i,k,nsend,rstart;
1249: PetscBT lnkbt;
1250: PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,nrecv;
1251: PETSC_UNUSED PetscMPIInt icompleted=0;
1252: PetscInt **buf_rj,**buf_ri,**buf_ri_k,row,ncols,*cols;
1253: PetscInt len,proc,*dnz,*onz,*owners,nzi;
1254: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1255: MPI_Request *swaits,*rwaits;
1256: MPI_Status *sstatus,rstatus;
1257: PetscLayout rowmap;
1258: PetscInt *owners_co,*coi,*coj; /* i and j arrays of (p->B)^T*A - used in the communication */
1259: PetscMPIInt *len_r,*id_r; /* lengths and source ranks of the messages to be received */
1260: PetscInt *Jptr,*prmap=p->garray,con,j,Crmax;
1261: Mat_SeqAIJ *a_loc,*c_loc,*c_oth;
1262: PetscTable ta;
1263: MatType mtype;
1264: const char *prefix;
1266: PetscObjectGetComm((PetscObject)A,&comm);
1267: MPI_Comm_size(comm,&size);
1268: MPI_Comm_rank(comm,&rank);
1270: /* create symbolic parallel matrix C */
1271: MatGetType(A,&mtype);
1272: MatSetType(C,mtype);
1274: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1276: /* create struct Mat_APMPI and attach it to C later */
1277: PetscNew(&ptap);
1278: ptap->reuse = MAT_INITIAL_MATRIX;
1280: /* (0) compute Rd = Pd^T, Ro = Po^T */
1281: /* --------------------------------- */
1282: MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);
1283: MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);
1285: /* (1) compute symbolic A_loc */
1286: /* ---------------------------*/
1287: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&ptap->A_loc);
1289: /* (2-1) compute symbolic C_oth = Ro*A_loc */
1290: /* ------------------------------------ */
1291: MatGetOptionsPrefix(A,&prefix);
1292: MatSetOptionsPrefix(ptap->Ro,prefix);
1293: MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");
1294: MatCreate(PETSC_COMM_SELF,&ptap->C_oth);
1295: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->A_loc,fill,ptap->C_oth);
1297: /* (3) send coj of C_oth to other processors */
1298: /* ------------------------------------------ */
1299: /* determine row ownership */
1300: PetscLayoutCreate(comm,&rowmap);
1301: rowmap->n = pn;
1302: rowmap->bs = 1;
1303: PetscLayoutSetUp(rowmap);
1304: owners = rowmap->range;
1306: /* determine the number of messages to send, their lengths */
1307: PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);
1308: PetscArrayzero(len_s,size);
1309: PetscArrayzero(len_si,size);
1311: c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
1312: coi = c_oth->i; coj = c_oth->j;
1313: con = ptap->C_oth->rmap->n;
1314: proc = 0;
1315: for (i=0; i<con; i++) {
1316: while (prmap[i] >= owners[proc+1]) proc++;
1317: len_si[proc]++; /* num of rows in Co(=Pt*A) to be sent to [proc] */
1318: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1319: }
1321: len = 0; /* total length of buf_s[], see (4) */
1322: owners_co[0] = 0;
1323: nsend = 0;
1324: for (proc=0; proc<size; proc++) {
1325: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1326: if (len_s[proc]) {
1327: nsend++;
1328: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1329: len += len_si[proc];
1330: }
1331: }
1333: /* determine the number and length of messages to receive for coi and coj */
1334: PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);
1335: PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);
1337: /* post the Irecv and Isend of coj */
1338: PetscCommGetNewTag(comm,&tagj);
1339: PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);
1340: PetscMalloc1(nsend+1,&swaits);
1341: for (proc=0, k=0; proc<size; proc++) {
1342: if (!len_s[proc]) continue;
1343: i = owners_co[proc];
1344: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1345: k++;
1346: }
1348: /* (2-2) compute symbolic C_loc = Rd*A_loc */
1349: /* ---------------------------------------- */
1350: MatSetOptionsPrefix(ptap->Rd,prefix);
1351: MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");
1352: MatCreate(PETSC_COMM_SELF,&ptap->C_loc);
1353: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->A_loc,fill,ptap->C_loc);
1354: c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
1356: /* receives of coj are complete */
1357: for (i=0; i<nrecv; i++) {
1358: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
1359: }
1360: PetscFree(rwaits);
1361: if (nsend) MPI_Waitall(nsend,swaits,sstatus);
1363: /* add received column indices into ta to update Crmax */
1364: a_loc = (Mat_SeqAIJ*)(ptap->A_loc)->data;
1366: /* create and initialize a table used to compute Crmax */
1367: PetscTableCreate(an,aN,&ta);
1368: MatRowMergeMax_SeqAIJ(a_loc,ptap->A_loc->rmap->N,ta);
1370: for (k=0; k<nrecv; k++) {/* k-th received message */
1371: Jptr = buf_rj[k];
1372: for (j=0; j<len_r[k]; j++) {
1373: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
1374: }
1375: }
1376: PetscTableGetCount(ta,&Crmax);
1377: PetscTableDestroy(&ta);
1379: /* (4) send and recv coi */
1380: /*-----------------------*/
1381: PetscCommGetNewTag(comm,&tagi);
1382: PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);
1383: PetscMalloc1(len+1,&buf_s);
1384: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1385: for (proc=0,k=0; proc<size; proc++) {
1386: if (!len_s[proc]) continue;
1387: /* form outgoing message for i-structure:
1388: buf_si[0]: nrows to be sent
1389: [1:nrows]: row index (global)
1390: [nrows+1:2*nrows+1]: i-structure index
1391: */
1392: /*-------------------------------------------*/
1392: /*-------------------------------------------*/
1393: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
1394: buf_si_i = buf_si + nrows+1;
1395: buf_si[0] = nrows;
1396: buf_si_i[0] = 0;
1397: nrows = 0;
1398: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1399: nzi = coi[i+1] - coi[i];
1400: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1401: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1402: nrows++;
1403: }
1404: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1405: k++;
1406: buf_si += len_si[proc];
1407: }
1408: for (i=0; i<nrecv; i++) {
1409: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
1410: }
1411: PetscFree(rwaits);
1412: if (nsend) MPI_Waitall(nsend,swaits,sstatus);
1414: PetscFree4(len_s,len_si,sstatus,owners_co);
1415: PetscFree(len_ri);
1416: PetscFree(swaits);
1417: PetscFree(buf_s);
1419: /* (5) compute the local portion of C */
1420: /* ------------------------------------------ */
1421: /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of C */
1422: PetscFreeSpaceGet(Crmax,&free_space);
1423: current_space = free_space;
1425: PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);
1426: for (k=0; k<nrecv; k++) {
1427: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1428: nrows = *buf_ri_k[k];
1429: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
1430: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
1431: }
1433: MatPreallocateInitialize(comm,pn,an,dnz,onz);
1434: PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);
1435: for (i=0; i<pn; i++) { /* for each local row of C */
1436: /* add C_loc into C */
1437: nzi = c_loc->i[i+1] - c_loc->i[i];
1438: Jptr = c_loc->j + c_loc->i[i];
1439: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1441: /* add received col data into lnk */
1442: for (k=0; k<nrecv; k++) { /* k-th received message */
1443: if (i == *nextrow[k]) { /* i-th row */
1444: nzi = *(nextci[k]+1) - *nextci[k];
1445: Jptr = buf_rj[k] + *nextci[k];
1446: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1447: nextrow[k]++; nextci[k]++;
1448: }
1449: }
1451: /* add missing diagonal entry */
1452: if (C->force_diagonals) {
1453: k = i + owners[rank]; /* column index */
1454: PetscLLCondensedAddSorted(1,&k,lnk,lnkbt);
1455: }
1457: nzi = lnk[0];
1459: /* copy data into free space, then initialize lnk */
1460: PetscLLCondensedClean(aN,nzi,current_space->array,lnk,lnkbt);
1461: MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);
1462: }
1463: PetscFree3(buf_ri_k,nextrow,nextci);
1464: PetscLLDestroy(lnk,lnkbt);
1465: PetscFreeSpaceDestroy(free_space);
1467: /* local sizes and preallocation */
1468: MatSetSizes(C,pn,an,PETSC_DETERMINE,PETSC_DETERMINE);
1469: if (P->cmap->bs > 0) PetscLayoutSetBlockSize(C->rmap,P->cmap->bs);
1470: if (A->cmap->bs > 0) PetscLayoutSetBlockSize(C->cmap,A->cmap->bs);
1471: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1472: MatPreallocateFinalize(dnz,onz);
1474: /* add C_loc and C_oth to C */
1475: MatGetOwnershipRange(C,&rstart,NULL);
1476: for (i=0; i<pn; i++) {
1477: ncols = c_loc->i[i+1] - c_loc->i[i];
1478: cols = c_loc->j + c_loc->i[i];
1479: row = rstart + i;
1480: MatSetValues(C,1,(const PetscInt*)&row,ncols,(const PetscInt*)cols,NULL,INSERT_VALUES);
1482: if (C->force_diagonals) {
1483: MatSetValues(C,1,(const PetscInt*)&row,1,(const PetscInt*)&row,NULL,INSERT_VALUES);
1484: }
1485: }
1486: for (i=0; i<con; i++) {
1487: ncols = c_oth->i[i+1] - c_oth->i[i];
1488: cols = c_oth->j + c_oth->i[i];
1489: row = prmap[i];
1490: MatSetValues(C,1,(const PetscInt*)&row,ncols,(const PetscInt*)cols,NULL,INSERT_VALUES);
1491: }
1492: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1493: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1494: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
1496: /* free temporary communication buffers and the row layout */
1497: PetscFree(id_r);
1498: PetscFree(len_r);
1499: PetscFree(buf_ri[0]);
1500: PetscFree(buf_ri);
1501: PetscFree(buf_rj[0]);
1502: PetscFree(buf_rj);
1503: PetscLayoutDestroy(&rowmap);
1505: /* attach the supporting struct to C for reuse */
1506: C->product->data = ptap;
1507: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
1508: return 0;
1509: }
1511: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C)
1512: {
1513: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data;
1514: Mat_SeqAIJ *c_seq;
1515: Mat_APMPI *ptap;
1516: Mat A_loc,C_loc,C_oth;
1517: PetscInt i,rstart,rend,cm,ncols,row;
1518: const PetscInt *cols;
1519: const PetscScalar *vals;
1521: MatCheckProduct(C,3);
1522: ptap = (Mat_APMPI*)C->product->data;
1525: MatZeroEntries(C);
1527: if (ptap->reuse == MAT_REUSE_MATRIX) {
1528: /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1529: /* 1) get R = Pd^T, Ro = Po^T */
1530: /*----------------------------*/
1531: MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);
1532: MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);
1534: /* 2) compute numeric A_loc */
1535: /*--------------------------*/
1536: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&ptap->A_loc);
1537: }
1539: /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1540: A_loc = ptap->A_loc;
1541: ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,A_loc,ptap->C_loc);
1542: ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,A_loc,ptap->C_oth);
1543: C_loc = ptap->C_loc;
1544: C_oth = ptap->C_oth;
1546: /* add C_loc and C_oth to C */
1547: MatGetOwnershipRange(C,&rstart,&rend);
1549: /* C_loc -> C */
1550: cm = C_loc->rmap->N;
1551: c_seq = (Mat_SeqAIJ*)C_loc->data;
1552: cols = c_seq->j;
1553: vals = c_seq->a;
1554: for (i=0; i<cm; i++) {
1555: ncols = c_seq->i[i+1] - c_seq->i[i];
1556: row = rstart + i;
1557: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1558: cols += ncols; vals += ncols;
1559: }
1561: /* Co -> C, off-processor part */
1562: cm = C_oth->rmap->N;
1563: c_seq = (Mat_SeqAIJ*)C_oth->data;
1564: cols = c_seq->j;
1565: vals = c_seq->a;
1566: for (i=0; i<cm; i++) {
1567: ncols = c_seq->i[i+1] - c_seq->i[i];
1568: row = p->garray[i];
1569: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1570: cols += ncols; vals += ncols;
1571: }
1572: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1573: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1574: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
1576: ptap->reuse = MAT_REUSE_MATRIX;
1577: return 0;
1578: }
1580: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C)
1581: {
1582: Mat_Merge_SeqsToMPI *merge;
1583: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data;
1584: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1585: Mat_APMPI *ptap;
1586: PetscInt *adj;
1587: PetscInt i,j,k,anz,pnz,row,*cj,nexta;
1588: MatScalar *ada,*ca,valtmp;
1589: PetscInt am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1590: MPI_Comm comm;
1591: PetscMPIInt size,rank,taga,*len_s;
1592: PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1593: PetscInt **buf_ri,**buf_rj;
1594: PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1595: MPI_Request *s_waits,*r_waits;
1596: MPI_Status *status;
1597: MatScalar **abuf_r,*ba_i,*pA,*coa,*ba;
1598: const PetscScalar *dummy;
1599: PetscInt *ai,*aj,*coi,*coj,*poJ,*pdJ;
1600: Mat A_loc;
1601: Mat_SeqAIJ *a_loc;
1603: MatCheckProduct(C,3);
1604: ptap = (Mat_APMPI*)C->product->data;
1607: PetscObjectGetComm((PetscObject)C,&comm);
1608: MPI_Comm_size(comm,&size);
1609: MPI_Comm_rank(comm,&rank);
1611: merge = ptap->merge;
1613: /* 2) compute numeric C_seq = P_loc^T*A_loc */
1614: /*------------------------------------------*/
1615: /* get data from symbolic products */
1616: coi = merge->coi; coj = merge->coj;
1617: PetscCalloc1(coi[pon]+1,&coa);
1618: bi = merge->bi; bj = merge->bj;
1619: owners = merge->rowmap->range;
1620: PetscCalloc1(bi[cm]+1,&ba);
1622: /* get A_loc by taking all local rows of A */
1623: A_loc = ptap->A_loc;
1624: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
1625: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1626: ai = a_loc->i;
1627: aj = a_loc->j;
1629: /* trigger copy to CPU */
1630: MatSeqAIJGetArrayRead(p->A,&dummy);
1631: MatSeqAIJRestoreArrayRead(p->A,&dummy);
1632: MatSeqAIJGetArrayRead(p->B,&dummy);
1633: MatSeqAIJRestoreArrayRead(p->B,&dummy);
1634: for (i=0; i<am; i++) {
1635: anz = ai[i+1] - ai[i];
1636: adj = aj + ai[i];
1637: ada = a_loc->a + ai[i];
1639: /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1640: /*-------------------------------------------------------------*/
1641: /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1642: pnz = po->i[i+1] - po->i[i];
1643: poJ = po->j + po->i[i];
1644: pA = po->a + po->i[i];
1645: for (j=0; j<pnz; j++) {
1646: row = poJ[j];
1647: cj = coj + coi[row];
1648: ca = coa + coi[row];
1649: /* perform sparse axpy */
1650: nexta = 0;
1651: valtmp = pA[j];
1652: for (k=0; nexta<anz; k++) {
1653: if (cj[k] == adj[nexta]) {
1654: ca[k] += valtmp*ada[nexta];
1655: nexta++;
1656: }
1657: }
1658: PetscLogFlops(2.0*anz);
1659: }
1661: /* put the value into Cd (diagonal part) */
1662: pnz = pd->i[i+1] - pd->i[i];
1663: pdJ = pd->j + pd->i[i];
1664: pA = pd->a + pd->i[i];
1665: for (j=0; j<pnz; j++) {
1666: row = pdJ[j];
1667: cj = bj + bi[row];
1668: ca = ba + bi[row];
1669: /* perform sparse axpy */
1670: nexta = 0;
1671: valtmp = pA[j];
1672: for (k=0; nexta<anz; k++) {
1673: if (cj[k] == adj[nexta]) {
1674: ca[k] += valtmp*ada[nexta];
1675: nexta++;
1676: }
1677: }
1678: PetscLogFlops(2.0*anz);
1679: }
1680: }
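
/* The "sparse axpy" loops above merge two sorted index lists: each nonzero P(i,row) with value
   valtmp contributes valtmp*A_loc(i,:) to row 'row' of Co or Cd, and since the symbolic phase made
   cj a sorted superset of adj (the sorted column list of A_loc(i,:)), one forward scan suffices:
   k walks cj while nexta walks adj, accumulating whenever cj[k] == adj[nexta]. Tiny example with
   made-up indices: cj = {0,2,5,7}, adj = {2,7}; the scan stops at cj[1]==2 (add valtmp*ada[0]) and
   at cj[3]==7 (add valtmp*ada[1]), roughly 2*anz flops per contribution, matching the
   PetscLogFlops(2.0*anz) calls. */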
1682: /* 3) send and recv matrix values coa */
1683: /*------------------------------------*/
1684: buf_ri = merge->buf_ri;
1685: buf_rj = merge->buf_rj;
1686: len_s = merge->len_s;
1687: PetscCommGetNewTag(comm,&taga);
1688: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
1690: PetscMalloc2(merge->nsend+1,&s_waits,size,&status);
1691: for (proc=0,k=0; proc<size; proc++) {
1692: if (!len_s[proc]) continue;
1693: i = merge->owners_co[proc];
1694: MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
1695: k++;
1696: }
1697: if (merge->nrecv) MPI_Waitall(merge->nrecv,r_waits,status);
1698: if (merge->nsend) MPI_Waitall(merge->nsend,s_waits,status);
1700: PetscFree2(s_waits,status);
1701: PetscFree(r_waits);
1702: PetscFree(coa);
1704: /* 4) insert local Cseq and received values into Cmpi */
1705: /*----------------------------------------------------*/
1706: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1707: for (k=0; k<merge->nrecv; k++) {
1708: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1709: nrows = *(buf_ri_k[k]);
1710: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th received i-structure */
1711: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1712: }
1714: for (i=0; i<cm; i++) {
1715: row = owners[rank] + i; /* global row index of C_seq */
1716: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
1717: ba_i = ba + bi[i];
1718: bnz = bi[i+1] - bi[i];
1719: /* add received vals into ba */
1720: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1721: /* i-th row */
1722: if (i == *nextrow[k]) {
1723: cnz = *(nextci[k]+1) - *nextci[k];
1724: cj = buf_rj[k] + *(nextci[k]);
1725: ca = abuf_r[k] + *(nextci[k]);
1726: nextcj = 0;
1727: for (j=0; nextcj<cnz; j++) {
1728: if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1729: ba_i[j] += ca[nextcj++];
1730: }
1731: }
1732: nextrow[k]++; nextci[k]++;
1733: PetscLogFlops(2.0*cnz);
1734: }
1735: }
1736: MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
1737: }
1738: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1739: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1741: PetscFree(ba);
1742: PetscFree(abuf_r[0]);
1743: PetscFree(abuf_r);
1744: PetscFree3(buf_ri_k,nextrow,nextci);
1745: return 0;
1746: }
1748: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat C)
1749: {
1751: Mat A_loc;
1752: Mat_APMPI *ptap;
1753: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1754: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data;
1755: PetscInt *pdti,*pdtj,*poti,*potj,*ptJ;
1756: PetscInt nnz;
1757: PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
1758: PetscInt am =A->rmap->n,pn=P->cmap->n;
1759: MPI_Comm comm;
1760: PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
1761: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1762: PetscInt len,proc,*dnz,*onz,*owners;
1763: PetscInt nzi,*bi,*bj;
1764: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1765: MPI_Request *swaits,*rwaits;
1766: MPI_Status *sstatus,rstatus;
1767: Mat_Merge_SeqsToMPI *merge;
1768: PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
1769: PetscReal afill =1.0,afill_tmp;
1770: PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Armax;
1771: Mat_SeqAIJ *a_loc;
1772: PetscTable ta;
1773: MatType mtype;
1775: PetscObjectGetComm((PetscObject)A,&comm);
1776: /* check if matrix local sizes are compatible */
1779: MPI_Comm_size(comm,&size);
1780: MPI_Comm_rank(comm,&rank);
1782: /* create struct Mat_APMPI and attach it to C later */
1783: PetscNew(&ptap);
1785: /* get A_loc by taking all local rows of A */
1786: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
1788: ptap->A_loc = A_loc;
1789: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1790: ai = a_loc->i;
1791: aj = a_loc->j;
1793: /* determine symbolic Co=(p->B)^T*A - send to others */
1794: /*----------------------------------------------------*/
1795: MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);
1796: MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);
1797: pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1798: >= (num of nonzero rows of C_seq) - pn */
1799: PetscMalloc1(pon+1,&coi);
1800: coi[0] = 0;
1802: /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1803: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
1804: PetscFreeSpaceGet(nnz,&free_space);
1805: current_space = free_space;
1807: /* create and initialize a linked list */
1808: PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);
1809: MatRowMergeMax_SeqAIJ(a_loc,am,ta);
1810: PetscTableGetCount(ta,&Armax);
1812: PetscLLCondensedCreate_Scalable(Armax,&lnk);
1814: for (i=0; i<pon; i++) {
1815: pnz = poti[i+1] - poti[i];
1816: ptJ = potj + poti[i];
1817: for (j=0; j<pnz; j++) {
1818: row = ptJ[j]; /* row of A_loc == col of Pot */
1819: anz = ai[row+1] - ai[row];
1820: Jptr = aj + ai[row];
1821: /* add nonzero cols of A_loc[row,:] into the sorted linked list lnk */
1822: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1823: }
1824: nnz = lnk[0];
1826: /* If free space is not available, double the total space in the list */
1827: if (current_space->local_remaining<nnz) {
1828: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1829: nspacedouble++;
1830: }
1832: /* Copy data into free space, and zero out dense rows */
1833: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
1835: current_space->array += nnz;
1836: current_space->local_used += nnz;
1837: current_space->local_remaining -= nnz;
1839: coi[i+1] = coi[i] + nnz;
1840: }
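
/* The loop above builds the sparsity pattern of Co = Po^T*A_loc one row at a time: for row i of
   Po^T, the sorted column lists of the A_loc rows selected by its nonzeros are merged with
   PetscLLCondensedAddSorted_Scalable, lnk[0] holds the size of the union, and
   PetscLLCondensedClean_Scalable writes the sorted union into the free-space buffer and resets the
   list. Small example with made-up data: if Po^T(i,:) selects A_loc rows 1 and 3 with column
   patterns {0,4} and {2,4}, the merged row pattern is {0,2,4} and nnz = 3. */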
1842: PetscMalloc1(coi[pon]+1,&coj);
1843: PetscFreeSpaceContiguous(&free_space,coj);
1844: PetscLLCondensedDestroy_Scalable(lnk); /* must destroy to get a new one for C */
1846: afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
1847: if (afill_tmp > afill) afill = afill_tmp;
1849: /* send j-array (coj) of Co to other processors */
1850: /*----------------------------------------------*/
1851: /* determine row ownership */
1852: PetscNew(&merge);
1853: PetscLayoutCreate(comm,&merge->rowmap);
1855: merge->rowmap->n = pn;
1856: merge->rowmap->bs = 1;
1858: PetscLayoutSetUp(merge->rowmap);
1859: owners = merge->rowmap->range;
1861: /* determine the number of messages to send, their lengths */
1862: PetscCalloc1(size,&len_si);
1863: PetscCalloc1(size,&merge->len_s);
1865: len_s = merge->len_s;
1866: merge->nsend = 0;
1868: PetscMalloc1(size+2,&owners_co);
1870: proc = 0;
1871: for (i=0; i<pon; i++) {
1872: while (prmap[i] >= owners[proc+1]) proc++;
1873: len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1874: len_s[proc] += coi[i+1] - coi[i];
1875: }
1877: len = 0; /* max length of buf_si[] */
1878: owners_co[0] = 0;
1879: for (proc=0; proc<size; proc++) {
1880: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1881: if (len_si[proc]) {
1882: merge->nsend++;
1883: len_si[proc] = 2*(len_si[proc] + 1);
1884: len += len_si[proc];
1885: }
1886: }
1888: /* determine the number and length of messages to receive for coi and coj */
1889: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
1890: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
1892: /* post the Irecv and Isend of coj */
1893: PetscCommGetNewTag(comm,&tagj);
1894: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
1895: PetscMalloc1(merge->nsend+1,&swaits);
1896: for (proc=0, k=0; proc<size; proc++) {
1897: if (!len_s[proc]) continue;
1898: i = owners_co[proc];
1899: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1900: k++;
1901: }
1903: /* receives and sends of coj are complete */
1904: PetscMalloc1(size,&sstatus);
1905: for (i=0; i<merge->nrecv; i++) {
1906: PETSC_UNUSED PetscMPIInt icompleted;
1907: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1908: }
1909: PetscFree(rwaits);
1910: if (merge->nsend) MPI_Waitall(merge->nsend,swaits,sstatus);
1912: /* add received column indices into table to update Armax */
1913: /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1914: for (k=0; k<merge->nrecv; k++) {/* k-th received message */
1915: Jptr = buf_rj[k];
1916: for (j=0; j<merge->len_r[k]; j++) {
1917: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
1918: }
1919: }
1920: PetscTableGetCount(ta,&Armax);
1922: /* send and recv coi */
1923: /*-------------------*/
1924: PetscCommGetNewTag(comm,&tagi);
1925: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
1926: PetscMalloc1(len+1,&buf_s);
1927: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1928: for (proc=0,k=0; proc<size; proc++) {
1929: if (!len_s[proc]) continue;
1930: /* form outgoing message for i-structure:
1931: buf_si[0]: nrows to be sent
1932: [1:nrows]: row index (local to the destination process)
1933: [nrows+1:2*nrows+1]: i-structure index (running nonzero counts)
1934: (a worked example of this layout follows the loop below) */
1935: /*-------------------------------------------*/
1936: nrows = len_si[proc]/2 - 1;
1937: buf_si_i = buf_si + nrows+1;
1938: buf_si[0] = nrows;
1939: buf_si_i[0] = 0;
1940: nrows = 0;
1941: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1942: nzi = coi[i+1] - coi[i];
1943: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1944: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1945: nrows++;
1946: }
1947: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1948: k++;
1949: buf_si += len_si[proc];
1950: }
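
/* Worked example of the i-structure message built in the loop above (made-up values): suppose two
   rows of Co go to process 'proc', with 3 and 5 nonzeros and destination-local row indices 4 and 9.
   Then len_si[proc] = 2*(2+1) = 6 and the buffer sent is buf_si = {2, 4, 9, 0, 3, 8}: the row count,
   the two local row indices, then the running nonzero counts 0,3,8. The corresponding 8 column
   indices (and later the values) travel separately in messages of length len_s[proc]. */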
1951: i = merge->nrecv;
1952: while (i--) {
1953: PETSC_UNUSED PetscMPIInt icompleted;
1954: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
1955: }
1956: PetscFree(rwaits);
1957: if (merge->nsend) MPI_Waitall(merge->nsend,swaits,sstatus);
1958: PetscFree(len_si);
1959: PetscFree(len_ri);
1960: PetscFree(swaits);
1961: PetscFree(sstatus);
1962: PetscFree(buf_s);
1964: /* compute the local portion of C (mpi mat) */
1965: /*------------------------------------------*/
1966: /* allocate bi array and free space for accumulating nonzero column info */
1967: PetscMalloc1(pn+1,&bi);
1968: bi[0] = 0;
1970: /* set initial free space to be fill*(nnz(P) + nnz(A_loc)) */
1971: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
1972: PetscFreeSpaceGet(nnz,&free_space);
1973: current_space = free_space;
1975: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1976: for (k=0; k<merge->nrecv; k++) {
1977: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1978: nrows = *buf_ri_k[k];
1979: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */
1980: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1981: }
1983: PetscLLCondensedCreate_Scalable(Armax,&lnk);
1984: MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);
1985: rmax = 0;
1986: for (i=0; i<pn; i++) {
1987: /* add the nonzero pattern of Pdt[i,:]*A_loc into lnk */
1988: pnz = pdti[i+1] - pdti[i];
1989: ptJ = pdtj + pdti[i];
1990: for (j=0; j<pnz; j++) {
1991: row = ptJ[j]; /* row of A_loc == col of Pdt */
1992: anz = ai[row+1] - ai[row];
1993: Jptr = aj + ai[row];
1994: /* add nonzero cols of A_loc[row,:] into the sorted linked list lnk */
1995: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1996: }
1998: /* add received col data into lnk */
1999: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
2000: if (i == *nextrow[k]) { /* i-th row */
2001: nzi = *(nextci[k]+1) - *nextci[k];
2002: Jptr = buf_rj[k] + *nextci[k];
2003: PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);
2004: nextrow[k]++; nextci[k]++;
2005: }
2006: }
2008: /* add missing diagonal entry */
2009: if (C->force_diagonals) {
2010: k = i + owners[rank]; /* column index */
2011: PetscLLCondensedAddSorted_Scalable(1,&k,lnk);
2012: }
2014: nnz = lnk[0];
2016: /* if free space is not available, make more free space */
2017: if (current_space->local_remaining<nnz) {
2018: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
2019: nspacedouble++;
2020: }
2021: /* copy data into free space, then initialize lnk */
2022: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
2023: MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);
2025: current_space->array += nnz;
2026: current_space->local_used += nnz;
2027: current_space->local_remaining -= nnz;
2029: bi[i+1] = bi[i] + nnz;
2030: if (nnz > rmax) rmax = nnz;
2031: }
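
/* MatPreallocateSet() above splits the nonzeros of each owned row of C into dnz[] (columns that fall
   in this process's diagonal block) and onz[] (all remaining columns); these counts feed
   MatMPIAIJSetPreallocation(C,0,dnz,0,onz) below. For example, with a hypothetical local column
   ownership range [5,10), a row with columns {2,7,12} adds 1 to dnz and 2 to onz. */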
2032: PetscFree3(buf_ri_k,nextrow,nextci);
2034: PetscMalloc1(bi[pn]+1,&bj);
2035: PetscFreeSpaceContiguous(&free_space,bj);
2036: afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
2037: if (afill_tmp > afill) afill = afill_tmp;
2038: PetscLLCondensedDestroy_Scalable(lnk);
2039: PetscTableDestroy(&ta);
2040: MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);
2041: MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);
2043: /* create symbolic parallel matrix C - why can it not be assembled in the numeric part? */
2044: /*-------------------------------------------------------------------------------*/
2045: MatSetSizes(C,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);
2046: MatSetBlockSizes(C,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));
2047: MatGetType(A,&mtype);
2048: MatSetType(C,mtype);
2049: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
2050: MatPreallocateFinalize(dnz,onz);
2051: MatSetBlockSize(C,1);
2052: MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
2053: for (i=0; i<pn; i++) {
2054: row = i + rstart;
2055: nnz = bi[i+1] - bi[i];
2056: Jptr = bj + bi[i];
2057: MatSetValues(C,1,&row,nnz,Jptr,NULL,INSERT_VALUES);
2058: }
2059: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
2060: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
2061: MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
2062: merge->bi = bi;
2063: merge->bj = bj;
2064: merge->coi = coi;
2065: merge->coj = coj;
2066: merge->buf_ri = buf_ri;
2067: merge->buf_rj = buf_rj;
2068: merge->owners_co = owners_co;
2070: /* attach the supporting struct to C for reuse */
2071: C->product->data = ptap;
2072: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2073: ptap->merge = merge;
2075: C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
2077: #if defined(PETSC_USE_INFO)
2078: if (bi[pn] != 0) {
2079: PetscInfo(C,"Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
2080: PetscInfo(C,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
2081: } else {
2082: PetscInfo(C,"Empty matrix product\n");
2083: }
2084: #endif
2085: return 0;
2086: }
2088: /* ---------------------------------------------------------------- */
2089: static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2090: {
2091: Mat_Product *product = C->product;
2092: Mat A=product->A,B=product->B;
2093: PetscReal fill=product->fill;
2094: PetscBool flg;
2096: /* scalable */
2097: PetscStrcmp(product->alg,"scalable",&flg);
2098: if (flg) {
2099: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
2100: goto next;
2101: }
2103: /* nonscalable */
2104: PetscStrcmp(product->alg,"nonscalable",&flg);
2105: if (flg) {
2106: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
2107: goto next;
2108: }
2110: /* matmatmult */
2111: PetscStrcmp(product->alg,"at*b",&flg);
2112: if (flg) {
2113: Mat At;
2114: Mat_APMPI *ptap;
2116: MatTranspose(A,MAT_INITIAL_MATRIX,&At);
2117: MatMatMultSymbolic_MPIAIJ_MPIAIJ(At,B,fill,C);
2118: ptap = (Mat_APMPI*)C->product->data;
2119: if (ptap) {
2120: ptap->Pt = At;
2121: C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2122: }
2123: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2124: goto next;
2125: }
2127: /* backend general code */
2128: PetscStrcmp(product->alg,"backend",&flg);
2129: if (flg) {
2130: MatProductSymbolic_MPIAIJBACKEND(C);
2131: return 0;
2132: }
2134: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct algorithm is not supported");
2136: next:
2137: C->ops->productnumeric = MatProductNumeric_AtB;
2138: return 0;
2139: }
2141: /* ---------------------------------------------------------------- */
2142: /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2143: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2144: {
2146: Mat_Product *product = C->product;
2147: Mat A=product->A,B=product->B;
2148: #if defined(PETSC_HAVE_HYPRE)
2149: const char *algTypes[5] = {"scalable","nonscalable","seqmpi","backend","hypre"};
2150: PetscInt nalg = 5;
2151: #else
2152: const char *algTypes[4] = {"scalable","nonscalable","seqmpi","backend"};
2153: PetscInt nalg = 4;
2154: #endif
2155: PetscInt alg = 1; /* set nonscalable algorithm as default */
2156: PetscBool flg;
2157: MPI_Comm comm;
2159: /* Check matrix local sizes */
2160: PetscObjectGetComm((PetscObject)C,&comm);
2163: /* Set "nonscalable" as default algorithm */
2164: PetscStrcmp(C->product->alg,"default",&flg);
2165: if (flg) {
2166: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2168: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2169: if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2170: MatInfo Ainfo,Binfo;
2171: PetscInt nz_local;
2172: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2174: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2175: MatGetInfo(B,MAT_LOCAL,&Binfo);
2176: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2178: if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2179: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2181: if (alg_scalable) {
2182: alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2183: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2184: PetscInfo(B,"Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n",B->cmap->N,(double)(product->fill*nz_local));
2185: }
2186: }
2187: }
2189: /* Get runtime option */
2190: if (product->api_user) {
2191: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMult","Mat");
2192: PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2193: PetscOptionsEnd();
2194: } else {
2195: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AB","Mat");
2196: PetscOptionsEList("-mat_product_algorithm","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2197: PetscOptionsEnd();
2198: }
2199: if (flg) {
2200: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2201: }
2203: C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2204: return 0;
2205: }
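
/* Runtime sketch ("./app" is a placeholder executable): the algorithm chosen above can be overridden
   with the options registered in the PetscOptionsEList() calls, e.g.

     ./app -matmatmult_via scalable              when the product is requested through MatMatMult()
     ./app -mat_product_algorithm scalable       when it is driven through the MatProduct API
*/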
2207: /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2208: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2209: {
2211: Mat_Product *product = C->product;
2212: Mat A=product->A,B=product->B;
2213: const char *algTypes[4] = {"scalable","nonscalable","at*b","backend"};
2214: PetscInt nalg = 4;
2215: PetscInt alg = 1; /* set default algorithm */
2216: PetscBool flg;
2217: MPI_Comm comm;
2219: /* Check matrix local sizes */
2220: PetscObjectGetComm((PetscObject)C,&comm);
2223: /* Set default algorithm */
2224: PetscStrcmp(C->product->alg,"default",&flg);
2225: if (flg) {
2226: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2227: }
2229: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2230: if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2231: MatInfo Ainfo,Binfo;
2232: PetscInt nz_local;
2233: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2235: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2236: MatGetInfo(B,MAT_LOCAL,&Binfo);
2237: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2239: if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2240: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2242: if (alg_scalable) {
2243: alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2244: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2245: PetscInfo(B,"Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n",B->cmap->N,(double)(product->fill*nz_local));
2246: }
2247: }
2249: /* Get runtime option */
2250: if (product->api_user) {
2251: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatTransposeMatMult","Mat");
2252: PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2253: PetscOptionsEnd();
2254: } else {
2255: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AtB","Mat");
2256: PetscOptionsEList("-mat_product_algorithm","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2257: PetscOptionsEnd();
2258: }
2259: if (flg) {
2260: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2261: }
2263: C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2264: return 0;
2265: }
2267: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2268: {
2270: Mat_Product *product = C->product;
2271: Mat A=product->A,P=product->B;
2272: MPI_Comm comm;
2273: PetscBool flg;
2274: PetscInt alg=1; /* set default algorithm */
2275: #if !defined(PETSC_HAVE_HYPRE)
2276: const char *algTypes[5] = {"scalable","nonscalable","allatonce","allatonce_merged","backend"};
2277: PetscInt nalg=5;
2278: #else
2279: const char *algTypes[6] = {"scalable","nonscalable","allatonce","allatonce_merged","backend","hypre"};
2280: PetscInt nalg=6;
2281: #endif
2282: PetscInt pN=P->cmap->N;
2284: /* Check matrix local sizes */
2285: PetscObjectGetComm((PetscObject)C,&comm);
2289: /* Set "nonscalable" as default algorithm */
2290: PetscStrcmp(C->product->alg,"default",&flg);
2291: if (flg) {
2292: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2294: /* Set "scalable" as default if PN and local nonzeros of A and P are large */
2295: if (pN > 100000) {
2296: MatInfo Ainfo,Pinfo;
2297: PetscInt nz_local;
2298: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2300: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2301: MatGetInfo(P,MAT_LOCAL,&Pinfo);
2302: nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
2304: if (pN > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2305: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2307: if (alg_scalable) {
2308: alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2309: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2310: }
2311: }
2312: }
2314: /* Get runtime option */
2315: if (product->api_user) {
2316: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatPtAP","Mat");
2317: PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
2318: PetscOptionsEnd();
2319: } else {
2320: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_PtAP","Mat");
2321: PetscOptionsEList("-mat_product_algorithm","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
2322: PetscOptionsEnd();
2323: }
2324: if (flg) {
2325: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2326: }
2328: C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2329: return 0;
2330: }
2332: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2333: {
2334: Mat_Product *product = C->product;
2335: Mat A = product->A,R=product->B;
2337: /* Check matrix local sizes */
2340: C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2341: return 0;
2342: }
2344: /*
2345: Set options for ABC = A*B*C = A*(B*C); the algorithm for ABC must be one of the AB algorithms
2346: */
2347: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2348: {
2350: Mat_Product *product = C->product;
2351: PetscBool flg = PETSC_FALSE;
2352: PetscInt alg = 1; /* default algorithm */
2353: const char *algTypes[3] = {"scalable","nonscalable","seqmpi"};
2354: PetscInt nalg = 3;
2356: /* Set default algorithm */
2357: PetscStrcmp(C->product->alg,"default",&flg);
2358: if (flg) {
2359: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2360: }
2362: /* Get runtime option */
2363: if (product->api_user) {
2364: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMatMult","Mat");
2365: PetscOptionsEList("-matmatmatmult_via","Algorithmic approach","MatMatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2366: PetscOptionsEnd();
2367: } else {
2368: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_ABC","Mat");
2369: PetscOptionsEList("-mat_product_algorithm","Algorithmic approach","MatProduct_ABC",algTypes,nalg,algTypes[alg],&alg,&flg);
2370: PetscOptionsEnd();
2371: }
2372: if (flg) {
2373: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2374: }
2376: C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2377: C->ops->productsymbolic = MatProductSymbolic_ABC;
2378: return 0;
2379: }
2381: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2382: {
2383: Mat_Product *product = C->product;
2385: switch (product->type) {
2386: case MATPRODUCT_AB:
2387: MatProductSetFromOptions_MPIAIJ_AB(C);
2388: break;
2389: case MATPRODUCT_AtB:
2390: MatProductSetFromOptions_MPIAIJ_AtB(C);
2391: break;
2392: case MATPRODUCT_PtAP:
2393: MatProductSetFromOptions_MPIAIJ_PtAP(C);
2394: break;
2395: case MATPRODUCT_RARt:
2396: MatProductSetFromOptions_MPIAIJ_RARt(C);
2397: break;
2398: case MATPRODUCT_ABC:
2399: MatProductSetFromOptions_MPIAIJ_ABC(C);
2400: break;
2401: default:
2402: break;
2403: }
2404: return 0;
2405: }
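
/* A minimal sketch of how this dispatcher is reached through the generic MatProduct flow, assuming
   A and B are assembled MATMPIAIJ matrices of compatible sizes (illustration only):

     Mat C;
     MatProductCreate(A,B,NULL,&C);
     MatProductSetType(C,MATPRODUCT_AB);       or MATPRODUCT_AtB, MATPRODUCT_PtAP, ...
     MatProductSetAlgorithm(C,"scalable");     optional; "default" is otherwise resolved above
     MatProductSetFromOptions(C);              ends up in MatProductSetFromOptions_MPIAIJ()
     MatProductSymbolic(C);
     MatProductNumeric(C);
     MatDestroy(&C);

   Convenience interfaces such as MatMatMult(), MatTransposeMatMult(), and MatPtAP() wrap this same
   sequence. */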