Actual source code: mpimatmatmult.c
petsc-3.13.6 2020-09-29
2: /*
3: Defines matrix-matrix product routines for pairs of MPIAIJ matrices
4: C = A * B
5: */
6: #include <../src/mat/impls/aij/seq/aij.h>
7: #include <../src/mat/utils/freespace.h>
8: #include <../src/mat/impls/aij/mpi/mpiaij.h>
9: #include <petscbt.h>
10: #include <../src/mat/impls/dense/mpi/mpidense.h>
11: #include <petsc/private/vecimpl.h>
12: #include <petsc/private/vecscatterimpl.h>
14: #if defined(PETSC_HAVE_HYPRE)
15: PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat);
16: #endif
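/*
   A minimal usage sketch (not part of this file) showing how the MPIAIJ product
   kernels below are typically reached from user code; error checking is omitted
   and the variable names are illustrative only:

     Mat A,B,C;
     // ... assemble A and B as MATMPIAIJ ...
     MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);      // convenience interface

     // or, equivalently, through the MatProduct interface
     MatProductCreate(A,B,NULL,&C);
     MatProductSetType(C,MATPRODUCT_AB);
     MatProductSetAlgorithm(C,"scalable");                     // or "nonscalable", "seqmpi", "hypre"
     MatProductSetFill(C,PETSC_DEFAULT);
     MatProductSetFromOptions(C);
     MatProductSymbolic(C);                                    // dispatches to MatProductSymbolic_AB_MPIAIJ_MPIAIJ()
     MatProductNumeric(C);
*/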
18: PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
19: {
20: PetscErrorCode ierr;
21: Mat_Product *product = C->product;
22: Mat A=product->A,B=product->B;
23: MatProductAlgorithm alg=product->alg;
24: PetscReal fill=product->fill;
25: PetscBool flg;
28: /* scalable */
29: PetscStrcmp(alg,"scalable",&flg);
30: if (flg) {
31: MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
32: goto next;
33: }
35: /* nonscalable */
36: PetscStrcmp(alg,"nonscalable",&flg);
37: if (flg) {
38: MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
39: goto next;
40: }
42: /* seqmpi */
43: PetscStrcmp(alg,"seqmpi",&flg);
44: if (flg) {
45: MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A,B,fill,C);
46: goto next;
47: }
49: #if defined(PETSC_HAVE_HYPRE)
50: PetscStrcmp(alg,"hypre",&flg);
51: if (flg) {
52: MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);
53: return(0);
54: }
55: #endif
56: SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_SUP,"Mat Product Algorithm is not supported");
58: next:
59: {
60: Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data;
61: Mat_APMPI *ap = c->ap;
62: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatFreeIntermediateDataStructures","Mat");
63: ap->freestruct = PETSC_FALSE;
64: PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",ap->freestruct,&ap->freestruct, NULL);
65: PetscOptionsEnd();
66: }
67: return(0);
68: }
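/*
   Illustrative invocation of the option registered above (the application name
   "./app" is an assumption, not part of this file):

     mpiexec -n 4 ./app -mat_freeintermediatedatastructures

   This sets ap->freestruct, so the numeric kernels release the intermediate AP
   data after the product is computed to save memory; later attempts to reuse the
   product then trigger the PETSC_ERR_ARG_WRONGSTATE errors seen in the numeric
   routines below.
*/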
70: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
71: {
73: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
74: Mat_APMPI *ptap = a->ap;
77: PetscFree2(ptap->startsj_s,ptap->startsj_r);
78: PetscFree(ptap->bufa);
79: MatDestroy(&ptap->P_loc);
80: MatDestroy(&ptap->P_oth);
81: MatDestroy(&ptap->Pt);
82: PetscFree(ptap->api);
83: PetscFree(ptap->apj);
84: PetscFree(ptap->apa);
85: ptap->destroy(A);
86: PetscFree(ptap);
87: return(0);
88: }
90: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C)
91: {
93: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
94: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
95: Mat_SeqAIJ *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
96: PetscScalar *cda=cd->a,*coa=co->a;
97: Mat_SeqAIJ *p_loc,*p_oth;
98: PetscScalar *apa,*ca;
99: PetscInt cm =C->rmap->n;
100: Mat_APMPI *ptap=c->ap;
101: PetscInt *api,*apj,*apJ,i,k;
102: PetscInt cstart=C->cmap->rstart;
103: PetscInt cdnz,conz,k0,k1;
104: MPI_Comm comm;
105: PetscMPIInt size;
108: PetscObjectGetComm((PetscObject)A,&comm);
109: MPI_Comm_size(comm,&size);
111: if (!ptap->P_oth && size>1) SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
113: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
114: /*-----------------------------------------------------*/
115: /* update numerical values of P_oth and P_loc */
116: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
117: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
119: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
120: /*----------------------------------------------------------*/
121: /* get data from symbolic products */
122: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
123: p_oth = NULL;
124: if (size >1) {
125: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
126: }
128: /* get apa for storing dense row A[i,:]*P */
129: apa = ptap->apa;
131: api = ptap->api;
132: apj = ptap->apj;
133: for (i=0; i<cm; i++) {
134: /* compute apa = A[i,:]*P */
135: AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
137: /* set values in C */
138: apJ = apj + api[i];
139: cdnz = cd->i[i+1] - cd->i[i];
140: conz = co->i[i+1] - co->i[i];
142: /* 1st off-diagonal part of C */
143: ca = coa + co->i[i];
144: k = 0;
145: for (k0=0; k0<conz; k0++) {
146: if (apJ[k] >= cstart) break;
147: ca[k0] = apa[apJ[k]];
148: apa[apJ[k++]] = 0.0;
149: }
151: /* diagonal part of C */
152: ca = cda + cd->i[i];
153: for (k1=0; k1<cdnz; k1++) {
154: ca[k1] = apa[apJ[k]];
155: apa[apJ[k++]] = 0.0;
156: }
158: /* 2nd off-diagonal part of C */
159: ca = coa + co->i[i];
160: for (; k0<conz; k0++) {
161: ca[k0] = apa[apJ[k]];
162: apa[apJ[k++]] = 0.0;
163: }
164: }
165: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
166: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
168: if (ptap->freestruct) {
169: MatFreeIntermediateDataStructures(C);
170: }
171: return(0);
172: }
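/*
   A hedged sketch (an assumption about the AProw_nonscalable() macro used above;
   the macro is defined elsewhere and the loops below are illustrative only) of the
   dense-row accumulation it performs: row i of A, split into its diagonal (ad) and
   off-diagonal (ao) parts, is multiplied against rows of P_loc/P_oth and scattered
   into the dense work array apa of length pN, indexed by global column:

     PetscInt j,k;
     for (j=ad->i[i]; j<ad->i[i+1]; j++) {
       const PetscScalar v = ad->a[j];
       const PetscInt    r = ad->j[j];
       for (k=p_loc->i[r]; k<p_loc->i[r+1]; k++) apa[p_loc->j[k]] += v*p_loc->a[k];
     }
     for (j=ao->i[i]; j<ao->i[i+1]; j++) {
       const PetscScalar v = ao->a[j];
       const PetscInt    r = ao->j[j];
       for (k=p_oth->i[r]; k<p_oth->i[r+1]; k++) apa[p_oth->j[k]] += v*p_oth->a[k];
     }

   The compressed pattern api/apj computed in the symbolic phase is then used above
   to gather these entries into C and reset apa to zero.
*/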
174: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat C)
175: {
176: PetscErrorCode ierr;
177: MPI_Comm comm;
178: PetscMPIInt size;
179: Mat_APMPI *ptap;
180: PetscFreeSpaceList free_space=NULL,current_space=NULL;
181: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c;
182: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
183: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
184: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
185: PetscInt *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
186: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
187: PetscBT lnkbt;
188: PetscReal afill;
189: MatType mtype;
192: PetscObjectGetComm((PetscObject)A,&comm);
193: MPI_Comm_size(comm,&size);
195: /* create struct Mat_APMPI and attach it to C later */
196: PetscNew(&ptap);
198: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
199: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
201: /* get P_loc by taking all local rows of P */
202: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
204: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
205: pi_loc = p_loc->i; pj_loc = p_loc->j;
206: if (size > 1) {
207: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
208: pi_oth = p_oth->i; pj_oth = p_oth->j;
209: } else {
210: p_oth = NULL;
211: pi_oth = NULL; pj_oth = NULL;
212: }
214: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
215: /*-------------------------------------------------------------------*/
216: PetscMalloc1(am+2,&api);
217: ptap->api = api;
218: api[0] = 0;
220: /* create and initialize a linked list */
221: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
223: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
224: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
225: current_space = free_space;
227: MatPreallocateInitialize(comm,am,pn,dnz,onz);
228: for (i=0; i<am; i++) {
229: /* diagonal portion of A */
230: nzi = adi[i+1] - adi[i];
231: for (j=0; j<nzi; j++) {
232: row = *adj++;
233: pnz = pi_loc[row+1] - pi_loc[row];
234: Jptr = pj_loc + pi_loc[row];
235: /* add non-zero cols of P into the sorted linked list lnk */
236: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
237: }
238: /* off-diagonal portion of A */
239: nzi = aoi[i+1] - aoi[i];
240: for (j=0; j<nzi; j++) {
241: row = *aoj++;
242: pnz = pi_oth[row+1] - pi_oth[row];
243: Jptr = pj_oth + pi_oth[row];
244: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
245: }
247: apnz = lnk[0];
248: api[i+1] = api[i] + apnz;
250: /* if free space is not available, double the total space in the list */
251: if (current_space->local_remaining<apnz) {
252: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
253: nspacedouble++;
254: }
256: /* Copy data into free space, then initialize lnk */
257: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
258: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
260: current_space->array += apnz;
261: current_space->local_used += apnz;
262: current_space->local_remaining -= apnz;
263: }
265: /* Allocate space for apj, initialize apj, and */
266: /* destroy list of free space and other temporary array(s) */
267: PetscMalloc1(api[am]+1,&ptap->apj);
268: apj = ptap->apj;
269: PetscFreeSpaceContiguous(&free_space,ptap->apj);
270: PetscLLDestroy(lnk,lnkbt);
272: /* malloc apa to store dense row A[i,:]*P */
273: PetscCalloc1(pN,&ptap->apa);
275: /* set and assemble symbolic parallel matrix C */
276: /*---------------------------------------------*/
277: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
278: MatSetBlockSizesFromMats(C,A,P);
280: MatGetType(A,&mtype);
281: MatSetType(C,mtype);
282: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
284: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
285: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
286: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
287: MatPreallocateFinalize(dnz,onz);
289: ptap->destroy = C->ops->destroy;
290: ptap->duplicate = C->ops->duplicate;
291: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
292: C->ops->productnumeric = MatProductNumeric_AB;
293: C->ops->destroy = MatDestroy_MPIAIJ_MatMatMult;
294: C->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
296: /* attach the supporting struct to C for reuse */
297: c = (Mat_MPIAIJ*)C->data;
298: c->ap = ptap;
300: /* set MatInfo */
301: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
302: if (afill < 1.0) afill = 1.0;
303: C->info.mallocs = nspacedouble;
304: C->info.fill_ratio_given = fill;
305: C->info.fill_ratio_needed = afill;
307: #if defined(PETSC_USE_INFO)
308: if (api[am]) {
309: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
310: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
311: } else {
312: PetscInfo(C,"Empty matrix product\n");
313: }
314: #endif
315: return(0);
316: }
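/*
   A worked example (illustrative numbers only) of the fill diagnostics set above:
   with nnz(A_diag)+nnz(A_off) = 1000, nnz(P_loc) = 800 and fill = 2.0, the initial
   free space holds 2*(1000+800) = 3600 column indices; if the symbolic product ends
   with api[am] = 2700 nonzeros, then fill_ratio_needed ~= 2700/1801 + 1e-5 ~= 1.5
   and nspacedouble stays 0, i.e. no reallocation was needed.
*/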
318: /* ------------------------------------------------------- */
319: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
320: {
321: Mat_Product *product = C->product;
322: Mat A = product->A,B=product->B;
325: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
326: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
328: C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
329: C->ops->productsymbolic = MatProductSymbolic_AB;
330: C->ops->productnumeric = MatProductNumeric_AB;
331: return(0);
332: }
333: /* -------------------------------------------------------------------- */
334: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
335: {
336: Mat_Product *product = C->product;
337: Mat A = product->A,B=product->B;
340: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
341: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
343: C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
344: C->ops->productsymbolic = MatProductSymbolic_AtB;
345: return(0);
346: }
348: /* --------------------------------------------------------------------- */
349: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
350: {
352: Mat_Product *product = C->product;
355: switch (product->type) {
356: case MATPRODUCT_AB:
357: MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C);
358: break;
359: case MATPRODUCT_AtB:
360: MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C);
361: break;
362: default:
363: /* Use MatProduct_Basic() if there is no specific implementation */
364: C->ops->productsymbolic = MatProductSymbolic_Basic;
365: }
366: return(0);
367: }
368: /* ------------------------------------------------------- */
370: typedef struct {
371: Mat workB,Bb,Cb,workB1,Bb1,Cb1;
372: MPI_Request *rwaits,*swaits;
373: PetscInt numBb; /* num of Bb matrices */
374: PetscInt nsends,nrecvs;
375: MPI_Datatype *stype,*rtype;
376: } MPIAIJ_MPIDense;
378: PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
379: {
380: MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
381: PetscErrorCode ierr;
382: PetscInt i;
385: MatDestroy(&contents->workB);
387: if (contents->numBb) {
388: MatDestroy(&contents->Bb);
389: MatDestroy(&contents->Cb);
391: MatDestroy(&contents->workB1);
392: MatDestroy(&contents->Bb1);
393: MatDestroy(&contents->Cb1);
394: }
395: for (i=0; i<contents->nsends; i++) {
396: MPI_Type_free(&contents->stype[i]);
397: }
398: for (i=0; i<contents->nrecvs; i++) {
399: MPI_Type_free(&contents->rtype[i]);
400: }
401: PetscFree4(contents->stype,contents->rtype,contents->rwaits,contents->swaits);
402: PetscFree(contents);
403: return(0);
404: }
406: /*
407: This is a "dummy function" that handles the case where matrix C was created as a dense matrix
408: directly by the user and passed to MatMatMult() with the MAT_REUSE_MATRIX option
411: It is the same as MatMatMultSymbolic_MPIAIJ_MPIDense() except it does not create C
411: */
412: PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIDense(Mat A,Mat B,Mat C)
413: {
414: PetscBool flg;
418: PetscObjectTypeCompare((PetscObject)A,MATNEST,&flg);
419: if (flg) {
420: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
421: MatMatMultSymbolic_Nest_Dense(A,B,PETSC_DEFAULT,&C);
422: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
423: C->ops->matmultnumeric = MatMatMultNumeric_Nest_Dense;
424: } else {
425: MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,PETSC_DEFAULT,C);
426: }
427: (*C->ops->matmultnumeric)(A,B,C);
428: return(0);
429: }
431: /*
432: Create Bb, Cb, Bb1 and Cb1 matrices to be used by MatMatMultSymbolic_MPIAIJ_MPIDense().
433: These matrices are used as wrappers for sub-columns of B and C, thus their own matrix operations are not used.
434: Modified from MatCreateDense().
435: */
436: PETSC_STATIC_INLINE PetscErrorCode MatCreateSubMPIDense_private(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt rbs,PetscInt cbs,PetscScalar *data,Mat *A)
437: {
441: MatCreate(comm,A);
442: MatSetSizes(*A,m,n,M,N);
443: MatSetBlockSizes(*A,rbs,cbs);
444: MatSetType(*A,MATMPIDENSE);
445: MatMPIDenseSetPreallocation(*A,data);
446: (*A)->assembled = PETSC_TRUE;
447: return(0);
448: }
450: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat C)
451: {
452: PetscErrorCode ierr;
453: Mat_MPIAIJ *aij=(Mat_MPIAIJ*)A->data;
454: Mat_MPIDense *b=(Mat_MPIDense*)B->data;
455: Mat_SeqDense *bseq=(Mat_SeqDense*)(b->A)->data;
456: PetscInt nz=aij->B->cmap->n,nsends,nrecvs,i,nrows_to,j,lda=bseq->lda;
457: PetscContainer container;
458: MPIAIJ_MPIDense *contents;
459: VecScatter ctx=aij->Mvctx;
460: PetscInt Am=A->rmap->n,Bm=B->rmap->n,BN=B->cmap->N,Bbn,Bbn1,bs,nrows_from;
461: MPI_Comm comm;
462: MPI_Datatype type1,*stype,*rtype;
463: const PetscInt *sindices,*sstarts,*rstarts;
464: PetscMPIInt *disp;
467: PetscObjectGetComm((PetscObject)A,&comm);
468: if (!C->preallocated) {
469: MatSetSizes(C,Am,B->cmap->n,A->rmap->N,BN);
470: MatSetBlockSizesFromMats(C,A,B);
471: MatSetType(C,MATMPIDENSE);
472: MatMPIDenseSetPreallocation(C,NULL);
473: }
475: PetscNew(&contents);
476: contents->numBb = 0;
478: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);
479: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);
481: /* Create column block of B and C for memory scalability when BN is too large */
482: /* Estimate Bbn, column size of Bb */
483: if (nz) {
484: Bbn1 = 2*Am*BN/nz;
485: } else Bbn1 = BN;
487: bs = PetscAbs(B->cmap->bs);
488: Bbn1 = Bbn1/bs *bs; /* Bbn1 is a multiple of bs */
489: if (Bbn1 > BN) Bbn1 = BN;
490: MPI_Allreduce(&Bbn1,&Bbn,1,MPIU_INT,MPI_MAX,comm);
492: /* Enable runtime option for Bbn */
493: PetscOptionsBegin(comm,((PetscObject)C)->prefix,"MatMatMult","Mat");
494: PetscOptionsInt("-matmatmult_Bbn","Number of columns in Bb","MatMatMult",Bbn,&Bbn,NULL);
495: if (Bbn > BN) SETERRQ2(comm,PETSC_ERR_ARG_SIZ,"Bbn=%D cannot be larger than %D, column size of B",Bbn,BN);
496: PetscOptionsEnd();
498: if (Bbn < BN) {
499: contents->numBb = BN/Bbn;
500: Bbn1 = BN - contents->numBb*Bbn;
501: }
503: if (contents->numBb) {
504: PetscScalar data[1]; /* fake array for Bb and Cb */
505: PetscInfo3(C,"use Bb, BN=%D, Bbn=%D; numBb=%D\n",BN,Bbn,contents->numBb);
506: MatCreateSubMPIDense_private(comm,B->rmap->n,PETSC_DECIDE,A->rmap->N,Bbn,B->rmap->bs,B->cmap->bs,data,&contents->Bb);
507: MatCreateSubMPIDense_private(comm,Am,PETSC_DECIDE,A->rmap->N,Bbn,C->rmap->bs,C->cmap->bs,data,&contents->Cb);
509: if (Bbn1) { /* Create Bb1 and Cb1 for the remaining columns */
510: PetscInfo2(C,"use Bb1, BN=%D, Bbn1=%D\n",BN,Bbn1);
511: MatCreateSubMPIDense_private(comm,B->rmap->n,PETSC_DECIDE,A->rmap->N,Bbn1,B->rmap->bs,B->cmap->bs,data,&contents->Bb1);
512: MatCreateSubMPIDense_private(comm,Am,PETSC_DECIDE,A->rmap->N,Bbn1,C->rmap->bs,C->cmap->bs,data,&contents->Cb1);
514: /* Create work matrix used to store off processor rows of B needed for local product */
515: MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn1,NULL,&contents->workB1);
516: }
517: }
519: /* Create work matrix used to store off processor rows of B needed for local product */
520: MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn,NULL,&contents->workB);
522: /* Use MPI derived data type to reduce memory required by the send/recv buffers */
523: PetscMalloc4(nsends,&stype,nrecvs,&rtype,nrecvs,&contents->rwaits,nsends,&contents->swaits);
524: contents->stype = stype;
525: contents->nsends = nsends;
527: contents->rtype = rtype;
528: contents->nrecvs = nrecvs;
530: PetscMalloc1(Bm+1,&disp);
531: for (i=0; i<nsends; i++) {
532: nrows_to = sstarts[i+1]-sstarts[i];
533: for (j=0; j<nrows_to; j++){
534: disp[j] = sindices[sstarts[i]+j]; /* rowB to be sent */
535: }
536: MPI_Type_create_indexed_block(nrows_to,1,(const PetscMPIInt *)disp,MPIU_SCALAR,&type1);
538: MPI_Type_create_resized(type1,0,lda*sizeof(PetscScalar),&stype[i]);
539: MPI_Type_commit(&stype[i]);
540: MPI_Type_free(&type1);
541: }
543: for (i=0; i<nrecvs; i++) {
544: /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
545: nrows_from = rstarts[i+1]-rstarts[i];
546: disp[0] = 0;
547: MPI_Type_create_indexed_block(1, nrows_from, (const PetscMPIInt *)disp, MPIU_SCALAR, &type1);
548: MPI_Type_create_resized(type1, 0, nz*sizeof(PetscScalar), &rtype[i]);
549: MPI_Type_commit(&rtype[i]);
550: MPI_Type_free(&type1);
551: }
553: PetscFree(disp);
554: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);
555: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);
557: PetscContainerCreate(comm,&container);
558: PetscContainerSetPointer(container,contents);
559: PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);
560: PetscObjectCompose((PetscObject)C,"workB",(PetscObject)container);
561: PetscContainerDestroy(&container);
562: MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
563: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
564: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
565: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
566: return(0);
567: }
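/*
   A hedged, self-contained sketch (illustrative only; names are not part of this
   file) of the derived-datatype trick set up above for sending selected rows of the
   column-major local array of B: the indexed block picks the wanted row offsets
   within one column, and resizing its extent to lda*sizeof(PetscScalar) lets a send
   count of ncols walk across all columns, so no explicit packing buffer is needed.

     MPI_Datatype col_rows,strided_rows;
     MPI_Type_create_indexed_block(nrows_to,1,(const PetscMPIInt*)disp,MPIU_SCALAR,&col_rows);
     MPI_Type_create_resized(col_rows,0,(MPI_Aint)(lda*sizeof(PetscScalar)),&strided_rows);
     MPI_Type_commit(&strided_rows);
     MPI_Type_free(&col_rows);
     MPI_Isend(b,ncols,strided_rows,dest,tag,comm,&req);   // nrows_to*ncols scalars, no packing
     MPI_Type_free(&strided_rows);                         // after the send completes
*/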
569: extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);
570: /*
571: Performs an efficient scatter on the rows of B needed by this process; this is
572: a modification of the VecScatterBegin_() routines.
574: Input: Bbidx = 0: B = Bb
575: = 1: B = Bb1, see MatMatMultSymbolic_MPIAIJ_MPIDense()
576: */
577: PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,PetscInt Bbidx,Mat C,Mat *outworkB)
578: {
579: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
580: PetscErrorCode ierr;
581: const PetscScalar *b;
582: PetscScalar *rvalues;
583: VecScatter ctx = aij->Mvctx;
584: const PetscInt *sindices,*sstarts,*rstarts;
585: const PetscMPIInt *sprocs,*rprocs;
586: PetscInt i,nsends,nrecvs,nrecvs2;
587: MPI_Request *swaits,*rwaits;
588: MPI_Comm comm;
589: PetscMPIInt tag=((PetscObject)ctx)->tag,ncols=B->cmap->N,nrows=aij->B->cmap->n,imdex,nsends_mpi,nrecvs_mpi;
590: MPI_Status status;
591: MPIAIJ_MPIDense *contents;
592: PetscContainer container;
593: Mat workB;
594: MPI_Datatype *stype,*rtype;
597: PetscObjectGetComm((PetscObject)A,&comm);
598: PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);
599: if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist");
600: PetscContainerGetPointer(container,(void**)&contents);
602: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL/*bs*/);
603: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL/*bs*/);
604: PetscMPIIntCast(nsends,&nsends_mpi);
605: PetscMPIIntCast(nrecvs,&nrecvs_mpi);
606: if (Bbidx == 0) {
607: workB = *outworkB = contents->workB;
608: } else {
609: workB = *outworkB = contents->workB1;
610: }
611: if (nrows != workB->rmap->n) SETERRQ2(comm,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
612: swaits = contents->swaits;
613: rwaits = contents->rwaits;
615: MatDenseGetArrayRead(B,&b);
616: MatDenseGetArray(workB,&rvalues);
618: /* Post recv, use MPI derived data type to save memory */
619: rtype = contents->rtype;
620: for (i=0; i<nrecvs; i++) {
621: MPI_Irecv(rvalues+(rstarts[i]-rstarts[0]),ncols,rtype[i],rprocs[i],tag,comm,rwaits+i);
622: }
624: stype = contents->stype;
625: for (i=0; i<nsends; i++) {
626: MPI_Isend(b,ncols,stype[i],sprocs[i],tag,comm,swaits+i);
627: }
629: nrecvs2 = nrecvs;
630: while (nrecvs2) {
631: MPI_Waitany(nrecvs_mpi,rwaits,&imdex,&status);
632: nrecvs2--;
633: }
634: if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
636: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL);
637: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL);
638: MatDenseRestoreArrayRead(B,&b);
639: MatDenseRestoreArray(workB,&rvalues);
640: MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);
641: MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);
642: return(0);
643: }
645: /*
646: Compute Cb = A*Bb
647: */
648: PETSC_STATIC_INLINE PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense_private(Mat A,Mat Bb,PetscInt Bbidx,PetscInt start,Mat C,const PetscScalar *barray,PetscScalar *carray,Mat Cb)
649: {
650: PetscErrorCode ierr;
651: PetscInt start1;
652: Mat workB;
653: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
654: Mat_MPIDense *cbdense = (Mat_MPIDense*)Cb->data;
657: /* Place barray to Bb */
658: start1 = start*Bb->rmap->n;
659: MatDensePlaceArray(Bb,barray+start1);
661: /* get off processor parts of Bb needed to complete Cb=A*Bb */
662: MatMPIDenseScatter(A,Bb,Bbidx,C,&workB);
663: MatDenseResetArray(Bb);
665: /* off-diagonal block of A times nonlocal rows of Bb */
666: /* Place carray to Cb */
667: start1 = start*Cb->rmap->n;
668: MatDensePlaceArray(Cb,carray+start1);
669: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cbdense->A);
670: MatDenseResetArray(Cb);
671: return(0);
672: }
674: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
675: {
676: PetscErrorCode ierr;
677: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
678: Mat_MPIDense *bdense = (Mat_MPIDense*)B->data;
679: Mat_MPIDense *cdense = (Mat_MPIDense*)C->data;
680: Mat workB;
681: MPIAIJ_MPIDense *contents;
682: PetscContainer container;
683: MPI_Comm comm;
684: PetscInt numBb;
687: /* diagonal block of A times all local rows of B*/
688: MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);
690: PetscObjectGetComm((PetscObject)A,&comm);
691: PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);
692: if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist");
693: PetscContainerGetPointer(container,(void**)&contents);
694: numBb = contents->numBb;
696: if (!numBb) {
697: /* get off processor parts of B needed to complete C=A*B */
698: MatMPIDenseScatter(A,B,0,C,&workB);
700: /* off-diagonal block of A times nonlocal rows of B */
701: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);
703: } else {
704: const PetscScalar *barray;
705: PetscScalar *carray;
706: Mat Bb=contents->Bb,Cb=contents->Cb;
707: PetscInt BbN=Bb->cmap->N,start,i;
709: MatDenseGetArrayRead(B,&barray);
710: MatDenseGetArray(C,&carray);
711: for (i=0; i<numBb; i++) {
712: start = i*BbN;
713: MatMatMultNumeric_MPIAIJ_MPIDense_private(A,Bb,0,start,C,barray,carray,Cb);
714: }
716: if (contents->Bb1) {
717: Bb = contents->Bb1; Cb = contents->Cb1;
718: start = i*BbN;
719: MatMatMultNumeric_MPIAIJ_MPIDense_private(A,Bb,1,start,C,barray,carray,Cb);
720: }
721: MatDenseRestoreArrayRead(B,&barray);
722: MatDenseRestoreArray(C,&carray);
723: }
724: return(0);
725: }
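/*
   Worked example (illustrative numbers) of the column blocking driven above: with
   BN = 10000 global columns of B and Bbn = 512 chosen in the symbolic phase,
   numBb = 10000/512 = 19 full blocks are processed through Bb/Cb and the remaining
   Bbn1 = 10000 - 19*512 = 272 columns through Bb1/Cb1, so workB never needs more
   than nz x 512 entries.
*/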
727: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
728: {
730: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
731: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
732: Mat_SeqAIJ *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
733: PetscInt *adi = ad->i,*adj,*aoi=ao->i,*aoj;
734: PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a;
735: Mat_SeqAIJ *p_loc,*p_oth;
736: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
737: PetscScalar *pa_loc,*pa_oth,*pa,valtmp,*ca;
738: PetscInt cm = C->rmap->n,anz,pnz;
739: Mat_APMPI *ptap = c->ap;
740: PetscScalar *apa_sparse;
741: PetscInt *api,*apj,*apJ,i,j,k,row;
742: PetscInt cstart = C->cmap->rstart;
743: PetscInt cdnz,conz,k0,k1,nextp;
744: MPI_Comm comm;
745: PetscMPIInt size;
748: PetscObjectGetComm((PetscObject)C,&comm);
749: MPI_Comm_size(comm,&size);
751: if (!ptap->P_oth && size>1) {
752: SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
753: }
754: apa_sparse = ptap->apa;
756: /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
757: /*-----------------------------------------------------*/
758: /* update numerical values of P_oth and P_loc */
759: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
760: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
762: /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
763: /*----------------------------------------------------------*/
764: /* get data from symbolic products */
765: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
766: pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
767: if (size >1) {
768: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
769: pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;
770: } else {
771: p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL;
772: }
774: api = ptap->api;
775: apj = ptap->apj;
776: for (i=0; i<cm; i++) {
777: apJ = apj + api[i];
779: /* diagonal portion of A */
780: anz = adi[i+1] - adi[i];
781: adj = ad->j + adi[i];
782: ada = ad->a + adi[i];
783: for (j=0; j<anz; j++) {
784: row = adj[j];
785: pnz = pi_loc[row+1] - pi_loc[row];
786: pj = pj_loc + pi_loc[row];
787: pa = pa_loc + pi_loc[row];
788: /* perform sparse axpy */
789: valtmp = ada[j];
790: nextp = 0;
791: for (k=0; nextp<pnz; k++) {
792: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
793: apa_sparse[k] += valtmp*pa[nextp++];
794: }
795: }
796: PetscLogFlops(2.0*pnz);
797: }
799: /* off-diagonal portion of A */
800: anz = aoi[i+1] - aoi[i];
801: aoj = ao->j + aoi[i];
802: aoa = ao->a + aoi[i];
803: for (j=0; j<anz; j++) {
804: row = aoj[j];
805: pnz = pi_oth[row+1] - pi_oth[row];
806: pj = pj_oth + pi_oth[row];
807: pa = pa_oth + pi_oth[row];
808: /* perform sparse axpy */
809: valtmp = aoa[j];
810: nextp = 0;
811: for (k=0; nextp<pnz; k++) {
812: if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
813: apa_sparse[k] += valtmp*pa[nextp++];
814: }
815: }
816: PetscLogFlops(2.0*pnz);
817: }
819: /* set values in C */
820: cdnz = cd->i[i+1] - cd->i[i];
821: conz = co->i[i+1] - co->i[i];
823: /* 1st off-diagonal part of C */
824: ca = coa + co->i[i];
825: k = 0;
826: for (k0=0; k0<conz; k0++) {
827: if (apJ[k] >= cstart) break;
828: ca[k0] = apa_sparse[k];
829: apa_sparse[k] = 0.0;
830: k++;
831: }
833: /* diagonal part of C */
834: ca = cda + cd->i[i];
835: for (k1=0; k1<cdnz; k1++) {
836: ca[k1] = apa_sparse[k];
837: apa_sparse[k] = 0.0;
838: k++;
839: }
841: /* 2nd off-diagonal part of C */
842: ca = coa + co->i[i];
843: for (; k0<conz; k0++) {
844: ca[k0] = apa_sparse[k];
845: apa_sparse[k] = 0.0;
846: k++;
847: }
848: }
849: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
850: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
852: if (ptap->freestruct) {
853: MatFreeIntermediateDataStructures(C);
854: }
855: return(0);
856: }
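/*
   Small worked example (illustrative) of the merge-style sparse axpy used above:
   because both the AP row pattern apJ and the P row pattern pj are sorted, e.g.
   apJ = {1,4,7,9} and pj = {4,9} with pnz = 2, the scan visits k = 0..3 and adds
   valtmp*pa[0] at k = 1 and valtmp*pa[1] at k = 3; no dense accumulator of length
   pN is needed, only apa_sparse of length apnz_max.
*/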
858: /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */
859: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat C)
860: {
861: PetscErrorCode ierr;
862: MPI_Comm comm;
863: PetscMPIInt size;
864: Mat_APMPI *ptap;
865: PetscFreeSpaceList free_space = NULL,current_space=NULL;
866: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c;
867: Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
868: PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
869: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
870: PetscInt i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0;
871: PetscInt am=A->rmap->n,pn=P->cmap->n,pm=P->rmap->n,lsize=pn+20;
872: PetscReal afill;
873: MatType mtype;
876: PetscObjectGetComm((PetscObject)A,&comm);
877: MPI_Comm_size(comm,&size);
879: /* create struct Mat_APMPI and attach it to C later */
880: PetscNew(&ptap);
882: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
883: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
885: /* get P_loc by taking all local rows of P */
886: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
888: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
889: pi_loc = p_loc->i; pj_loc = p_loc->j;
890: if (size > 1) {
891: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
892: pi_oth = p_oth->i; pj_oth = p_oth->j;
893: } else {
894: p_oth = NULL;
895: pi_oth = NULL; pj_oth = NULL;
896: }
898: /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
899: /*-------------------------------------------------------------------*/
900: PetscMalloc1(am+2,&api);
901: ptap->api = api;
902: api[0] = 0;
904: PetscLLCondensedCreate_Scalable(lsize,&lnk);
906: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
907: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);
908: current_space = free_space;
909: MatPreallocateInitialize(comm,am,pn,dnz,onz);
910: for (i=0; i<am; i++) {
911: /* diagonal portion of A */
912: nzi = adi[i+1] - adi[i];
913: for (j=0; j<nzi; j++) {
914: row = *adj++;
915: pnz = pi_loc[row+1] - pi_loc[row];
916: Jptr = pj_loc + pi_loc[row];
917: /* Expand list if it is not long enough */
918: if (pnz+apnz_max > lsize) {
919: lsize = pnz+apnz_max;
920: PetscLLCondensedExpand_Scalable(lsize, &lnk);
921: }
922: /* add non-zero cols of P into the sorted linked list lnk */
923: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
924: apnz = *lnk; /* The first element in the list is the number of items in the list */
925: api[i+1] = api[i] + apnz;
926: if (apnz > apnz_max) apnz_max = apnz;
927: }
928: /* off-diagonal portion of A */
929: nzi = aoi[i+1] - aoi[i];
930: for (j=0; j<nzi; j++) {
931: row = *aoj++;
932: pnz = pi_oth[row+1] - pi_oth[row];
933: Jptr = pj_oth + pi_oth[row];
934: /* Expand list if it is not long enough */
935: if (pnz+apnz_max > lsize) {
936: lsize = pnz + apnz_max;
937: PetscLLCondensedExpand_Scalable(lsize, &lnk);
938: }
939: /* add non-zero cols of P into the sorted linked list lnk */
940: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
941: apnz = *lnk; /* The first element in the list is the number of items in the list */
942: api[i+1] = api[i] + apnz;
943: if (apnz > apnz_max) apnz_max = apnz;
944: }
945: apnz = *lnk;
946: api[i+1] = api[i] + apnz;
947: if (apnz > apnz_max) apnz_max = apnz;
949: /* if free space is not available, double the total space in the list */
950: if (current_space->local_remaining<apnz) {
951: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
952: nspacedouble++;
953: }
955: /* Copy data into free space, then initialize lnk */
956: PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);
957: MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);
959: current_space->array += apnz;
960: current_space->local_used += apnz;
961: current_space->local_remaining -= apnz;
962: }
964: /* Allocate space for apj, initialize apj, and */
965: /* destroy list of free space and other temporary array(s) */
966: PetscMalloc1(api[am]+1,&ptap->apj);
967: apj = ptap->apj;
968: PetscFreeSpaceContiguous(&free_space,ptap->apj);
969: PetscLLCondensedDestroy_Scalable(lnk);
971: /* create and assemble symbolic parallel matrix C */
972: /*----------------------------------------------------*/
973: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
974: MatSetBlockSizesFromMats(C,A,P);
975: MatGetType(A,&mtype);
976: MatSetType(C,mtype);
977: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
979: /* malloc apa for assembly C */
980: PetscCalloc1(apnz_max,&ptap->apa);
982: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
983: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
984: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
985: MatPreallocateFinalize(dnz,onz);
987: ptap->destroy = C->ops->destroy;
988: ptap->duplicate = C->ops->duplicate;
989: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
990: C->ops->productnumeric = MatProductNumeric_AB;
991: C->ops->destroy = MatDestroy_MPIAIJ_MatMatMult;
992: C->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
994: /* attach the supporting struct to C for reuse */
995: c = (Mat_MPIAIJ*)C->data;
996: c->ap = ptap;
998: /* set MatInfo */
999: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
1000: if (afill < 1.0) afill = 1.0;
1001: C->info.mallocs = nspacedouble;
1002: C->info.fill_ratio_given = fill;
1003: C->info.fill_ratio_needed = afill;
1005: #if defined(PETSC_USE_INFO)
1006: if (api[am]) {
1007: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
1008: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
1009: } else {
1010: PetscInfo(C,"Empty matrix product\n");
1011: }
1012: #endif
1013: return(0);
1014: }
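/*
   Illustrative comparison (assumed numbers) of the memory footprints of the two
   symbolic variants: the nonscalable version allocates a dense accumulator apa of
   length pN (the global number of columns of P), while this version allocates only
   apnz_max entries, the largest row count of AP. For pN = 1,000,000 and
   apnz_max = 80 that is 1,000,000 versus 80 PetscScalars per process.
*/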
1016: /* This function is needed for the seqMPI matrix-matrix multiplication. */
1017: /* Three input arrays are merged to one output array. The size of the */
1018: /* output array is also output. Duplicate entries only show up once. */
1019: static void Merge3SortedArrays(PetscInt size1, PetscInt *in1,
1020: PetscInt size2, PetscInt *in2,
1021: PetscInt size3, PetscInt *in3,
1022: PetscInt *size4, PetscInt *out)
1023: {
1024: int i = 0, j = 0, k = 0, l = 0;
1026: /* Traverse all three arrays */
1027: while (i<size1 && j<size2 && k<size3) {
1028: if (in1[i] < in2[j] && in1[i] < in3[k]) {
1029: out[l++] = in1[i++];
1030: }
1031: else if(in2[j] < in1[i] && in2[j] < in3[k]) {
1032: out[l++] = in2[j++];
1033: }
1034: else if(in3[k] < in1[i] && in3[k] < in2[j]) {
1035: out[l++] = in3[k++];
1036: }
1037: else if(in1[i] == in2[j] && in1[i] < in3[k]) {
1038: out[l++] = in1[i];
1039: i++, j++;
1040: }
1041: else if(in1[i] == in3[k] && in1[i] < in2[j]) {
1042: out[l++] = in1[i];
1043: i++, k++;
1044: }
1045: else if(in3[k] == in2[j] && in2[j] < in1[i]) {
1046: out[l++] = in2[j];
1047: k++, j++;
1048: }
1049: else if(in1[i] == in2[j] && in1[i] == in3[k]) {
1050: out[l++] = in1[i];
1051: i++, j++, k++;
1052: }
1053: }
1055: /* Traverse two remaining arrays */
1056: while (i<size1 && j<size2) {
1057: if (in1[i] < in2[j]) {
1058: out[l++] = in1[i++];
1059: }
1060: else if(in1[i] > in2[j]) {
1061: out[l++] = in2[j++];
1062: }
1063: else {
1064: out[l++] = in1[i];
1065: i++, j++;
1066: }
1067: }
1069: while (i<size1 && k<size3) {
1070: if (in1[i] < in3[k]) {
1071: out[l++] = in1[i++];
1072: }
1073: else if(in1[i] > in3[k]) {
1074: out[l++] = in3[k++];
1075: }
1076: else {
1077: out[l++] = in1[i];
1078: i++, k++;
1079: }
1080: }
1082: while (k<size3 && j<size2) {
1083: if (in3[k] < in2[j]) {
1084: out[l++] = in3[k++];
1085: }
1086: else if(in3[k] > in2[j]) {
1087: out[l++] = in2[j++];
1088: }
1089: else {
1090: out[l++] = in3[k];
1091: k++, j++;
1092: }
1093: }
1095: /* Traverse one remaining array */
1096: while (i<size1) out[l++] = in1[i++];
1097: while (j<size2) out[l++] = in2[j++];
1098: while (k<size3) out[l++] = in3[k++];
1100: *size4 = l;
1101: }
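/*
   Illustrative example (not part of this file) of Merge3SortedArrays():
   in1 = {1,3,5}, in2 = {3,4}, in3 = {5,6} yields out = {1,3,4,5,6} and *size4 = 5;
   the caller must provide an out buffer of at least size1+size2+size3 entries.
*/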
1103: /* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and */
1104: /* adds up the products. Two of these three multiplications are performed with existing (sequential) */
1105: /* matrix-matrix multiplications. */
1106: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
1107: {
1108: PetscErrorCode ierr;
1109: MPI_Comm comm;
1110: PetscMPIInt size;
1111: Mat_APMPI *ptap;
1112: PetscFreeSpaceList free_space_diag=NULL, current_space=NULL;
1113: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data;
1114: Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc;
1115: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data;
1116: Mat_MPIAIJ *c;
1117: Mat_SeqAIJ *adpd_seq, *p_off, *aopoth_seq;
1118: PetscInt adponz, adpdnz;
1119: PetscInt *pi_loc,*dnz,*onz;
1120: PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,rstart=A->rmap->rstart;
1121: PetscInt *lnk,i, i1=0,pnz,row,*adpoi,*adpoj, *api, *adpoJ, *aopJ, *apJ,*Jptr, aopnz, nspacedouble=0,j,nzi,
1122: *apj,apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
1123: PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n, p_colstart, p_colend;
1124: PetscBT lnkbt;
1125: PetscReal afill;
1126: PetscMPIInt rank;
1127: Mat adpd, aopoth;
1128: MatType mtype;
1129: const char *prefix;
1132: PetscObjectGetComm((PetscObject)A,&comm);
1133: MPI_Comm_size(comm,&size);
1134: MPI_Comm_rank(comm, &rank);
1135: MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend);
1137: /* create struct Mat_APMPI and attach it to C later */
1138: PetscNew(&ptap);
1140: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
1141: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
1143: /* get P_loc by taking all local rows of P */
1144: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
1147: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1148: pi_loc = p_loc->i;
1150: /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
1151: PetscMalloc1(am+2,&api);
1152: PetscMalloc1(am+2,&adpoi);
1154: adpoi[0] = 0;
1155: ptap->api = api;
1156: api[0] = 0;
1158: /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
1159: PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);
1160: MatPreallocateInitialize(comm,am,pn,dnz,onz);
1162: /* Symbolic calc of A_loc_diag * P_loc_diag */
1163: MatGetOptionsPrefix(A,&prefix);
1164: MatProductCreate(a->A,p->A,NULL,&adpd);
1165: MatGetOptionsPrefix(A,&prefix);
1166: MatSetOptionsPrefix(adpd,prefix);
1167: MatAppendOptionsPrefix(adpd,"inner_diag_");
1169: MatProductSetType(adpd,MATPRODUCT_AB);
1170: MatProductSetAlgorithm(adpd,"sorted");
1171: MatProductSetFill(adpd,fill);
1172: MatProductSetFromOptions(adpd);
1173: MatProductSymbolic(adpd);
1175: adpd_seq = (Mat_SeqAIJ*)((adpd)->data);
1176: adpdi = adpd_seq->i; adpdj = adpd_seq->j;
1177: p_off = (Mat_SeqAIJ*)((p->B)->data);
1178: poff_i = p_off->i; poff_j = p_off->j;
1180: /* j_temp stores indices of a result row before they are added to the linked list */
1181: PetscMalloc1(pN+2,&j_temp);
1184: /* Symbolic calc of the A_diag * p_loc_off */
1185: /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
1186: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space_diag);
1187: current_space = free_space_diag;
1189: for (i=0; i<am; i++) {
1190: /* A_diag * P_loc_off */
1191: nzi = adi[i+1] - adi[i];
1192: for (j=0; j<nzi; j++) {
1193: row = *adj++;
1194: pnz = poff_i[row+1] - poff_i[row];
1195: Jptr = poff_j + poff_i[row];
1196: for(i1 = 0; i1 < pnz; i1++) {
1197: j_temp[i1] = p->garray[Jptr[i1]];
1198: }
1199: /* add non-zero cols of P into the sorted linked list lnk */
1200: PetscLLCondensedAddSorted(pnz,j_temp,lnk,lnkbt);
1201: }
1203: adponz = lnk[0];
1204: adpoi[i+1] = adpoi[i] + adponz;
1206: /* if free space is not available, double the total space in the list */
1207: if (current_space->local_remaining<adponz) {
1208: PetscFreeSpaceGet(PetscIntSumTruncate(adponz,current_space->total_array_size),&current_space);
1209: nspacedouble++;
1210: }
1212: /* Copy data into free space, then initialize lnk */
1213: PetscLLCondensedClean(pN,adponz,current_space->array,lnk,lnkbt);
1215: current_space->array += adponz;
1216: current_space->local_used += adponz;
1217: current_space->local_remaining -= adponz;
1218: }
1220: /* Symbolic calc of A_off * P_oth */
1221: MatSetOptionsPrefix(a->B,prefix);
1222: MatAppendOptionsPrefix(a->B,"inner_offdiag_");
1223: MatCreate(PETSC_COMM_SELF,&aopoth);
1224: MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth);
1225: aopoth_seq = (Mat_SeqAIJ*)((aopoth)->data);
1226: aopothi = aopoth_seq->i; aopothj = aopoth_seq->j;
1228: /* Allocate space for apj, adpj, aopj, ... */
1229: /* destroy lists of free space and other temporary array(s) */
1231: PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am]+2, &ptap->apj);
1232: PetscMalloc1(adpoi[am]+2, &adpoj);
1234: /* Copy from linked list to j-array */
1235: PetscFreeSpaceContiguous(&free_space_diag,adpoj);
1236: PetscLLDestroy(lnk,lnkbt);
1238: adpoJ = adpoj;
1239: adpdJ = adpdj;
1240: aopJ = aopothj;
1241: apj = ptap->apj;
1242: apJ = apj; /* still empty */
1244: /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
1245: /* A_diag * P_loc_diag to get A*P */
1246: for (i = 0; i < am; i++) {
1247: aopnz = aopothi[i+1] - aopothi[i];
1248: adponz = adpoi[i+1] - adpoi[i];
1249: adpdnz = adpdi[i+1] - adpdi[i];
1251: /* Correct indices from A_diag*P_diag */
1252: for(i1 = 0; i1 < adpdnz; i1++) {
1253: adpdJ[i1] += p_colstart;
1254: }
1255: /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
1256: Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
1257: MatPreallocateSet(i+rstart, apnz, apJ, dnz, onz);
1259: aopJ += aopnz;
1260: adpoJ += adponz;
1261: adpdJ += adpdnz;
1262: apJ += apnz;
1263: api[i+1] = api[i] + apnz;
1264: }
1266: /* malloc apa to store dense row A[i,:]*P */
1267: PetscCalloc1(pN+2,&ptap->apa);
1269: /* create and assemble symbolic parallel matrix C */
1270: MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);
1271: MatSetBlockSizesFromMats(C,A,P);
1272: MatGetType(A,&mtype);
1273: MatSetType(C,mtype);
1274: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1277: MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
1278: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1279: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1280: MatPreallocateFinalize(dnz,onz);
1283: ptap->destroy = C->ops->destroy;
1284: ptap->duplicate = C->ops->duplicate;
1285: C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1286: C->ops->productnumeric = MatProductNumeric_AB;
1287: C->ops->destroy = MatDestroy_MPIAIJ_MatMatMult;
1289: /* attach the supporting struct to C for reuse */
1290: c = (Mat_MPIAIJ*)C->data;
1291: c->ap = ptap;
1293: /* set MatInfo */
1294: afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
1295: if (afill < 1.0) afill = 1.0;
1296: C->info.mallocs = nspacedouble;
1297: C->info.fill_ratio_given = fill;
1298: C->info.fill_ratio_needed = afill;
1300: #if defined(PETSC_USE_INFO)
1301: if (api[am]) {
1302: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
1303: PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
1304: } else {
1305: PetscInfo(C,"Empty matrix product\n");
1306: }
1307: #endif
1309: MatDestroy(&aopoth);
1310: MatDestroy(&adpd);
1311: PetscFree(j_temp);
1312: PetscFree(adpoj);
1313: PetscFree(adpoi);
1314: return(0);
1315: }
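/*
   The row-wise decomposition assembled above, written out (illustrative notation):

     C[i,:] = A_diag[i,:]*P_diag  +  A_diag[i,:]*P_off  +  A_off[i,:]*P_oth

   The first term comes from the sequential product adpd, the second from the adpo
   linked-list pass, the third from aopoth, and their sorted column patterns are
   merged row by row with Merge3SortedArrays().
*/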
1317: /*-------------------------------------------------------------------------*/
1318: /* This routine only works when scall=MAT_REUSE_MATRIX! */
1319: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C)
1320: {
1322: Mat_MPIAIJ *c=(Mat_MPIAIJ*)C->data;
1323: Mat_APMPI *ptap= c->ap;
1324: Mat Pt;
1327: if (!ptap->Pt) {
1328: MPI_Comm comm;
1329: PetscObjectGetComm((PetscObject)C,&comm);
1330: SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
1331: }
1333: Pt = ptap->Pt;
1334: MatTranspose(P,MAT_REUSE_MATRIX,&Pt);
1335: MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt,A,C);
1337: /* supporting struct ptap consumes almost the same amount of memory as C=Pt*A; release it if C will not be updated by A and P */
1338: if (ptap->freestruct) {
1339: MatFreeIntermediateDataStructures(C);
1340: }
1341: return(0);
1342: }
1344: /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
1345: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat C)
1346: {
1347: PetscErrorCode ierr;
1348: Mat_APMPI *ptap;
1349: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*c;
1350: MPI_Comm comm;
1351: PetscMPIInt size,rank;
1352: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1353: PetscInt pn=P->cmap->n,aN=A->cmap->N,an=A->cmap->n;
1354: PetscInt *lnk,i,k,nsend;
1355: PetscBT lnkbt;
1356: PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
1357: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1358: PetscInt len,proc,*dnz,*onz,*owners,nzi;
1359: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1360: MPI_Request *swaits,*rwaits;
1361: MPI_Status *sstatus,rstatus;
1362: PetscLayout rowmap;
1363: PetscInt *owners_co,*coi,*coj; /* i and j arrays of Co = (p->B)^T*A - used in the communication */
1364: PetscMPIInt *len_r,*id_r; /* arrays of length comm->size, storing send/recv matrix values */
1365: PetscInt *Jptr,*prmap=p->garray,con,j,Crmax;
1366: Mat_SeqAIJ *a_loc,*c_loc,*c_oth;
1367: PetscTable ta;
1368: MatType mtype;
1369: const char *prefix;
1372: PetscObjectGetComm((PetscObject)A,&comm);
1373: MPI_Comm_size(comm,&size);
1374: MPI_Comm_rank(comm,&rank);
1376: /* create symbolic parallel matrix C */
1377: MatGetType(A,&mtype);
1378: MatSetType(C,mtype);
1380: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1382: /* create struct Mat_APMPI and attached it to C later */
1383: PetscNew(&ptap);
1384: ptap->reuse = MAT_INITIAL_MATRIX;
1386: /* (0) compute Rd = Pd^T, Ro = Po^T */
1387: /* --------------------------------- */
1388: MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);
1389: MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);
1391: /* (1) compute symbolic A_loc */
1392: /* ---------------------------*/
1393: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&ptap->A_loc);
1395: /* (2-1) compute symbolic C_oth = Ro*A_loc */
1396: /* ------------------------------------ */
1397: MatGetOptionsPrefix(A,&prefix);
1398: MatSetOptionsPrefix(ptap->Ro,prefix);
1399: MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");
1400: MatCreate(PETSC_COMM_SELF,&ptap->C_oth);
1401: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->A_loc,fill,ptap->C_oth);
1403: /* (3) send coj of C_oth to other processors */
1404: /* ------------------------------------------ */
1405: /* determine row ownership */
1406: PetscLayoutCreate(comm,&rowmap);
1407: rowmap->n = pn;
1408: rowmap->bs = 1;
1409: PetscLayoutSetUp(rowmap);
1410: owners = rowmap->range;
1412: /* determine the number of messages to send, their lengths */
1413: PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);
1414: PetscArrayzero(len_s,size);
1415: PetscArrayzero(len_si,size);
1417: c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
1418: coi = c_oth->i; coj = c_oth->j;
1419: con = ptap->C_oth->rmap->n;
1420: proc = 0;
1421: for (i=0; i<con; i++) {
1422: while (prmap[i] >= owners[proc+1]) proc++;
1423: len_si[proc]++; /* num of rows in Co(=Pt*A) to be sent to [proc] */
1424: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1425: }
1427: len = 0; /* max length of buf_si[], see (4) */
1428: owners_co[0] = 0;
1429: nsend = 0;
1430: for (proc=0; proc<size; proc++) {
1431: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1432: if (len_s[proc]) {
1433: nsend++;
1434: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1435: len += len_si[proc];
1436: }
1437: }
1439: /* determine the number and length of messages to receive for coi and coj */
1440: PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);
1441: PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);
1443: /* post the Irecv and Isend of coj */
1444: PetscCommGetNewTag(comm,&tagj);
1445: PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);
1446: PetscMalloc1(nsend+1,&swaits);
1447: for (proc=0, k=0; proc<size; proc++) {
1448: if (!len_s[proc]) continue;
1449: i = owners_co[proc];
1450: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1451: k++;
1452: }
1454: /* (2-2) compute symbolic C_loc = Rd*A_loc */
1455: /* ---------------------------------------- */
1456: MatSetOptionsPrefix(ptap->Rd,prefix);
1457: MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");
1458: MatCreate(PETSC_COMM_SELF,&ptap->C_loc);
1459: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->A_loc,fill,ptap->C_loc);
1460: c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
1462: /* receives of coj are complete */
1463: for (i=0; i<nrecv; i++) {
1464: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
1465: }
1466: PetscFree(rwaits);
1467: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
1469: /* add received column indices into ta to update Crmax */
1470: a_loc = (Mat_SeqAIJ*)(ptap->A_loc)->data;
1472: /* create and initialize a linked list */
1473: PetscTableCreate(an,aN,&ta); /* for computing Crmax */
1474: MatRowMergeMax_SeqAIJ(a_loc,ptap->A_loc->rmap->N,ta);
1476: for (k=0; k<nrecv; k++) {/* k-th received message */
1477: Jptr = buf_rj[k];
1478: for (j=0; j<len_r[k]; j++) {
1479: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
1480: }
1481: }
1482: PetscTableGetCount(ta,&Crmax);
1483: PetscTableDestroy(&ta);
1485: /* (4) send and recv coi */
1486: /*-----------------------*/
1487: PetscCommGetNewTag(comm,&tagi);
1488: PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);
1489: PetscMalloc1(len+1,&buf_s);
1490: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1491: for (proc=0,k=0; proc<size; proc++) {
1492: if (!len_s[proc]) continue;
1493: /* form outgoing message for i-structure:
1494: buf_si[0]: nrows to be sent
1495: [1:nrows]: row index (global)
1496: [nrows+1:2*nrows+1]: i-structure index
1497: */
1498: /*-------------------------------------------*/
1499: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
1500: buf_si_i = buf_si + nrows+1;
1501: buf_si[0] = nrows;
1502: buf_si_i[0] = 0;
1503: nrows = 0;
1504: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1505: nzi = coi[i+1] - coi[i];
1506: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1507: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
1508: nrows++;
1509: }
1510: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
1511: k++;
1512: buf_si += len_si[proc];
1513: }
1514: for (i=0; i<nrecv; i++) {
1515: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
1516: }
1517: PetscFree(rwaits);
1518: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
1520: PetscFree4(len_s,len_si,sstatus,owners_co);
1521: PetscFree(len_ri);
1522: PetscFree(swaits);
1523: PetscFree(buf_s);
1525: /* (5) compute the local portion of C */
1526: /* ------------------------------------------ */
1527: /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of C */
1528: PetscFreeSpaceGet(Crmax,&free_space);
1529: current_space = free_space;
1531: PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);
1532: for (k=0; k<nrecv; k++) {
1533: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1534: nrows = *buf_ri_k[k];
1535: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
1536: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
1537: }
1539: MatPreallocateInitialize(comm,pn,an,dnz,onz);
1540: PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);
1541: for (i=0; i<pn; i++) {
1542: /* add C_loc into C */
1543: nzi = c_loc->i[i+1] - c_loc->i[i];
1544: Jptr = c_loc->j + c_loc->i[i];
1545: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1547: /* add received col data into lnk */
1548: for (k=0; k<nrecv; k++) { /* k-th received message */
1549: if (i == *nextrow[k]) { /* i-th row */
1550: nzi = *(nextci[k]+1) - *nextci[k];
1551: Jptr = buf_rj[k] + *nextci[k];
1552: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
1553: nextrow[k]++; nextci[k]++;
1554: }
1555: }
1556: nzi = lnk[0];
1558: /* copy data into free space, then initialize lnk */
1559: PetscLLCondensedClean(aN,nzi,current_space->array,lnk,lnkbt);
1560: MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);
1561: }
1562: PetscFree3(buf_ri_k,nextrow,nextci);
1563: PetscLLDestroy(lnk,lnkbt);
1564: PetscFreeSpaceDestroy(free_space);
1566: /* local sizes and preallocation */
1567: MatSetSizes(C,pn,an,PETSC_DETERMINE,PETSC_DETERMINE);
1568: if (P->cmap->bs > 0) {PetscLayoutSetBlockSize(C->rmap,P->cmap->bs);}
1569: if (A->cmap->bs > 0) {PetscLayoutSetBlockSize(C->cmap,A->cmap->bs);}
1570: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
1571: MatPreallocateFinalize(dnz,onz);
1573: /* members in merge */
1574: PetscFree(id_r);
1575: PetscFree(len_r);
1576: PetscFree(buf_ri[0]);
1577: PetscFree(buf_ri);
1578: PetscFree(buf_rj[0]);
1579: PetscFree(buf_rj);
1580: PetscLayoutDestroy(&rowmap);
1582: /* attach the supporting struct to C for reuse */
1583: c = (Mat_MPIAIJ*)C->data;
1584: c->ap = ptap;
1585: ptap->destroy = C->ops->destroy;
1587: /* C is not ready for use - assembly will be done by the numeric phase (MatTransposeMatMultNumeric) */
1588: C->assembled = PETSC_FALSE;
1589: C->ops->destroy = MatDestroy_MPIAIJ_PtAP;
1590: C->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
1591: return(0);
1592: }
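/*
   Worked example (illustrative) of the i-structure message built in step (4) above:
   if two rows of Co with local row indices 4 and 7 and with 3 and 5 nonzeros are
   destined for one process, then len_si = 2*(2+1) = 6 and

     buf_si = { 2, 4, 7, 0, 3, 8 }

   i.e. the number of rows, the local row indices, then the cumulative nonzero counts.
*/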
1594: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C)
1595: {
1596: PetscErrorCode ierr;
1597: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
1598: Mat_SeqAIJ *c_seq;
1599: Mat_APMPI *ptap = c->ap;
1600: Mat A_loc,C_loc,C_oth;
1601: PetscInt i,rstart,rend,cm,ncols,row;
1602: const PetscInt *cols;
1603: const PetscScalar *vals;
1606: if (!ptap->A_loc) {
1607: MPI_Comm comm;
1608: PetscObjectGetComm((PetscObject)C,&comm);
1609: SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
1610: }
1612: MatZeroEntries(C);
1614: if (ptap->reuse == MAT_REUSE_MATRIX) {
1615: /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1616: /* 1) get R = Pd^T, Ro = Po^T */
1617: /*----------------------------*/
1618: MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);
1619: MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);
1621: /* 2) compute numeric A_loc */
1622: /*--------------------------*/
1623: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&ptap->A_loc);
1624: }
1626: /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1627: A_loc = ptap->A_loc;
1628: ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,A_loc,ptap->C_loc);
1629: ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,A_loc,ptap->C_oth);
1630: C_loc = ptap->C_loc;
1631: C_oth = ptap->C_oth;
1633: /* add C_loc and C_oth into C */
1634: MatGetOwnershipRange(C,&rstart,&rend);
1636: /* C_loc -> C */
1637: cm = C_loc->rmap->N;
1638: c_seq = (Mat_SeqAIJ*)C_loc->data;
1639: cols = c_seq->j;
1640: vals = c_seq->a;
1641: for (i=0; i<cm; i++) {
1642: ncols = c_seq->i[i+1] - c_seq->i[i];
1643: row = rstart + i;
1644: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1645: cols += ncols; vals += ncols;
1646: }
1648: /* Co -> C, off-processor part */
1649: cm = C_oth->rmap->N;
1650: c_seq = (Mat_SeqAIJ*)C_oth->data;
1651: cols = c_seq->j;
1652: vals = c_seq->a;
1653: for (i=0; i<cm; i++) {
1654: ncols = c_seq->i[i+1] - c_seq->i[i];
1655: row = p->garray[i];
1656: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1657: cols += ncols; vals += ncols;
1658: }
1659: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1660: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1662: ptap->reuse = MAT_REUSE_MATRIX;
1664: /* the supporting struct ptap consumes almost the same amount of memory as C=P^T*A; release it if C will not be updated from A and P again */
1665: if (ptap->freestruct) {
1666: MatFreeIntermediateDataStructures(C);
1667: }
1668: return(0);
1669: }
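/*
   Illustrative sketch (not part of this implementation): the nonscalable numeric kernel
   above is normally reached through the user-level API, assuming P and A are assembled
   MATMPIAIJ matrices:

     Mat C;
     MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   symbolic + numeric, C = P^T*A
     ... update the values (not the structure) of A and/or P ...
     MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);     numeric only, reuses ptap

   This kernel corresponds to the "nonscalable" algorithm choice; see
   MatProductSetFromOptions_MPIAIJ_AtB() below for how it is selected.
*/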
1671: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C)
1672: {
1673: PetscErrorCode ierr;
1674: Mat_Merge_SeqsToMPI *merge;
1675: Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
1676: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1677: Mat_APMPI *ptap;
1678: PetscInt *adj;
1679: PetscInt i,j,k,anz,pnz,row,*cj,nexta;
1680: MatScalar *ada,*ca,valtmp;
1681: PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1682: MPI_Comm comm;
1683: PetscMPIInt size,rank,taga,*len_s;
1684: PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1685: PetscInt **buf_ri,**buf_rj;
1686: PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1687: MPI_Request *s_waits,*r_waits;
1688: MPI_Status *status;
1689: MatScalar **abuf_r,*ba_i,*pA,*coa,*ba;
1690: PetscInt *ai,*aj,*coi,*coj,*poJ,*pdJ;
1691: Mat A_loc;
1692: Mat_SeqAIJ *a_loc;
1695: PetscObjectGetComm((PetscObject)C,&comm);
1696: MPI_Comm_size(comm,&size);
1697: MPI_Comm_rank(comm,&rank);
1699: ptap = c->ap;
1700: if (!ptap->A_loc) SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
1701: merge = ptap->merge;
1703: /* 2) compute numeric C_seq = P_loc^T*A_loc */
1704: /*------------------------------------------*/
1705: /* get data from symbolic products */
1706: coi = merge->coi; coj = merge->coj;
1707: PetscCalloc1(coi[pon]+1,&coa);
1708: bi = merge->bi; bj = merge->bj;
1709: owners = merge->rowmap->range;
1710: PetscCalloc1(bi[cm]+1,&ba);
1712: /* get A_loc by taking all local rows of A */
1713: A_loc = ptap->A_loc;
1714: MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
1715: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1716: ai = a_loc->i;
1717: aj = a_loc->j;
1719: for (i=0; i<am; i++) {
1720: anz = ai[i+1] - ai[i];
1721: adj = aj + ai[i];
1722: ada = a_loc->a + ai[i];
1724: /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1725: /*-------------------------------------------------------------*/
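/* Outer-product view: each nonzero P(i,col) in row i of P scales A_loc(i,:) and the
   result is accumulated into row 'col' of P^T*A. Nonzeros from the off-diagonal block
   po correspond to rows owned by other processes and are accumulated into coa (sent in
   step 3); nonzeros from the diagonal block pd go directly into the local array ba. */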
1726: /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1727: pnz = po->i[i+1] - po->i[i];
1728: poJ = po->j + po->i[i];
1729: pA = po->a + po->i[i];
1730: for (j=0; j<pnz; j++) {
1731: row = poJ[j];
1732: cj = coj + coi[row];
1733: ca = coa + coi[row];
1734: /* perform sparse axpy */
1735: nexta = 0;
1736: valtmp = pA[j];
1737: for (k=0; nexta<anz; k++) {
1738: if (cj[k] == adj[nexta]) {
1739: ca[k] += valtmp*ada[nexta];
1740: nexta++;
1741: }
1742: }
1743: PetscLogFlops(2.0*anz);
1744: }
1746: /* put the value into Cd (diagonal part) */
1747: pnz = pd->i[i+1] - pd->i[i];
1748: pdJ = pd->j + pd->i[i];
1749: pA = pd->a + pd->i[i];
1750: for (j=0; j<pnz; j++) {
1751: row = pdJ[j];
1752: cj = bj + bi[row];
1753: ca = ba + bi[row];
1754: /* perform sparse axpy */
1755: nexta = 0;
1756: valtmp = pA[j];
1757: for (k=0; nexta<anz; k++) {
1758: if (cj[k] == adj[nexta]) {
1759: ca[k] += valtmp*ada[nexta];
1760: nexta++;
1761: }
1762: }
1763: PetscLogFlops(2.0*anz);
1764: }
1765: }
1767: /* 3) send and recv matrix values coa */
1768: /*------------------------------------*/
1769: buf_ri = merge->buf_ri;
1770: buf_rj = merge->buf_rj;
1771: len_s = merge->len_s;
1772: PetscCommGetNewTag(comm,&taga);
1773: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
1775: PetscMalloc2(merge->nsend+1,&s_waits,size,&status);
1776: for (proc=0,k=0; proc<size; proc++) {
1777: if (!len_s[proc]) continue;
1778: i = merge->owners_co[proc];
1779: MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
1780: k++;
1781: }
1782: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
1783: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
1785: PetscFree2(s_waits,status);
1786: PetscFree(r_waits);
1787: PetscFree(coa);
1789: /* 4) insert local Cseq and received values into Cmpi */
1790: /*----------------------------------------------------*/
1791: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
1792: for (k=0; k<merge->nrecv; k++) {
1793: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1794: nrows = *(buf_ri_k[k]);
1795: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th received i-structure */
1796: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1797: }
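/* Merge strategy: bj_i (the symbolic pattern of row i) and each received column list cj
   are both sorted, and the symbolic phase already merged the received patterns into bj,
   so a single forward scan over bj_i is enough to add the received values into ba_i. */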
1799: for (i=0; i<cm; i++) {
1800: row = owners[rank] + i; /* global row index of C_seq */
1801: bj_i = bj + bi[i]; /* col indices of the i-th row of C */
1802: ba_i = ba + bi[i];
1803: bnz = bi[i+1] - bi[i];
1804: /* add received vals into ba */
1805: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1806: /* i-th row */
1807: if (i == *nextrow[k]) {
1808: cnz = *(nextci[k]+1) - *nextci[k];
1809: cj = buf_rj[k] + *(nextci[k]);
1810: ca = abuf_r[k] + *(nextci[k]);
1811: nextcj = 0;
1812: for (j=0; nextcj<cnz; j++) {
1813: if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1814: ba_i[j] += ca[nextcj++];
1815: }
1816: }
1817: nextrow[k]++; nextci[k]++;
1818: PetscLogFlops(2.0*cnz);
1819: }
1820: }
1821: MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
1822: }
1823: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1824: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1826: PetscFree(ba);
1827: PetscFree(abuf_r[0]);
1828: PetscFree(abuf_r);
1829: PetscFree3(buf_ri_k,nextrow,nextci);
1831: if (ptap->freestruct) {
1832: MatFreeIntermediateDataStructures(C);
1833: }
1834: return(0);
1835: }
1837: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat C)
1838: {
1839: PetscErrorCode ierr;
1840: Mat A_loc,POt,PDt;
1841: Mat_APMPI *ptap;
1842: PetscFreeSpaceList free_space=NULL,current_space=NULL;
1843: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data,*c;
1844: PetscInt *pdti,*pdtj,*poti,*potj,*ptJ;
1845: PetscInt nnz;
1846: PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
1847: PetscInt am =A->rmap->n,pn=P->cmap->n;
1848: MPI_Comm comm;
1849: PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
1850: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
1851: PetscInt len,proc,*dnz,*onz,*owners;
1852: PetscInt nzi,*bi,*bj;
1853: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1854: MPI_Request *swaits,*rwaits;
1855: MPI_Status *sstatus,rstatus;
1856: Mat_Merge_SeqsToMPI *merge;
1857: PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
1858: PetscReal afill =1.0,afill_tmp;
1859: PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Armax;
1860: Mat_SeqAIJ *a_loc,*pdt,*pot;
1861: PetscTable ta;
1862: MatType mtype;
1865: PetscObjectGetComm((PetscObject)A,&comm);
1866: /* check if matrix local sizes are compatible */
1867: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
1869: MPI_Comm_size(comm,&size);
1870: MPI_Comm_rank(comm,&rank);
1872: /* create struct Mat_APMPI and attach it to C later */
1873: PetscNew(&ptap);
1875: /* get A_loc by taking all local rows of A */
1876: MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
1878: ptap->A_loc = A_loc;
1879: a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1880: ai = a_loc->i;
1881: aj = a_loc->j;
1883: /* determine symbolic Co=(p->B)^T*A - send to others */
1884: /*----------------------------------------------------*/
1885: MatTransposeSymbolic_SeqAIJ(p->A,&PDt);
1886: pdt = (Mat_SeqAIJ*)PDt->data;
1887: pdti = pdt->i; pdtj = pdt->j;
1889: MatTransposeSymbolic_SeqAIJ(p->B,&POt);
1890: pot = (Mat_SeqAIJ*)POt->data;
1891: poti = pot->i; potj = pot->j;
1893: /* then, compute symbolic Co = (p->B)^T*A */
1894: pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1895: >= (num of nonzero rows of C_seq) - pn */
1896: PetscMalloc1(pon+1,&coi);
1897: coi[0] = 0;
1899: /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1900: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
1901: PetscFreeSpaceGet(nnz,&free_space);
1902: current_space = free_space;
1904: /* create and initialize a linked list */
1905: PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);
1906: MatRowMergeMax_SeqAIJ(a_loc,am,ta);
1907: PetscTableGetCount(ta,&Armax);
1909: PetscLLCondensedCreate_Scalable(Armax,&lnk);
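/* lnk is a condensed sorted linked list used to union the column index sets of the rows
   of A_loc selected by each row of Po^T; Armax, obtained from the PetscTable ta built
   from A_loc above, bounds the number of distinct column indices a merged row can
   contain and is used to size lnk. */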
1911: for (i=0; i<pon; i++) {
1912: pnz = poti[i+1] - poti[i];
1913: ptJ = potj + poti[i];
1914: for (j=0; j<pnz; j++) {
1915: row = ptJ[j]; /* row of A_loc == col of Pot */
1916: anz = ai[row+1] - ai[row];
1917: Jptr = aj + ai[row];
1918: /* add the nonzero cols of A_loc[row,:] into the sorted linked list lnk */
1919: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
1920: }
1921: nnz = lnk[0];
1923: /* If free space is not available, double the total space in the list */
1924: if (current_space->local_remaining<nnz) {
1925: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
1926: nspacedouble++;
1927: }
1930: /* Copy data into free space, then reinitialize lnk */
1930: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
1932: current_space->array += nnz;
1933: current_space->local_used += nnz;
1934: current_space->local_remaining -= nnz;
1936: coi[i+1] = coi[i] + nnz;
1937: }
1939: PetscMalloc1(coi[pon]+1,&coj);
1940: PetscFreeSpaceContiguous(&free_space,coj);
1941: PetscLLCondensedDestroy_Scalable(lnk); /* must destroy to get a new one for C */
1943: afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
1944: if (afill_tmp > afill) afill = afill_tmp;
1946: /* send j-array (coj) of Co to other processors */
1947: /*----------------------------------------------*/
1948: /* determine row ownership */
1949: PetscNew(&merge);
1950: PetscLayoutCreate(comm,&merge->rowmap);
1952: merge->rowmap->n = pn;
1953: merge->rowmap->bs = 1;
1955: PetscLayoutSetUp(merge->rowmap);
1956: owners = merge->rowmap->range;
1958: /* determine the number of messages to send, their lengths */
1959: PetscCalloc1(size,&len_si);
1960: PetscCalloc1(size,&merge->len_s);
1962: len_s = merge->len_s;
1963: merge->nsend = 0;
1965: PetscMalloc1(size+2,&owners_co);
1967: proc = 0;
1968: for (i=0; i<pon; i++) {
1969: while (prmap[i] >= owners[proc+1]) proc++;
1970: len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1971: len_s[proc] += coi[i+1] - coi[i];
1972: }
1974: len = 0; /* total length of buf_s[], which holds all outgoing i-structure messages */
1975: owners_co[0] = 0;
1976: for (proc=0; proc<size; proc++) {
1977: owners_co[proc+1] = owners_co[proc] + len_si[proc];
1978: if (len_si[proc]) {
1979: merge->nsend++;
1980: len_si[proc] = 2*(len_si[proc] + 1);
1981: len += len_si[proc];
1982: }
1983: }
1985: /* determine the number and length of messages to receive for coi and coj */
1986: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
1987: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
1989: /* post the Irecv and Isend of coj */
1990: PetscCommGetNewTag(comm,&tagj);
1991: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
1992: PetscMalloc1(merge->nsend+1,&swaits);
1993: for (proc=0, k=0; proc<size; proc++) {
1994: if (!len_s[proc]) continue;
1995: i = owners_co[proc];
1996: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
1997: k++;
1998: }
2000: /* receives and sends of coj are complete */
2001: PetscMalloc1(size,&sstatus);
2002: for (i=0; i<merge->nrecv; i++) {
2003: PetscMPIInt icompleted;
2004: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
2005: }
2006: PetscFree(rwaits);
2007: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
2009: /* add received column indices into table to update Armax */
2010: /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
2011: for (k=0; k<merge->nrecv; k++) {/* k-th received message */
2012: Jptr = buf_rj[k];
2013: for (j=0; j<merge->len_r[k]; j++) {
2014: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
2015: }
2016: }
2017: PetscTableGetCount(ta,&Armax);
2018: /* printf("Armax %d, an %d + Bn %d = %d, aN %d\n",Armax,A->cmap->n,a->B->cmap->N,A->cmap->n+a->B->cmap->N,aN); */
2020: /* send and recv coi */
2021: /*-------------------*/
2022: PetscCommGetNewTag(comm,&tagi);
2023: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
2024: PetscMalloc1(len+1,&buf_s);
2025: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
2026: for (proc=0,k=0; proc<size; proc++) {
2027: if (!len_s[proc]) continue;
2028: /* form outgoing message for i-structure:
2029: buf_si[0]: nrows to be sent
2030: [1:nrows]: row index (local to the destination process)
2031: [nrows+1:2*nrows+1]: i-structure index
2032: */
2033: /*-------------------------------------------*/
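/* Illustrative example: sending nrows=2 rows whose destination-local indices are 2 and 5,
   with 3 and 4 nonzeros respectively, gives
     buf_si = {2, 2, 5, 0, 3, 7}
   i.e. nrows, the destination-local row indices, then the CSR-style offsets, for a total
   length of 2*(nrows+1) = 6 = len_si[proc] as set up earlier. */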
2034: nrows = len_si[proc]/2 - 1;
2035: buf_si_i = buf_si + nrows+1;
2036: buf_si[0] = nrows;
2037: buf_si_i[0] = 0;
2038: nrows = 0;
2039: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
2040: nzi = coi[i+1] - coi[i];
2041: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
2042: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
2043: nrows++;
2044: }
2045: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
2046: k++;
2047: buf_si += len_si[proc];
2048: }
2049: i = merge->nrecv;
2050: while (i--) {
2051: PetscMPIInt icompleted;
2052: MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
2053: }
2054: PetscFree(rwaits);
2055: if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}
2056: PetscFree(len_si);
2057: PetscFree(len_ri);
2058: PetscFree(swaits);
2059: PetscFree(sstatus);
2060: PetscFree(buf_s);
2062: /* compute the local portion of C (mpi mat) */
2063: /*------------------------------------------*/
2064: /* allocate bi array and free space for accumulating nonzero column info */
2065: PetscMalloc1(pn+1,&bi);
2066: bi[0] = 0;
2068: /* set initial free space to be fill*(nnz(P) + nnz(A)) */
2069: nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
2070: PetscFreeSpaceGet(nnz,&free_space);
2071: current_space = free_space;
2073: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);
2074: for (k=0; k<merge->nrecv; k++) {
2075: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
2076: nrows = *buf_ri_k[k];
2077: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */
2078: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
2079: }
2081: PetscLLCondensedCreate_Scalable(Armax,&lnk);
2082: MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);
2083: rmax = 0;
2084: for (i=0; i<pn; i++) {
2085: /* add the nonzero pattern of pdt[i,:]*A into lnk */
2086: pnz = pdti[i+1] - pdti[i];
2087: ptJ = pdtj + pdti[i];
2088: for (j=0; j<pnz; j++) {
2089: row = ptJ[j]; /* row of A == col of Pt */
2090: anz = ai[row+1] - ai[row];
2091: Jptr = aj + ai[row];
2092: /* add the nonzero cols of A_loc[row,:] into the sorted linked list lnk */
2093: PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);
2094: }
2096: /* add received col data into lnk */
2097: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
2098: if (i == *nextrow[k]) { /* i-th row */
2099: nzi = *(nextci[k]+1) - *nextci[k];
2100: Jptr = buf_rj[k] + *nextci[k];
2101: PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);
2102: nextrow[k]++; nextci[k]++;
2103: }
2104: }
2105: nnz = lnk[0];
2107: /* if free space is not available, make more free space */
2108: if (current_space->local_remaining<nnz) {
2109: PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);
2110: nspacedouble++;
2111: }
2112: /* copy data into free space, then initialize lnk */
2113: PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);
2114: MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);
2116: current_space->array += nnz;
2117: current_space->local_used += nnz;
2118: current_space->local_remaining -= nnz;
2120: bi[i+1] = bi[i] + nnz;
2121: if (nnz > rmax) rmax = nnz;
2122: }
2123: PetscFree3(buf_ri_k,nextrow,nextci);
2125: PetscMalloc1(bi[pn]+1,&bj);
2126: PetscFreeSpaceContiguous(&free_space,bj);
2127: afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
2128: if (afill_tmp > afill) afill = afill_tmp;
2129: PetscLLCondensedDestroy_Scalable(lnk);
2130: PetscTableDestroy(&ta);
2132: MatDestroy(&POt);
2133: MatDestroy(&PDt);
2135: /* create symbolic parallel matrix C (it is not clear why assembly cannot be deferred to the numeric phase) */
2136: /*-------------------------------------------------------------------------------*/
2137: MatSetSizes(C,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);
2138: MatSetBlockSizes(C,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));
2139: MatGetType(A,&mtype);
2140: MatSetType(C,mtype);
2141: MatMPIAIJSetPreallocation(C,0,dnz,0,onz);
2142: MatPreallocateFinalize(dnz,onz);
2143: MatSetBlockSize(C,1);
2144: for (i=0; i<pn; i++) {
2145: row = i + rstart;
2146: nnz = bi[i+1] - bi[i];
2147: Jptr = bj + bi[i];
2148: MatSetValues(C,1,&row,nnz,Jptr,NULL,INSERT_VALUES);
2149: }
2150: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
2151: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
2152: merge->bi = bi;
2153: merge->bj = bj;
2154: merge->coi = coi;
2155: merge->coj = coj;
2156: merge->buf_ri = buf_ri;
2157: merge->buf_rj = buf_rj;
2158: merge->owners_co = owners_co;
2160: /* attach the supporting struct to C for reuse */
2161: c = (Mat_MPIAIJ*)C->data;
2163: c->ap = ptap;
2164: ptap->api = NULL;
2165: ptap->apj = NULL;
2166: ptap->merge = merge;
2167: ptap->apa = NULL;
2168: ptap->destroy = C->ops->destroy;
2169: ptap->duplicate = C->ops->duplicate;
2171: C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
2172: C->ops->destroy = MatDestroy_MPIAIJ_PtAP;
2173: C->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
2175: #if defined(PETSC_USE_INFO)
2176: if (bi[pn] != 0) {
2177: PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);
2178: PetscInfo1(C,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);
2179: } else {
2180: PetscInfo(C,"Empty matrix product\n");
2181: }
2182: #endif
2183: return(0);
2184: }
2186: /* ---------------------------------------------------------------- */
2187: static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2188: {
2190: Mat_Product *product = C->product;
2191: Mat A=product->A,B=product->B;
2192: PetscReal fill=product->fill;
2193: PetscBool flg;
2196: /* scalable */
2197: PetscStrcmp(product->alg,"scalable",&flg);
2198: if (flg) {
2199: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);
2200: goto next;
2201: }
2203: /* nonscalable */
2204: PetscStrcmp(product->alg,"nonscalable",&flg);
2205: if (flg) {
2206: MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);
2207: goto next;
2208: }
2210: /* matmatmult */
2211: PetscStrcmp(product->alg,"at*b",&flg);
2212: if (flg) {
2213: Mat At;
2214: Mat_APMPI *ptap;
2215: Mat_MPIAIJ *c;
2216: MatTranspose(A,MAT_INITIAL_MATRIX,&At);
2218: MatMatMultSymbolic_MPIAIJ_MPIAIJ(At,B,fill,C);
2219: c = (Mat_MPIAIJ*)C->data;
2220: ptap = c->ap;
2221: if (ptap) {
2222: ptap->Pt = At;
2223: C->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
2224: }
2225: C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2226: goto next;
2227: }
2229: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct algorithm is not supported");
2231: next:
2232: C->ops->productnumeric = MatProductNumeric_AtB;
2234: {
2235: Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data;
2236: Mat_APMPI *ap = c->ap;
2237: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatFreeIntermediateDataStructures","Mat");
2238: ap->freestruct = PETSC_FALSE;
2239: PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",ap->freestruct,&ap->freestruct, NULL);
2240: PetscOptionsEnd();
2241: }
2242: return(0);
2243: }
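/* The -mat_freeintermediatedatastructures option registered above trades memory for
   reusability: the intermediate data kept in Mat_APMPI is comparable in size to C itself,
   but once it is freed the numeric routines above can no longer update C from new values
   of A and P and instead raise PETSC_ERR_ARG_WRONGSTATE. */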
2245: /* ---------------------------------------------------------------- */
2246: /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2247: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2248: {
2250: Mat_Product *product = C->product;
2251: Mat A=product->A,B=product->B;
2252: #if defined(PETSC_HAVE_HYPRE)
2253: const char *algTypes[4] = {"scalable","nonscalable","seqmpi","hypre"};
2254: PetscInt nalg = 4;
2255: #else
2256: const char *algTypes[3] = {"scalable","nonscalable","seqmpi"};
2257: PetscInt nalg = 3;
2258: #endif
2259: PetscInt alg = 1; /* set nonscalable algorithm as default */
2260: PetscBool flg;
2261: MPI_Comm comm;
2264: /* Check matrix local sizes */
2265: PetscObjectGetComm((PetscObject)C,&comm);
2266: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
2268: /* Set "nonscalable" as default algorithm */
2269: PetscStrcmp(C->product->alg,"default",&flg);
2270: if (flg) {
2271: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2273: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2274: if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2275: MatInfo Ainfo,Binfo;
2276: PetscInt nz_local;
2277: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2279: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2280: MatGetInfo(B,MAT_LOCAL,&Binfo);
2281: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2283: if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2284: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2286: if (alg_scalable) {
2287: alg = 0; /* the scalable algorithm may be ~50% slower than the nonscalable one */
2288: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2289: PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,product->fill*nz_local);
2290: }
2291: }
2292: }
2294: /* Get runtime option */
2295: if (product->api_user) {
2296: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMult","Mat");
2297: PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2298: PetscOptionsEnd();
2299: } else {
2300: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AB","Mat");
2301: PetscOptionsEList("-matproduct_ab_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2302: PetscOptionsEnd();
2303: }
2304: if (flg) {
2305: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2306: }
2308: C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2309: return(0);
2310: }
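/* Illustrative runtime selection: the heuristic default chosen above can be overridden on
   the command line, e.g. -matmatmult_via scalable (MatMatMult() API) or
   -matproduct_ab_via seqmpi (MatProduct API), with the valid values listed in algTypes[]. */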
2312: /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2313: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2314: {
2316: Mat_Product *product = C->product;
2317: Mat A=product->A,B=product->B;
2318: const char *algTypes[3] = {"scalable","nonscalable","at*b"};
2319: PetscInt nalg = 3;
2320: PetscInt alg = 1; /* set default algorithm */
2321: PetscBool flg;
2322: MPI_Comm comm;
2325: /* Check matrix local sizes */
2326: PetscObjectGetComm((PetscObject)C,&comm);
2327: if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != B (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
2329: /* Set default algorithm */
2330: PetscStrcmp(C->product->alg,"default",&flg);
2331: if (flg) {
2332: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2333: }
2335: /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2336: if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2337: MatInfo Ainfo,Binfo;
2338: PetscInt nz_local;
2339: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2341: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2342: MatGetInfo(B,MAT_LOCAL,&Binfo);
2343: nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2345: if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2346: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2348: if (alg_scalable) {
2349: alg = 0; /* the scalable algorithm may be ~50% slower than the nonscalable one */
2350: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2351: PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,product->fill*nz_local);
2352: }
2353: }
2355: /* Get runtime option */
2356: if (product->api_user) {
2357: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatTransposeMatMult","Mat");
2358: PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2359: PetscOptionsEnd();
2360: } else {
2361: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AtB","Mat");
2362: PetscOptionsEList("-matproduct_atb_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2363: PetscOptionsEnd();
2364: }
2365: if (flg) {
2366: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2367: }
2369: C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2370: return(0);
2371: }
2373: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2374: {
2376: Mat_Product *product = C->product;
2377: Mat A=product->A,P=product->B;
2378: MPI_Comm comm;
2379: PetscBool flg;
2380: PetscInt alg=1; /* set default algorithm */
2381: #if !defined(PETSC_HAVE_HYPRE)
2382: const char *algTypes[4] = {"scalable","nonscalable","allatonce","allatonce_merged"};
2383: PetscInt nalg=4;
2384: #else
2385: const char *algTypes[5] = {"scalable","nonscalable","allatonce","allatonce_merged","hypre"};
2386: PetscInt nalg=5;
2387: #endif
2388: PetscInt pN=P->cmap->N;
2391: /* Check matrix local sizes */
2392: PetscObjectGetComm((PetscObject)C,&comm);
2393: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
2394: if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
2396: /* Set "nonscalable" as default algorithm */
2397: PetscStrcmp(C->product->alg,"default",&flg);
2398: if (flg) {
2399: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2401: /* Set "scalable" as default if PN and local nonzeros of A and P are large */
2402: if (pN > 100000) {
2403: MatInfo Ainfo,Pinfo;
2404: PetscInt nz_local;
2405: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
2407: MatGetInfo(A,MAT_LOCAL,&Ainfo);
2408: MatGetInfo(P,MAT_LOCAL,&Pinfo);
2409: nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
2411: if (pN > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2412: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
2414: if (alg_scalable) {
2415: alg = 0; /* the scalable algorithm may be ~50% slower than the nonscalable one */
2416: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2417: }
2418: }
2419: }
2421: /* Get runtime option */
2422: if (product->api_user) {
2423: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatPtAP","Mat");
2424: PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
2425: PetscOptionsEnd();
2426: } else {
2427: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_PtAP","Mat");
2428: PetscOptionsEList("-matproduct_ptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
2429: PetscOptionsEnd();
2430: }
2431: if (flg) {
2432: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2433: }
2435: C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2436: return(0);
2437: }
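/* Illustrative runtime selection: -matptap_via allatonce (MatPtAP() API) or
   -matproduct_ptap_via scalable (MatProduct API); the "hypre" value is only available when
   PETSc is configured with hypre (PETSC_HAVE_HYPRE). */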
2439: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2440: {
2441: Mat_Product *product = C->product;
2442: Mat A = product->A,R=product->B;
2445: /* Check matrix local sizes */
2446: if (A->cmap->n != R->cmap->n || A->rmap->n != R->cmap->n) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A local (%D, %D), R local (%D,%D)",A->rmap->n,A->cmap->n,R->rmap->n,R->cmap->n);
2448: C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2449: return(0);
2450: }
2452: /*
2453: Set options for ABC = A*B*C = A*(B*C); the ABC algorithm must be one of the AB algorithms
2454: */
2455: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2456: {
2458: Mat_Product *product = C->product;
2459: PetscBool flg = PETSC_FALSE;
2460: PetscInt alg = 1; /* default algorithm */
2461: const char *algTypes[3] = {"scalable","nonscalable","seqmpi"};
2462: PetscInt nalg = 3;
2465: /* Set default algorithm */
2466: PetscStrcmp(C->product->alg,"default",&flg);
2467: if (flg) {
2468: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2469: }
2471: /* Get runtime option */
2472: if (product->api_user) {
2473: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMatMult","Mat");
2474: PetscOptionsEList("-matmatmatmult_via","Algorithmic approach","MatMatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);
2475: PetscOptionsEnd();
2476: } else {
2477: PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_ABC","Mat");
2478: PetscOptionsEList("-matproduct_abc_via","Algorithmic approach","MatProduct_ABC",algTypes,nalg,algTypes[alg],&alg,&flg);
2479: PetscOptionsEnd();
2480: }
2481: if (flg) {
2482: MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);
2483: }
2485: C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2486: C->ops->productsymbolic = MatProductSymbolic_ABC;
2487: return(0);
2488: }
2490: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2491: {
2493: Mat_Product *product = C->product;
2496: switch (product->type) {
2497: case MATPRODUCT_AB:
2498: MatProductSetFromOptions_MPIAIJ_AB(C);
2499: break;
2500: case MATPRODUCT_AtB:
2501: MatProductSetFromOptions_MPIAIJ_AtB(C);
2502: break;
2503: case MATPRODUCT_PtAP:
2504: MatProductSetFromOptions_MPIAIJ_PtAP(C);
2505: break;
2506: case MATPRODUCT_RARt:
2507: MatProductSetFromOptions_MPIAIJ_RARt(C);
2508: break;
2509: case MATPRODUCT_ABC:
2510: MatProductSetFromOptions_MPIAIJ_ABC(C);
2511: break;
2512: default: SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_SUP,"MatProduct type %s is not supported for MPIAIJ and MPIAIJ matrices",MatProductTypes[product->type]);
2513: }
2514: return(0);
2515: }
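/*
   Illustrative sketch of the user-level MatProduct call sequence that dispatches to the
   routines in this file, assuming A and B are assembled MATMPIAIJ matrices:

     Mat C;
     MatProductCreate(A,B,NULL,&C);
     MatProductSetType(C,MATPRODUCT_AtB);       or MATPRODUCT_AB, MATPRODUCT_PtAP, ...
     MatProductSetAlgorithm(C,"default");
     MatProductSetFill(C,PETSC_DEFAULT);
     MatProductSetFromOptions(C);               calls MatProductSetFromOptions_MPIAIJ() above
     MatProductSymbolic(C);
     MatProductNumeric(C);
     MatDestroy(&C);
*/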