Actual source code: mpiptap.c
petsc-3.11.4 2019-09-28
2: /*
3: Defines projective product routines where A is an MPIAIJ matrix
4: C = P^T * A * P
5: */
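/*
   Editorial usage sketch (not part of the original file): the routines below are normally
   reached through the public MatPtAP() interface.  The example assumes A and P are already
   assembled MPIAIJ matrices with compatible layouts; the helper name and the fill estimate
   2.0 are illustrative only.  The #if 0 guard marks it as documentation, not build code.
*/
#if 0
static PetscErrorCode ExamplePtAPUsage(Mat A,Mat P)
{
  PetscErrorCode ierr;
  Mat            C;

  /* first call: symbolic + numeric product C = P^T * A * P */
  ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);
  /* after the numerical values of A change, reuse the symbolic data attached to C */
  ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);
  ierr = MatDestroy(&C);CHKERRQ(ierr);
  return(0);
}
#endif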
7: #include <../src/mat/impls/aij/seq/aij.h>
8: #include <../src/mat/utils/freespace.h>
9: #include <../src/mat/impls/aij/mpi/mpiaij.h>
10: #include <petscbt.h>
11: #include <petsctime.h>
13: /* #define PTAP_PROFILE */
15: PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
16: {
17: PetscErrorCode ierr;
18: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
19: Mat_APMPI *ptap=a->ap;
20: PetscBool iascii;
21: PetscViewerFormat format;
24: if (!ptap) {
25: /* hack: MatDuplicate() copies oldmat->ops->view into newmat, which is a base Mat class with a NULL ptap! */
26: A->ops->view = MatView_MPIAIJ;
27: (A->ops->view)(A,viewer);
28: return(0);
29: }
31: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
32: if (iascii) {
33: PetscViewerGetFormat(viewer,&format);
34: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
35: if (ptap->algType == 0) {
36: PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");
37: } else if (ptap->algType == 1) {
38: PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");
39: }
40: }
41: }
42: (ptap->view)(A,viewer);
43: return(0);
44: }
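/*
   Editorial sketch (assumed usage, not in the original source): the algorithm line printed by
   MatView_MPIAIJ_PtAP() appears when C is viewed with an ASCII viewer in INFO format, e.g. via
   the run-time option -mat_view ::ascii_info or programmatically as below.
*/
#if 0
static PetscErrorCode ExampleViewPtAPInfo(Mat C)
{
  PetscErrorCode ierr;

  ierr = PetscViewerPushFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);
  ierr = MatView(C,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); /* reports "using (non)scalable MatPtAP() implementation" */
  ierr = PetscViewerPopFormat(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  return(0);
}
#endif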
46: PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_AP(Mat A)
47: {
48: PetscErrorCode ierr;
49: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
50: Mat_APMPI *ptap=a->ap;
51: Mat_Merge_SeqsToMPI *merge;
54: if (!ptap) return(0);
56: PetscFree2(ptap->startsj_s,ptap->startsj_r);
57: PetscFree(ptap->bufa);
58: MatDestroy(&ptap->P_loc);
59: MatDestroy(&ptap->P_oth);
60: MatDestroy(&ptap->A_loc); /* used by MatTransposeMatMult() */
61: MatDestroy(&ptap->Rd);
62: MatDestroy(&ptap->Ro);
63: if (ptap->AP_loc) { /* used by alg_rap */
64: Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
65: PetscFree(ap->i);
66: PetscFree2(ap->j,ap->a);
67: MatDestroy(&ptap->AP_loc);
68: } else { /* used by alg_ptap */
69: PetscFree(ptap->api);
70: PetscFree(ptap->apj);
71: }
72: MatDestroy(&ptap->C_loc);
73: MatDestroy(&ptap->C_oth);
74: if (ptap->apa) {PetscFree(ptap->apa);}
76: MatDestroy(&ptap->Pt);
78: merge=ptap->merge;
79: if (merge) { /* used by alg_ptap */
80: PetscFree(merge->id_r);
81: PetscFree(merge->len_s);
82: PetscFree(merge->len_r);
83: PetscFree(merge->bi);
84: PetscFree(merge->bj);
85: PetscFree(merge->buf_ri[0]);
86: PetscFree(merge->buf_ri);
87: PetscFree(merge->buf_rj[0]);
88: PetscFree(merge->buf_rj);
89: PetscFree(merge->coi);
90: PetscFree(merge->coj);
91: PetscFree(merge->owners_co);
92: PetscLayoutDestroy(&merge->rowmap);
93: PetscFree(ptap->merge);
94: }
95: ISLocalToGlobalMappingDestroy(&ptap->ltog);
97: A->ops->destroy = ptap->destroy;
98: PetscFree(a->ap);
99: return(0);
100: }
102: PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
103: {
107: (*A->ops->freeintermediatedatastructures)(A);
108: (*A->ops->destroy)(A);
109: return(0);
110: }
112: PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
113: {
115: PetscBool flg;
116: MPI_Comm comm;
117: #if !defined(PETSC_HAVE_HYPRE)
118: const char *algTypes[2] = {"scalable","nonscalable"};
119: PetscInt nalg=2;
120: #else
121: const char *algTypes[3] = {"scalable","nonscalable","hypre"};
122: PetscInt nalg=3;
123: #endif
124: PetscInt pN=P->cmap->N,alg=1; /* set default algorithm */
127: /* check if matrix local sizes are compatible */
128: PetscObjectGetComm((PetscObject)A,&comm);
129: if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
130: if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
132: if (scall == MAT_INITIAL_MATRIX) {
133: /* pick an algorithm */
134: PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");
135: PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);
136: PetscOptionsEnd();
138: if (!flg && pN > 100000) { /* may switch to scalable algorithm as default */
139: MatInfo Ainfo,Pinfo;
140: PetscInt nz_local;
141: PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
143: MatGetInfo(A,MAT_LOCAL,&Ainfo);
144: MatGetInfo(P,MAT_LOCAL,&Pinfo);
145: nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
147: if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
148: MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);
150: if (alg_scalable) {
151: alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable algorithm */
152: }
153: }
155: switch (alg) {
156: case 1:
157: /* do R=P^T locally, then C=R*A*P -- nonscalable */
158: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
159: MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);
160: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
161: break;
162: #if defined(PETSC_HAVE_HYPRE)
163: case 2:
164: /* Use boomerAMGBuildCoarseOperator */
165: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
166: MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);
167: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
168: break;
169: #endif
170: default:
171: /* do R=P^T locally, then C=R*A*P -- scalable */
172: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
173: MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);
174: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
175: break;
176: }
178: if (alg == 0 || alg == 1) {
179: Mat_MPIAIJ *c = (Mat_MPIAIJ*)(*C)->data;
180: Mat_APMPI *ap = c->ap;
181: PetscOptionsBegin(PetscObjectComm((PetscObject)(*C)),((PetscObject)(*C))->prefix,"MatFreeIntermediateDataStructures","Mat");
182: ap->freestruct = PETSC_FALSE;
183: PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",ap->freestruct,&ap->freestruct, NULL);
184: PetscOptionsEnd();
185: }
186: }
188: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
189: (*(*C)->ops->ptapnumeric)(A,P,*C);
190: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
191: return(0);
192: }
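/*
   Editorial sketch: the algorithm chosen in MatPtAP_MPIAIJ_MPIAIJ() can be forced from the
   options database before the first call with MAT_INITIAL_MATRIX, e.g. -matptap_via scalable
   (or nonscalable/hypre when available), and -mat_freeintermediatedatastructures releases the
   supporting structure after the numeric phase.  A programmatic equivalent (illustrative only):
*/
#if 0
static PetscErrorCode ExampleSelectPtAPAlgorithm(void)
{
  PetscErrorCode ierr;

  ierr = PetscOptionsSetValue(NULL,"-matptap_via","scalable");CHKERRQ(ierr);
  ierr = PetscOptionsSetValue(NULL,"-mat_freeintermediatedatastructures","1");CHKERRQ(ierr);
  return(0);
}
#endif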
194: PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
195: {
196: PetscErrorCode ierr;
197: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
198: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
199: Mat_SeqAIJ *ap,*p_loc,*p_oth,*c_seq;
200: Mat_APMPI *ptap = c->ap;
201: Mat AP_loc,C_loc,C_oth;
202: PetscInt i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz,nout;
203: PetscScalar *apa;
204: const PetscInt *cols;
205: const PetscScalar *vals;
208: if (!ptap) {
209: MPI_Comm comm;
210: PetscObjectGetComm((PetscObject)C,&comm);
211: SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
212: }
214: MatZeroEntries(C);
216: /* 1) get R = Pd^T,Ro = Po^T */
217: if (ptap->reuse == MAT_REUSE_MATRIX) {
218: MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);
219: MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);
220: }
222: /* 2) get AP_loc */
223: AP_loc = ptap->AP_loc;
224: ap = (Mat_SeqAIJ*)AP_loc->data;
226: /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
227: /*-----------------------------------------------------*/
228: if (ptap->reuse == MAT_REUSE_MATRIX) {
229: /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
230: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
231: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
232: }
234: /* 2-2) compute numeric A_loc*P - dominating part */
235: /* ---------------------------------------------- */
236: /* get data from symbolic products */
237: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
238: if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
239: else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");
241: api = ap->i;
242: apj = ap->j;
243: ISLocalToGlobalMappingApply(ptap->ltog,api[AP_loc->rmap->n],apj,apj);
244: for (i=0; i<am; i++) {
245: /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
246: apnz = api[i+1] - api[i];
247: apa = ap->a + api[i];
248: PetscMemzero(apa,sizeof(PetscScalar)*apnz);
249: AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
250: PetscLogFlops(2.0*apnz);
251: }
252: ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,api[AP_loc->rmap->n],apj,&nout,apj);
253: if (api[AP_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",api[AP_loc->rmap->n],nout);
255: /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
256: /* Always use the scalable version since we are in the MPI scalable algorithm */
257: MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);
258: MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);
260: C_loc = ptap->C_loc;
261: C_oth = ptap->C_oth;
263: /* add C_loc and C_oth to C */
264: MatGetOwnershipRange(C,&rstart,&rend);
266: /* C_loc -> C */
267: cm = C_loc->rmap->N;
268: c_seq = (Mat_SeqAIJ*)C_loc->data;
269: cols = c_seq->j;
270: vals = c_seq->a;
271: ISLocalToGlobalMappingApply(ptap->ltog,c_seq->i[C_loc->rmap->n],c_seq->j,c_seq->j);
273: /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and */
274: /* when there are no off-processor parts. */
275: /* If was_assembled is true, the assignment aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
276: /* is no longer valid. The slower MatSetValues_MPIAIJ() must be used instead, which looks up each column index in a table */
277: /* and performs the additional bookkeeping that entails. */
278: if (C->assembled) {
279: C->was_assembled = PETSC_TRUE;
280: C->assembled = PETSC_FALSE;
281: }
282: if (C->was_assembled) {
283: for (i=0; i<cm; i++) {
284: ncols = c_seq->i[i+1] - c_seq->i[i];
285: row = rstart + i;
286: MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);
287: cols += ncols; vals += ncols;
288: }
289: } else {
290: MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);
291: }
292: ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_seq->i[C_loc->rmap->n],c_seq->j,&nout,c_seq->j);
293: if (c_seq->i[C_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_seq->i[C_loc->rmap->n],nout);
295: /* Co -> C, off-processor part */
296: cm = C_oth->rmap->N;
297: c_seq = (Mat_SeqAIJ*)C_oth->data;
298: cols = c_seq->j;
299: vals = c_seq->a;
300: ISLocalToGlobalMappingApply(ptap->ltog,c_seq->i[C_oth->rmap->n],c_seq->j,c_seq->j);
301: for (i=0; i<cm; i++) {
302: ncols = c_seq->i[i+1] - c_seq->i[i];
303: row = p->garray[i];
304: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
305: cols += ncols; vals += ncols;
306: }
307: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
308: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
310: ptap->reuse = MAT_REUSE_MATRIX;
312: ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_seq->i[C_oth->rmap->n],c_seq->j,&nout,c_seq->j);
313: if (c_seq->i[C_oth->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_seq->i[C_oth->rmap->n],nout);
315: /* supporting struct ptap consumes almost the same amount of memory as C=PtAP, release it if C will not be updated by A and P */
316: if (ptap->freestruct) {
317: MatFreeIntermediateDataStructures(C);
318: }
319: return(0);
320: }
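/*
   Editorial sketch of the column-compaction pattern used above: AP_loc stores compacted (local)
   column indices together with ptap->ltog; before values are inserted into C the indices are
   expanded to global numbering in place, and afterwards they are mapped back with IS_GTOLM_DROP
   so AP_loc keeps its compact form for the next reuse.  The helper below is illustrative only.
*/
#if 0
static PetscErrorCode ExampleCompactExpandColumns(ISLocalToGlobalMapping ltog,PetscInt nnz,PetscInt *cols)
{
  PetscErrorCode ierr;
  PetscInt       nout;

  ierr = ISLocalToGlobalMappingApply(ltog,nnz,cols,cols);CHKERRQ(ierr);                       /* local -> global, in place */
  /* ... use the global indices here, e.g. in MatSetValues() ... */
  ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,nnz,cols,&nout,cols);CHKERRQ(ierr);   /* global -> local, in place */
  if (nout != nnz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D",nnz,nout);
  return(0);
}
#endif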
322: PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
323: {
324: PetscErrorCode ierr;
325: Mat_APMPI *ptap;
326: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
327: MPI_Comm comm;
328: PetscMPIInt size,rank;
329: Mat Cmpi,P_loc,P_oth;
330: PetscFreeSpaceList free_space=NULL,current_space=NULL;
331: PetscInt am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
332: PetscInt *lnk,i,k,pnz,row,nsend;
333: PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
334: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
335: PetscInt len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
336: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
337: MPI_Request *swaits,*rwaits;
338: MPI_Status *sstatus,rstatus;
339: PetscLayout rowmap;
340: PetscInt *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A*P - used in the communication */
341: PetscMPIInt *len_r,*id_r; /* array of length of comm->size, store send/recv matrix values */
342: PetscInt *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi,nout;
343: Mat_SeqAIJ *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
344: PetscScalar *apv;
345: PetscTable ta;
346: MatType mtype;
347: const char *prefix;
348: #if defined(PETSC_USE_INFO)
349: PetscReal apfill;
350: #endif
353: PetscObjectGetComm((PetscObject)A,&comm);
354: MPI_Comm_size(comm,&size);
355: MPI_Comm_rank(comm,&rank);
357: if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
359: /* create symbolic parallel matrix Cmpi */
360: MatCreate(comm,&Cmpi);
361: MatGetType(A,&mtype);
362: MatSetType(Cmpi,mtype);
364: /* create struct Mat_APMPI and attach it to C later */
365: PetscNew(&ptap);
366: ptap->reuse = MAT_INITIAL_MATRIX;
367: ptap->algType = 0;
369: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
370: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);
371: /* get P_loc by taking all local rows of P */
372: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);
374: ptap->P_loc = P_loc;
375: ptap->P_oth = P_oth;
377: /* (0) compute Rd = Pd^T, Ro = Po^T */
378: /* --------------------------------- */
379: MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);
380: MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);
382: /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
383: /* ----------------------------------------------------------------- */
384: p_loc = (Mat_SeqAIJ*)P_loc->data;
385: if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;
387: /* create and initialize a linked list */
388: PetscTableCreate(pn,pN,&ta); /* for computing AP_loc and Cmpi */
389: MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
390: MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
391: PetscTableGetCount(ta,&Crmax); /* Crmax = nnz(sum of Prows) */
393: PetscLLCondensedCreate_Scalable(Crmax,&lnk);
395: /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
396: if (ao) {
397: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);
398: } else {
399: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);
400: }
401: current_space = free_space;
402: nspacedouble = 0;
404: PetscMalloc1(am+1,&api);
405: api[0] = 0;
406: for (i=0; i<am; i++) {
407: /* diagonal portion: Ad[i,:]*P */
408: ai = ad->i; pi = p_loc->i;
409: nzi = ai[i+1] - ai[i];
410: aj = ad->j + ai[i];
411: for (j=0; j<nzi; j++) {
412: row = aj[j];
413: pnz = pi[row+1] - pi[row];
414: Jptr = p_loc->j + pi[row];
415: /* add non-zero cols of P into the sorted linked list lnk */
416: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
417: }
418: /* off-diagonal portion: Ao[i,:]*P */
419: if (ao) {
420: ai = ao->i; pi = p_oth->i;
421: nzi = ai[i+1] - ai[i];
422: aj = ao->j + ai[i];
423: for (j=0; j<nzi; j++) {
424: row = aj[j];
425: pnz = pi[row+1] - pi[row];
426: Jptr = p_oth->j + pi[row];
427: PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);
428: }
429: }
430: apnz = lnk[0];
431: api[i+1] = api[i] + apnz;
433: /* if free space is not available, double the total space in the list */
434: if (current_space->local_remaining<apnz) {
435: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
436: nspacedouble++;
437: }
439: /* Copy data into free space, then initialize lnk */
440: PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);
442: current_space->array += apnz;
443: current_space->local_used += apnz;
444: current_space->local_remaining -= apnz;
445: }
446: /* Allocate space for apj and apv, initialize apj, and */
447: /* destroy list of free space and other temporary array(s) */
448: PetscCalloc2(api[am],&apj,api[am],&apv);
449: PetscFreeSpaceContiguous(&free_space,apj);
450: PetscLLCondensedDestroy_Scalable(lnk);
452: /* Create AP_loc for reuse */
453: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);
454: MatSeqAIJCompactOutExtraColumns_SeqAIJ(ptap->AP_loc, &ptap->ltog);
456: #if defined(PETSC_USE_INFO)
457: if (ao) {
458: apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
459: } else {
460: apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
461: }
462: ptap->AP_loc->info.mallocs = nspacedouble;
463: ptap->AP_loc->info.fill_ratio_given = fill;
464: ptap->AP_loc->info.fill_ratio_needed = apfill;
466: if (api[am]) {
467: PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);
468: PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);
469: } else {
470: PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty\n");
471: }
472: #endif
474: /* (2-1) compute symbolic Co = Ro*AP_loc */
475: /* ------------------------------------ */
476: MatGetOptionsPrefix(A,&prefix);
477: MatSetOptionsPrefix(ptap->Ro,prefix);
478: MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");
479: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);
481: /* (3) send coj of C_oth to other processors */
482: /* ------------------------------------------ */
483: /* determine row ownership */
484: PetscLayoutCreate(comm,&rowmap);
485: rowmap->n = pn;
486: rowmap->bs = 1;
487: PetscLayoutSetUp(rowmap);
488: owners = rowmap->range;
490: /* determine the number of messages to send, their lengths */
491: PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);
492: PetscMemzero(len_s,size*sizeof(PetscMPIInt));
493: PetscMemzero(len_si,size*sizeof(PetscMPIInt));
495: c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
496: coi = c_oth->i; coj = c_oth->j;
497: con = ptap->C_oth->rmap->n;
498: proc = 0;
499: ISLocalToGlobalMappingApply(ptap->ltog,coi[con],coj,coj);
500: for (i=0; i<con; i++) {
501: while (prmap[i] >= owners[proc+1]) proc++;
502: len_si[proc]++; /* num of rows in Co(=Pt*AP) to be sent to [proc] */
503: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
504: }
506: len = 0; /* max length of buf_si[], see (4) */
507: owners_co[0] = 0;
508: nsend = 0;
509: for (proc=0; proc<size; proc++) {
510: owners_co[proc+1] = owners_co[proc] + len_si[proc];
511: if (len_s[proc]) {
512: nsend++;
513: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
514: len += len_si[proc];
515: }
516: }
518: /* determine the number and length of messages to receive for coi and coj */
519: PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);
520: PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);
522: /* post the Irecv and Isend of coj */
523: PetscCommGetNewTag(comm,&tagj);
524: PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);
525: PetscMalloc1(nsend+1,&swaits);
526: for (proc=0, k=0; proc<size; proc++) {
527: if (!len_s[proc]) continue;
528: i = owners_co[proc];
529: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
530: k++;
531: }
533: /* (2-2) compute symbolic C_loc = Rd*AP_loc */
534: /* ---------------------------------------- */
535: MatSetOptionsPrefix(ptap->Rd,prefix);
536: MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");
537: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);
538: c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
539: ISLocalToGlobalMappingApply(ptap->ltog,c_loc->i[ptap->C_loc->rmap->n],c_loc->j,c_loc->j);
541: /* receives of coj are complete */
542: for (i=0; i<nrecv; i++) {
543: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
544: }
545: PetscFree(rwaits);
546: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
548: /* add received column indices into ta to update Crmax */
549: for (k=0; k<nrecv; k++) {/* k-th received message */
550: Jptr = buf_rj[k];
551: for (j=0; j<len_r[k]; j++) {
552: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
553: }
554: }
555: PetscTableGetCount(ta,&Crmax);
556: PetscTableDestroy(&ta);
558: /* (4) send and recv coi */
559: /*-----------------------*/
560: PetscCommGetNewTag(comm,&tagi);
561: PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);
562: PetscMalloc1(len+1,&buf_s);
563: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
564: for (proc=0,k=0; proc<size; proc++) {
565: if (!len_s[proc]) continue;
566: /* form outgoing message for i-structure:
567: buf_si[0]: nrows to be sent
568: [1:nrows]: row index (global)
569: [nrows+1:2*nrows+1]: i-structure index
570: */
571: /*-------------------------------------------*/
572: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
573: buf_si_i = buf_si + nrows+1;
574: buf_si[0] = nrows;
575: buf_si_i[0] = 0;
576: nrows = 0;
577: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
578: nzi = coi[i+1] - coi[i];
579: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
580: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
581: nrows++;
582: }
583: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
584: k++;
585: buf_si += len_si[proc];
586: }
587: for (i=0; i<nrecv; i++) {
588: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
589: }
590: PetscFree(rwaits);
591: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
593: PetscFree4(len_s,len_si,sstatus,owners_co);
594: PetscFree(len_ri);
595: PetscFree(swaits);
596: PetscFree(buf_s);
598: /* (5) compute the local portion of Cmpi */
599: /* ------------------------------------------ */
600: /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
601: PetscFreeSpaceGet(Crmax,&free_space);
602: current_space = free_space;
604: PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);
605: for (k=0; k<nrecv; k++) {
606: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
607: nrows = *buf_ri_k[k];
608: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
609: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
610: }
612: MatPreallocateInitialize(comm,pn,pn,dnz,onz);
613: PetscLLCondensedCreate_Scalable(Crmax,&lnk);
614: for (i=0; i<pn; i++) {
615: /* add C_loc into Cmpi */
616: nzi = c_loc->i[i+1] - c_loc->i[i];
617: Jptr = c_loc->j + c_loc->i[i];
618: PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);
620: /* add received col data into lnk */
621: for (k=0; k<nrecv; k++) { /* k-th received message */
622: if (i == *nextrow[k]) { /* i-th row */
623: nzi = *(nextci[k]+1) - *nextci[k];
624: Jptr = buf_rj[k] + *nextci[k];
625: PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);
626: nextrow[k]++; nextci[k]++;
627: }
628: }
629: nzi = lnk[0];
631: /* copy data into free space, then initialize lnk */
632: PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);
633: MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);
634: }
635: PetscFree3(buf_ri_k,nextrow,nextci);
636: PetscLLCondensedDestroy_Scalable(lnk);
637: PetscFreeSpaceDestroy(free_space);
639: /* local sizes and preallocation */
640: MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);
641: if (P->cmap->bs > 0) {
642: PetscLayoutSetBlockSize(Cmpi->rmap,P->cmap->bs);
643: PetscLayoutSetBlockSize(Cmpi->cmap,P->cmap->bs);
644: }
645: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
646: MatPreallocateFinalize(dnz,onz);
648: /* members in merge */
649: PetscFree(id_r);
650: PetscFree(len_r);
651: PetscFree(buf_ri[0]);
652: PetscFree(buf_ri);
653: PetscFree(buf_rj[0]);
654: PetscFree(buf_rj);
655: PetscLayoutDestroy(&rowmap);
657: /* attach the supporting struct to Cmpi for reuse */
658: c = (Mat_MPIAIJ*)Cmpi->data;
659: c->ap = ptap;
660: ptap->duplicate = Cmpi->ops->duplicate;
661: ptap->destroy = Cmpi->ops->destroy;
662: ptap->view = Cmpi->ops->view;
664: /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
665: Cmpi->assembled = PETSC_FALSE;
666: Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
667: Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
668: Cmpi->ops->view = MatView_MPIAIJ_PtAP;
669: Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
670: *C = Cmpi;
672: nout = 0;
673: ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_oth->i[ptap->C_oth->rmap->n],c_oth->j,&nout,c_oth->j);
674: if (c_oth->i[ptap->C_oth->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_oth->i[ptap->C_oth->rmap->n],nout);
675: ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_loc->i[ptap->C_loc->rmap->n],c_loc->j,&nout,c_loc->j);
676: if (c_loc->i[ptap->C_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_loc->i[ptap->C_loc->rmap->n],nout);
678: return(0);
679: }
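/*
   Editorial sketch of the row-merge idea used in the symbolic phase above: for each row i of A,
   the column patterns of the P rows selected by A's nonzeros are merged to get the nonzero count
   of row i of A*P.  PETSc does this with a condensed sorted linked list (PetscLLCondensed*) plus
   a growable free-space list; the marker-array version below is a simplified, self-contained
   illustration only, not the PETSc implementation.
*/
#if 0
static PetscInt ExampleSymbolicAPRowCount(PetscInt nza,const PetscInt *acols,    /* nonzero columns of A[i,:]         */
                                          const PetscInt *pi,const PetscInt *pj, /* CSR row pointers/columns of P     */
                                          PetscInt *marker)                      /* work array of length pN, all zero */
{
  PetscInt k,j,apnz = 0;

  for (k=0; k<nza; k++) {
    PetscInt prow = acols[k];
    for (j=pi[prow]; j<pi[prow+1]; j++) {
      if (!marker[pj[j]]) { marker[pj[j]] = 1; apnz++; } /* first time this column appears in row i of A*P */
    }
  }
  /* reset the touched entries so the marker can be reused for the next row */
  for (k=0; k<nza; k++) {
    PetscInt prow = acols[k];
    for (j=pi[prow]; j<pi[prow+1]; j++) marker[pj[j]] = 0;
  }
  return apnz; /* = api[i+1] - api[i] */
}
#endif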
681: PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
682: {
683: PetscErrorCode ierr;
684: Mat_APMPI *ptap;
685: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
686: MPI_Comm comm;
687: PetscMPIInt size,rank;
688: Mat Cmpi;
689: PetscFreeSpaceList free_space=NULL,current_space=NULL;
690: PetscInt am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
691: PetscInt *lnk,i,k,pnz,row,nsend;
692: PetscBT lnkbt;
693: PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
694: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
695: PetscInt len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
696: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
697: MPI_Request *swaits,*rwaits;
698: MPI_Status *sstatus,rstatus;
699: PetscLayout rowmap;
700: PetscInt *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A*P - used in the communication */
701: PetscMPIInt *len_r,*id_r; /* array of length of comm->size, store send/recv matrix values */
702: PetscInt *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
703: Mat_SeqAIJ *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
704: PetscScalar *apv;
705: PetscTable ta;
706: MatType mtype;
707: const char *prefix;
708: #if defined(PETSC_USE_INFO)
709: PetscReal apfill;
710: #endif
713: PetscObjectGetComm((PetscObject)A,&comm);
714: MPI_Comm_size(comm,&size);
715: MPI_Comm_rank(comm,&rank);
717: if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
719: /* create symbolic parallel matrix Cmpi */
720: MatCreate(comm,&Cmpi);
721: MatGetType(A,&mtype);
722: MatSetType(Cmpi,mtype);
724: /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
725: Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;
727: /* create struct Mat_APMPI and attach it to C later */
728: PetscNew(&ptap);
729: ptap->reuse = MAT_INITIAL_MATRIX;
730: ptap->algType = 1;
732: /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
733: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
734: /* get P_loc by taking all local rows of P */
735: MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);
737: /* (0) compute Rd = Pd^T, Ro = Po^T */
738: /* --------------------------------- */
739: MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);
740: MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);
742: /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
743: /* ----------------------------------------------------------------- */
744: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
745: if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
747: /* create and initialize a linked list */
748: PetscTableCreate(pn,pN,&ta); /* for computing AP_loc and Cmpi */
749: MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
750: MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
751: PetscTableGetCount(ta,&Crmax); /* Crmax = nnz(sum of Prows) */
752: /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */
754: PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);
756: /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
757: if (ao) {
758: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);
759: } else {
760: PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);
761: }
762: current_space = free_space;
763: nspacedouble = 0;
765: PetscMalloc1(am+1,&api);
766: api[0] = 0;
767: for (i=0; i<am; i++) {
768: /* diagonal portion: Ad[i,:]*P */
769: ai = ad->i; pi = p_loc->i;
770: nzi = ai[i+1] - ai[i];
771: aj = ad->j + ai[i];
772: for (j=0; j<nzi; j++) {
773: row = aj[j];
774: pnz = pi[row+1] - pi[row];
775: Jptr = p_loc->j + pi[row];
776: /* add non-zero cols of P into the sorted linked list lnk */
777: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
778: }
779: /* off-diagonal portion: Ao[i,:]*P */
780: if (ao) {
781: ai = ao->i; pi = p_oth->i;
782: nzi = ai[i+1] - ai[i];
783: aj = ao->j + ai[i];
784: for (j=0; j<nzi; j++) {
785: row = aj[j];
786: pnz = pi[row+1] - pi[row];
787: Jptr = p_oth->j + pi[row];
788: PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
789: }
790: }
791: apnz = lnk[0];
792: api[i+1] = api[i] + apnz;
793: if (ap_rmax < apnz) ap_rmax = apnz;
795: /* if free space is not available, double the total space in the list */
796: if (current_space->local_remaining<apnz) {
797: PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);
798: nspacedouble++;
799: }
801: /* Copy data into free space, then initialize lnk */
802: PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);
804: current_space->array += apnz;
805: current_space->local_used += apnz;
806: current_space->local_remaining -= apnz;
807: }
808: /* Allocate space for apj and apv, initialize apj, and */
809: /* destroy list of free space and other temporary array(s) */
810: PetscMalloc2(api[am],&apj,api[am],&apv);
811: PetscFreeSpaceContiguous(&free_space,apj);
812: PetscLLDestroy(lnk,lnkbt);
814: /* Create AP_loc for reuse */
815: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);
817: #if defined(PETSC_USE_INFO)
818: if (ao) {
819: apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
820: } else {
821: apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
822: }
823: ptap->AP_loc->info.mallocs = nspacedouble;
824: ptap->AP_loc->info.fill_ratio_given = fill;
825: ptap->AP_loc->info.fill_ratio_needed = apfill;
827: if (api[am]) {
828: PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);
829: PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);
830: } else {
831: PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty\n");
832: }
833: #endif
835: /* (2-1) compute symbolic Co = Ro*AP_loc */
836: /* ------------------------------------ */
837: MatGetOptionsPrefix(A,&prefix);
838: MatSetOptionsPrefix(ptap->Ro,prefix);
839: MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");
840: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);
842: /* (3) send coj of C_oth to other processors */
843: /* ------------------------------------------ */
844: /* determine row ownership */
845: PetscLayoutCreate(comm,&rowmap);
846: rowmap->n = pn;
847: rowmap->bs = 1;
848: PetscLayoutSetUp(rowmap);
849: owners = rowmap->range;
851: /* determine the number of messages to send, their lengths */
852: PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);
853: PetscMemzero(len_s,size*sizeof(PetscMPIInt));
854: PetscMemzero(len_si,size*sizeof(PetscMPIInt));
856: c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
857: coi = c_oth->i; coj = c_oth->j;
858: con = ptap->C_oth->rmap->n;
859: proc = 0;
860: for (i=0; i<con; i++) {
861: while (prmap[i] >= owners[proc+1]) proc++;
862: len_si[proc]++; /* num of rows in Co(=Pt*AP) to be sent to [proc] */
863: len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
864: }
866: len = 0; /* max length of buf_si[], see (4) */
867: owners_co[0] = 0;
868: nsend = 0;
869: for (proc=0; proc<size; proc++) {
870: owners_co[proc+1] = owners_co[proc] + len_si[proc];
871: if (len_s[proc]) {
872: nsend++;
873: len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
874: len += len_si[proc];
875: }
876: }
878: /* determine the number and length of messages to receive for coi and coj */
879: PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);
880: PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);
882: /* post the Irecv and Isend of coj */
883: PetscCommGetNewTag(comm,&tagj);
884: PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);
885: PetscMalloc1(nsend+1,&swaits);
886: for (proc=0, k=0; proc<size; proc++) {
887: if (!len_s[proc]) continue;
888: i = owners_co[proc];
889: MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
890: k++;
891: }
893: /* (2-2) compute symbolic C_loc = Rd*AP_loc */
894: /* ---------------------------------------- */
895: MatSetOptionsPrefix(ptap->Rd,prefix);
896: MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");
897: MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);
898: c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
900: /* receives of coj are complete */
901: for (i=0; i<nrecv; i++) {
902: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
903: }
904: PetscFree(rwaits);
905: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
907: /* add received column indices into ta to update Crmax */
908: for (k=0; k<nrecv; k++) {/* k-th received message */
909: Jptr = buf_rj[k];
910: for (j=0; j<len_r[k]; j++) {
911: PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);
912: }
913: }
914: PetscTableGetCount(ta,&Crmax);
915: PetscTableDestroy(&ta);
917: /* (4) send and recv coi */
918: /*-----------------------*/
919: PetscCommGetNewTag(comm,&tagi);
920: PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);
921: PetscMalloc1(len+1,&buf_s);
922: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
923: for (proc=0,k=0; proc<size; proc++) {
924: if (!len_s[proc]) continue;
925: /* form outgoing message for i-structure:
926: buf_si[0]: nrows to be sent
927: [1:nrows]: row index (global)
928: [nrows+1:2*nrows+1]: i-structure index
929: */
930: /*-------------------------------------------*/
931: nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
932: buf_si_i = buf_si + nrows+1;
933: buf_si[0] = nrows;
934: buf_si_i[0] = 0;
935: nrows = 0;
936: for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
937: nzi = coi[i+1] - coi[i];
938: buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
939: buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */
940: nrows++;
941: }
942: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
943: k++;
944: buf_si += len_si[proc];
945: }
946: for (i=0; i<nrecv; i++) {
947: MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);
948: }
949: PetscFree(rwaits);
950: if (nsend) {MPI_Waitall(nsend,swaits,sstatus);}
952: PetscFree4(len_s,len_si,sstatus,owners_co);
953: PetscFree(len_ri);
954: PetscFree(swaits);
955: PetscFree(buf_s);
957: /* (5) compute the local portion of Cmpi */
958: /* ------------------------------------------ */
959: /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
960: PetscFreeSpaceGet(Crmax,&free_space);
961: current_space = free_space;
963: PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);
964: for (k=0; k<nrecv; k++) {
965: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
966: nrows = *buf_ri_k[k];
967: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
968: nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
969: }
971: MatPreallocateInitialize(comm,pn,pn,dnz,onz);
972: PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);
973: for (i=0; i<pn; i++) {
974: /* add C_loc into Cmpi */
975: nzi = c_loc->i[i+1] - c_loc->i[i];
976: Jptr = c_loc->j + c_loc->i[i];
977: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
979: /* add received col data into lnk */
980: for (k=0; k<nrecv; k++) { /* k-th received message */
981: if (i == *nextrow[k]) { /* i-th row */
982: nzi = *(nextci[k]+1) - *nextci[k];
983: Jptr = buf_rj[k] + *nextci[k];
984: PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
985: nextrow[k]++; nextci[k]++;
986: }
987: }
988: nzi = lnk[0];
990: /* copy data into free space, then initialize lnk */
991: PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);
992: MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);
993: }
994: PetscFree3(buf_ri_k,nextrow,nextci);
995: PetscLLDestroy(lnk,lnkbt);
996: PetscFreeSpaceDestroy(free_space);
998: /* local sizes and preallocation */
999: MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);
1000: if (P->cmap->bs > 0) {
1001: PetscLayoutSetBlockSize(Cmpi->rmap,P->cmap->bs);
1002: PetscLayoutSetBlockSize(Cmpi->cmap,P->cmap->bs);
1003: }
1004: MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
1005: MatPreallocateFinalize(dnz,onz);
1007: /* members in merge */
1008: PetscFree(id_r);
1009: PetscFree(len_r);
1010: PetscFree(buf_ri[0]);
1011: PetscFree(buf_ri);
1012: PetscFree(buf_rj[0]);
1013: PetscFree(buf_rj);
1014: PetscLayoutDestroy(&rowmap);
1016: /* attach the supporting struct to Cmpi for reuse */
1017: c = (Mat_MPIAIJ*)Cmpi->data;
1018: c->ap = ptap;
1019: ptap->duplicate = Cmpi->ops->duplicate;
1020: ptap->destroy = Cmpi->ops->destroy;
1021: ptap->view = Cmpi->ops->view;
1022: PetscCalloc1(pN,&ptap->apa);
1024: /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
1025: Cmpi->assembled = PETSC_FALSE;
1026: Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
1027: Cmpi->ops->view = MatView_MPIAIJ_PtAP;
1028: Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
1029: *C = Cmpi;
1030: return(0);
1031: }
1033: PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
1034: {
1035: PetscErrorCode ierr;
1036: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
1037: Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
1038: Mat_SeqAIJ *ap,*p_loc,*p_oth=NULL,*c_seq;
1039: Mat_APMPI *ptap = c->ap;
1040: Mat AP_loc,C_loc,C_oth;
1041: PetscInt i,rstart,rend,cm,ncols,row;
1042: PetscInt *api,*apj,am = A->rmap->n,j,col,apnz;
1043: PetscScalar *apa;
1044: const PetscInt *cols;
1045: const PetscScalar *vals;
1048: if (!ptap) {
1049: MPI_Comm comm;
1050: PetscObjectGetComm((PetscObject)C,&comm);
1051: SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
1052: }
1054: MatZeroEntries(C);
1055: /* 1) get R = Pd^T,Ro = Po^T */
1056: if (ptap->reuse == MAT_REUSE_MATRIX) {
1057: MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);
1058: MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);
1059: }
1061: /* 2) get AP_loc */
1062: AP_loc = ptap->AP_loc;
1063: ap = (Mat_SeqAIJ*)AP_loc->data;
1065: /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
1066: /*-----------------------------------------------------*/
1067: if (ptap->reuse == MAT_REUSE_MATRIX) {
1068: /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
1069: MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
1070: MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
1071: }
1073: /* 2-2) compute numeric A_loc*P - dominating part */
1074: /* ---------------------------------------------- */
1075: /* get data from symbolic products */
1076: p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1077: if (ptap->P_oth) {
1078: p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
1079: }
1080: apa = ptap->apa;
1081: api = ap->i;
1082: apj = ap->j;
1083: for (i=0; i<am; i++) {
1084: /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
1085: AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
1086: apnz = api[i+1] - api[i];
1087: for (j=0; j<apnz; j++) {
1088: col = apj[j+api[i]];
1089: ap->a[j+ap->i[i]] = apa[col];
1090: apa[col] = 0.0;
1091: }
1092: PetscLogFlops(2.0*apnz);
1093: }
1095: /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
1096: ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);
1097: ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);
1098: C_loc = ptap->C_loc;
1099: C_oth = ptap->C_oth;
1101: /* add C_loc and C_oth to C */
1102: MatGetOwnershipRange(C,&rstart,&rend);
1104: /* C_loc -> C */
1105: cm = C_loc->rmap->N;
1106: c_seq = (Mat_SeqAIJ*)C_loc->data;
1107: cols = c_seq->j;
1108: vals = c_seq->a;
1111: /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and */
1112: /* when there are no off-processor parts. */
1113: /* If was_assembled is true, the assignment aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
1114: /* is no longer valid. The slower MatSetValues_MPIAIJ() must be used instead, which looks up each column index in a table */
1115: /* and performs the additional bookkeeping that entails. */
1116: if (C->assembled) {
1117: C->was_assembled = PETSC_TRUE;
1118: C->assembled = PETSC_FALSE;
1119: }
1120: if (C->was_assembled) {
1121: for (i=0; i<cm; i++) {
1122: ncols = c_seq->i[i+1] - c_seq->i[i];
1123: row = rstart + i;
1124: MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);
1125: cols += ncols; vals += ncols;
1126: }
1127: } else {
1128: MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);
1129: }
1131: /* Co -> C, off-processor part */
1132: cm = C_oth->rmap->N;
1133: c_seq = (Mat_SeqAIJ*)C_oth->data;
1134: cols = c_seq->j;
1135: vals = c_seq->a;
1136: for (i=0; i<cm; i++) {
1137: ncols = c_seq->i[i+1] - c_seq->i[i];
1138: row = p->garray[i];
1139: MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);
1140: cols += ncols; vals += ncols;
1141: }
1143: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
1144: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
1146: ptap->reuse = MAT_REUSE_MATRIX;
1148: /* supporting struct ptap consumes almost the same amount of memory as C=PtAP, release it if C will not be updated by A and P */
1149: if (ptap->freestruct) {
1150: MatFreeIntermediateDataStructures(C);
1151: }
1152: return(0);
1153: }
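/*
   Editorial sketch of the dense-accumulator step used in the numeric loop above: values of
   A[i,:]*P are scattered into a dense work array apa of length pN (ptap->apa), then gathered
   back into the compressed row (apj / ap->a) and the touched entries are zeroed so apa can be
   reused for the next row.  AProw_nonscalable() is a PETSc internal kernel; the plain-C gather
   below is an illustration only.
*/
#if 0
static void ExampleGatherDenseRow(PetscInt apnz,const PetscInt *apjrow, /* column indices of AP[i,:] from the symbolic phase */
                                  PetscScalar *apa,                     /* dense accumulator of length pN (ptap->apa)        */
                                  PetscScalar *aprow)                   /* compressed values of AP[i,:] (ap->a + ap->i[i])   */
{
  PetscInt j;
  for (j=0; j<apnz; j++) {
    PetscInt col = apjrow[j];
    aprow[j] = apa[col]; /* gather from the dense accumulator         */
    apa[col] = 0.0;      /* reset so apa is all-zero for the next row */
  }
}
#endif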