Actual source code: mpiaij.c

  1: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  2: #include <petsc/private/vecimpl.h>
  3: #include <petsc/private/sfimpl.h>
  4: #include <petsc/private/isimpl.h>
  5: #include <petscblaslapack.h>
  6: #include <petscsf.h>
  7: #include <petsc/private/hashmapi.h>

  9: /*MC
 10:    MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.

 12:    This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
 13:    and MATMPIAIJ otherwise.  As a result, for single process communicators,
 14:  MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 15:   for communicators controlling multiple processes.  It is recommended that you call both of
 16:   the above preallocation routines for simplicity.

 18:    Options Database Keys:
 19: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()

 21:   Developer Notes:
 22:     Subclasses include MATAIJCUSPARSE, MATAIJPERM, MATAIJSELL, MATAIJMKL, and MATAIJCRL. The type also automatically switches over to use inodes when
 23:    enough exist.

 25:   Level: beginner

 27: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
 28: M*/
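/*
   Illustrative usage sketch (not part of this file's compiled code): creating an AIJ matrix and
   calling both preallocation routines as recommended above, so the same code works for a single
   process (MATSEQAIJ) and for multiple processes (MATMPIAIJ).  The global sizes M, N and the
   estimate of 5 nonzeros per row are placeholders:

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
     MatSetType(A,MATAIJ);
     MatSetFromOptions(A);
     MatSeqAIJSetPreallocation(A,5,NULL);
     MatMPIAIJSetPreallocation(A,5,NULL,5,NULL);
*/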

 30: /*MC
 31:    MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.

 33:    This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
 34:    and MATMPIAIJCRL otherwise.  As a result, for single process communicators,
 35:    MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 36:   for communicators controlling multiple processes.  It is recommended that you call both of
 37:   the above preallocation routines for simplicity.

 39:    Options Database Keys:
 40: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()

 42:   Level: beginner

 44: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
 45: M*/
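/*
   Illustrative sketch (not part of this file's compiled code): selecting the AIJCRL type either
   from the options database with -mat_type aijcrl or directly in code; the preallocation calls
   are the same as for MATAIJ above.  The estimate of 5 nonzeros per row is a placeholder:

     MatSetType(A,MATAIJCRL);
     MatSeqAIJSetPreallocation(A,5,NULL);
     MatMPIAIJSetPreallocation(A,5,NULL,5,NULL);
*/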

 47: static PetscErrorCode MatBindToCPU_MPIAIJ(Mat A,PetscBool flg)
 48: {
 49:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

 53: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_VIENNACL)
 54:   A->boundtocpu = flg;
 55: #endif
 56:   if (a->A) {
 57:     MatBindToCPU(a->A,flg);
 58:   }
 59:   if (a->B) {
 60:     MatBindToCPU(a->B,flg);
 61:   }
 62:   return(0);
 63: }


 66: PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
 67: {
 69:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)M->data;

 72:   if (mat->A) {
 73:     MatSetBlockSizes(mat->A,rbs,cbs);
 74:     MatSetBlockSizes(mat->B,rbs,1);
 75:   }
 76:   return(0);
 77: }

 79: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
 80: {
 81:   PetscErrorCode  ierr;
 82:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ*)M->data;
 83:   Mat_SeqAIJ      *a   = (Mat_SeqAIJ*)mat->A->data;
 84:   Mat_SeqAIJ      *b   = (Mat_SeqAIJ*)mat->B->data;
 85:   const PetscInt  *ia,*ib;
 86:   const MatScalar *aa,*bb,*aav,*bav;
 87:   PetscInt        na,nb,i,j,*rows,cnt=0,n0rows;
 88:   PetscInt        m = M->rmap->n,rstart = M->rmap->rstart;

 91:   *keptrows = NULL;

 93:   ia   = a->i;
 94:   ib   = b->i;
 95:   MatSeqAIJGetArrayRead(mat->A,&aav);
 96:   MatSeqAIJGetArrayRead(mat->B,&bav);
 97:   for (i=0; i<m; i++) {
 98:     na = ia[i+1] - ia[i];
 99:     nb = ib[i+1] - ib[i];
100:     if (!na && !nb) {
101:       cnt++;
102:       goto ok1;
103:     }
104:     aa = aav + ia[i];
105:     for (j=0; j<na; j++) {
106:       if (aa[j] != 0.0) goto ok1;
107:     }
108:     bb = bav + ib[i];
109:     for (j=0; j <nb; j++) {
110:       if (bb[j] != 0.0) goto ok1;
111:     }
112:     cnt++;
113: ok1:;
114:   }
115:   MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
116:   if (!n0rows) {
117:     MatSeqAIJRestoreArrayRead(mat->A,&aav);
118:     MatSeqAIJRestoreArrayRead(mat->B,&bav);
119:     return(0);
120:   }
121:   PetscMalloc1(M->rmap->n-cnt,&rows);
122:   cnt  = 0;
123:   for (i=0; i<m; i++) {
124:     na = ia[i+1] - ia[i];
125:     nb = ib[i+1] - ib[i];
126:     if (!na && !nb) continue;
127:     aa = aav + ia[i];
128:     for (j=0; j<na;j++) {
129:       if (aa[j] != 0.0) {
130:         rows[cnt++] = rstart + i;
131:         goto ok2;
132:       }
133:     }
134:     bb = bav + ib[i];
135:     for (j=0; j<nb; j++) {
136:       if (bb[j] != 0.0) {
137:         rows[cnt++] = rstart + i;
138:         goto ok2;
139:       }
140:     }
141: ok2:;
142:   }
143:   ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
144:   MatSeqAIJRestoreArrayRead(mat->A,&aav);
145:   MatSeqAIJRestoreArrayRead(mat->B,&bav);
146:   return(0);
147: }

149: PetscErrorCode  MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
150: {
151:   PetscErrorCode    ierr;
152:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*) Y->data;
153:   PetscBool         cong;

156:   MatHasCongruentLayouts(Y,&cong);
157:   if (Y->assembled && cong) {
158:     MatDiagonalSet(aij->A,D,is);
159:   } else {
160:     MatDiagonalSet_Default(Y,D,is);
161:   }
162:   return(0);
163: }

165: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
166: {
167:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)M->data;
169:   PetscInt       i,rstart,nrows,*rows;

172:   *zrows = NULL;
173:   MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
174:   MatGetOwnershipRange(M,&rstart,NULL);
175:   for (i=0; i<nrows; i++) rows[i] += rstart;
176:   ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
177:   return(0);
178: }

180: PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
181: {
182:   PetscErrorCode    ierr;
183:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)A->data;
184:   PetscInt          i,n,*garray = aij->garray;
185:   Mat_SeqAIJ        *a_aij = (Mat_SeqAIJ*) aij->A->data;
186:   Mat_SeqAIJ        *b_aij = (Mat_SeqAIJ*) aij->B->data;
187:   PetscReal         *work;
188:   const PetscScalar *dummy;

191:   MatGetSize(A,NULL,&n);
192:   PetscCalloc1(n,&work);
193:   MatSeqAIJGetArrayRead(aij->A,&dummy);
194:   MatSeqAIJRestoreArrayRead(aij->A,&dummy);
195:   MatSeqAIJGetArrayRead(aij->B,&dummy);
196:   MatSeqAIJRestoreArrayRead(aij->B,&dummy);
197:   if (type == NORM_2) {
198:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
199:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
200:     }
201:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
202:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
203:     }
204:   } else if (type == NORM_1) {
205:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
206:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
207:     }
208:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
209:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
210:     }
211:   } else if (type == NORM_INFINITY) {
212:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
213:       work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
214:     }
215:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
216:       work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
217:     }

219:   } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
220:   if (type == NORM_INFINITY) {
221:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
222:   } else {
223:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
224:   }
225:   PetscFree(work);
226:   if (type == NORM_2) {
227:     for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
228:   }
229:   return(0);
230: }

232: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
233: {
234:   Mat_MPIAIJ      *a  = (Mat_MPIAIJ*)A->data;
235:   IS              sis,gis;
236:   PetscErrorCode  ierr;
237:   const PetscInt  *isis,*igis;
238:   PetscInt        n,*iis,nsis,ngis,rstart,i;

241:   MatFindOffBlockDiagonalEntries(a->A,&sis);
242:   MatFindNonzeroRows(a->B,&gis);
243:   ISGetSize(gis,&ngis);
244:   ISGetSize(sis,&nsis);
245:   ISGetIndices(sis,&isis);
246:   ISGetIndices(gis,&igis);

248:   PetscMalloc1(ngis+nsis,&iis);
249:   PetscArraycpy(iis,igis,ngis);
250:   PetscArraycpy(iis+ngis,isis,nsis);
251:   n    = ngis + nsis;
252:   PetscSortRemoveDupsInt(&n,iis);
253:   MatGetOwnershipRange(A,&rstart,NULL);
254:   for (i=0; i<n; i++) iis[i] += rstart;
255:   ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);

257:   ISRestoreIndices(sis,&isis);
258:   ISRestoreIndices(gis,&igis);
259:   ISDestroy(&sis);
260:   ISDestroy(&gis);
261:   return(0);
262: }

264: /*
265:   Local utility routine that creates a mapping from the global column
266: number to the local number in the off-diagonal part of the local
267: storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable at
268: a slightly higher hash table cost; without it, it is not scalable (each process
269: has an order-N integer array) but is fast to access.
270: */
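/*
   Illustrative sketch (not part of this file's compiled code): how the colmap created below is
   consulted to translate a global column index gcol into a local column index lcol of the
   off-diagonal block B, mirroring the lookups in MatSetValues_MPIAIJ() and MatGetValues_MPIAIJ().
   gcol and lcol are placeholder names; entries are stored shifted by one so that 0 can mean
   "not present":

     PetscInt lcol;
   #if defined(PETSC_USE_CTABLE)
     PetscTableFind(aij->colmap,gcol+1,&lcol);
     lcol--;
   #else
     lcol = aij->colmap[gcol] - 1;
   #endif
     if (lcol < 0) ... gcol has no entry in B on this process ...
*/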
271: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
272: {
273:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
275:   PetscInt       n = aij->B->cmap->n,i;

278:   if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
279: #if defined(PETSC_USE_CTABLE)
280:   PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
281:   for (i=0; i<n; i++) {
282:     PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
283:   }
284: #else
285:   PetscCalloc1(mat->cmap->N+1,&aij->colmap);
286:   PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
287:   for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
288: #endif
289:   return(0);
290: }
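
/*
   The two macros below insert (row,col,value) into the diagonal (A) or off-diagonal (B)
   sequential block: a short binary search narrows the range, a linear scan locates the column,
   and if the column is not present a new nonzero is inserted (subject to the nonew and
   ignorezeroentries settings), shifting the remaining entries of the row up by one slot.
*/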

292: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol)     \
293: { \
294:     if (col <= lastcol1)  low1 = 0;     \
295:     else                 high1 = nrow1; \
296:     lastcol1 = col;\
297:     while (high1-low1 > 5) { \
298:       t = (low1+high1)/2; \
299:       if (rp1[t] > col) high1 = t; \
300:       else              low1  = t; \
301:     } \
302:       for (_i=low1; _i<high1; _i++) { \
303:         if (rp1[_i] > col) break; \
304:         if (rp1[_i] == col) { \
305:           if (addv == ADD_VALUES) { \
306:             ap1[_i] += value;   \
307:             /* Not sure whether LogFlops will slow down the code or not */ \
308:             (void)PetscLogFlops(1.0);   \
309:            } \
310:           else                    ap1[_i] = value; \
311:           inserted = PETSC_TRUE; \
312:           goto a_noinsert; \
313:         } \
314:       }  \
315:       if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
316:       if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;}                \
317:       if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
318:       MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
319:       N = nrow1++ - 1; a->nz++; high1++; \
320:       /* shift up all the later entries in this row */ \
321:       PetscArraymove(rp1+_i+1,rp1+_i,N-_i+1);\
322:       PetscArraymove(ap1+_i+1,ap1+_i,N-_i+1);\
323:       rp1[_i] = col;  \
324:       ap1[_i] = value;  \
325:       A->nonzerostate++;\
326:       a_noinsert: ; \
327:       ailen[row] = nrow1; \
328: }

330: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
331:   { \
332:     if (col <= lastcol2) low2 = 0;                        \
333:     else high2 = nrow2;                                   \
334:     lastcol2 = col;                                       \
335:     while (high2-low2 > 5) {                              \
336:       t = (low2+high2)/2;                                 \
337:       if (rp2[t] > col) high2 = t;                        \
338:       else             low2  = t;                         \
339:     }                                                     \
340:     for (_i=low2; _i<high2; _i++) {                       \
341:       if (rp2[_i] > col) break;                           \
342:       if (rp2[_i] == col) {                               \
343:         if (addv == ADD_VALUES) {                         \
344:           ap2[_i] += value;                               \
345:           (void)PetscLogFlops(1.0);                       \
346:         }                                                 \
347:         else                    ap2[_i] = value;          \
348:         inserted = PETSC_TRUE;                            \
349:         goto b_noinsert;                                  \
350:       }                                                   \
351:     }                                                     \
352:     if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
353:     if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;}                        \
354:     if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
355:     MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
356:     N = nrow2++ - 1; b->nz++; high2++;                    \
357:     /* shift up all the later entries in this row */      \
358:     PetscArraymove(rp2+_i+1,rp2+_i,N-_i+1);\
359:     PetscArraymove(ap2+_i+1,ap2+_i,N-_i+1);\
360:     rp2[_i] = col;                                        \
361:     ap2[_i] = value;                                      \
362:     B->nonzerostate++;                                    \
363:     b_noinsert: ;                                         \
364:     bilen[row] = nrow2;                                   \
365:   }

367: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
368: {
369:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
370:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
372:   PetscInt       l,*garray = mat->garray,diag;

375:   /* code only works for square matrices A */
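  /* Note: v[] is expected to hold the complete local row in ascending global column order:
     first the off-diagonal entries to the left of the diagonal block, then the diagonal block
     entries, then the off-diagonal entries to its right */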

377:   /* find size of row to the left of the diagonal part */
378:   MatGetOwnershipRange(A,&diag,NULL);
379:   row  = row - diag;
380:   for (l=0; l<b->i[row+1]-b->i[row]; l++) {
381:     if (garray[b->j[b->i[row]+l]] > diag) break;
382:   }
383:   PetscArraycpy(b->a+b->i[row],v,l);

385:   /* diagonal part */
386:   PetscArraycpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row]));

388:   /* right of diagonal part */
389:   PetscArraycpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],b->i[row+1]-b->i[row]-l);
390: #if defined(PETSC_HAVE_DEVICE)
391:   if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && (l || (a->i[row+1]-a->i[row]) || (b->i[row+1]-b->i[row]-l))) A->offloadmask = PETSC_OFFLOAD_CPU;
392: #endif
393:   return(0);
394: }

396: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
397: {
398:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
399:   PetscScalar    value = 0.0;
401:   PetscInt       i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
402:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
403:   PetscBool      roworiented = aij->roworiented;

405:   /* Some variables required in the macros below */
406:   Mat        A                    = aij->A;
407:   Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
408:   PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
409:   PetscBool  ignorezeroentries    = a->ignorezeroentries;
410:   Mat        B                    = aij->B;
411:   Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
412:   PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
413:   MatScalar  *aa,*ba;
414:   /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
415:    * cannot use "#if defined" inside a macro. */
416:   PETSC_UNUSED PetscBool inserted = PETSC_FALSE;

418:   PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
419:   PetscInt  nonew;
420:   MatScalar *ap1,*ap2;

423: #if defined(PETSC_HAVE_DEVICE)
424:   if (A->offloadmask == PETSC_OFFLOAD_GPU) {
425:     const PetscScalar *dummy;
426:     MatSeqAIJGetArrayRead(A,&dummy);
427:     MatSeqAIJRestoreArrayRead(A,&dummy);
428:   }
429:   if (B->offloadmask == PETSC_OFFLOAD_GPU) {
430:     const PetscScalar *dummy;
431:     MatSeqAIJGetArrayRead(B,&dummy);
432:     MatSeqAIJRestoreArrayRead(B,&dummy);
433:   }
434: #endif
435:   aa = a->a;
436:   ba = b->a;
437:   for (i=0; i<m; i++) {
438:     if (im[i] < 0) continue;
439:     if (PetscUnlikely(im[i] >= mat->rmap->N)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
440:     if (im[i] >= rstart && im[i] < rend) {
441:       row      = im[i] - rstart;
442:       lastcol1 = -1;
443:       rp1      = aj + ai[row];
444:       ap1      = aa + ai[row];
445:       rmax1    = aimax[row];
446:       nrow1    = ailen[row];
447:       low1     = 0;
448:       high1    = nrow1;
449:       lastcol2 = -1;
450:       rp2      = bj + bi[row];
451:       ap2      = ba + bi[row];
452:       rmax2    = bimax[row];
453:       nrow2    = bilen[row];
454:       low2     = 0;
455:       high2    = nrow2;

457:       for (j=0; j<n; j++) {
458:         if (v)  value = roworiented ? v[i*n+j] : v[i+j*m];
459:         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
460:         if (in[j] >= cstart && in[j] < cend) {
461:           col   = in[j] - cstart;
462:           nonew = a->nonew;
463:           MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
464: #if defined(PETSC_HAVE_DEVICE)
465:           if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) A->offloadmask = PETSC_OFFLOAD_CPU;
466: #endif
467:         } else if (in[j] < 0) continue;
468:         else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
469:         else {
470:           if (mat->was_assembled) {
471:             if (!aij->colmap) {
472:               MatCreateColmap_MPIAIJ_Private(mat);
473:             }
474: #if defined(PETSC_USE_CTABLE)
475:             PetscTableFind(aij->colmap,in[j]+1,&col);
476:             col--;
477: #else
478:             col = aij->colmap[in[j]] - 1;
479: #endif
480:             if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
481:               MatDisAssemble_MPIAIJ(mat);
482:               col  =  in[j];
483:               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
484:               B        = aij->B;
485:               b        = (Mat_SeqAIJ*)B->data;
486:               bimax    = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
487:               rp2      = bj + bi[row];
488:               ap2      = ba + bi[row];
489:               rmax2    = bimax[row];
490:               nrow2    = bilen[row];
491:               low2     = 0;
492:               high2    = nrow2;
493:               bm       = aij->B->rmap->n;
494:               ba       = b->a;
495:               inserted = PETSC_FALSE;
496:             } else if (col < 0 && !(ignorezeroentries && value == 0.0)) {
497:               if (1 == ((Mat_SeqAIJ*)(aij->B->data))->nonew) {
498:                 PetscInfo3(mat,"Skipping of insertion of new nonzero location in off-diagonal portion of matrix %g(%D,%D)\n",(double)PetscRealPart(value),im[i],in[j]);
499:               } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
500:             }
501:           } else col = in[j];
502:           nonew = b->nonew;
503:           MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
504: #if defined(PETSC_HAVE_DEVICE)
505:           if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) B->offloadmask = PETSC_OFFLOAD_CPU;
506: #endif
507:         }
508:       }
509:     } else {
510:       if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
511:       if (!aij->donotstash) {
512:         mat->assembled = PETSC_FALSE;
513:         if (roworiented) {
514:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
515:         } else {
516:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
517:         }
518:       }
519:     }
520:   }
521:   return(0);
522: }

524: /*
525:     This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
526:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
527:     No off-processor parts of the matrix are allowed here and mat->was_assembled has to be PETSC_FALSE.
528: */
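/*
   Hypothetical input for the routine below, for illustration only: a process owning 2 rows with
   column ownership range [cstart,cend) = [4,8), where row 0 has entries in global columns 1, 4, 9
   and row 1 in columns 5, 6:

     mat_i[] = {0, 3, 5}
     mat_j[] = {1, 4, 9,  5, 6}     (sorted within each row)

   Columns 4, 5, and 6 fall in the diagonal block A and are stored as local columns 0, 1, and 2,
   while columns 1 and 9 fall in the off-diagonal block B and keep their global indices at this stage.
*/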
529: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[])
530: {
531:   Mat_MPIAIJ     *aij        = (Mat_MPIAIJ*)mat->data;
532:   Mat            A           = aij->A; /* diagonal part of the matrix */
533:   Mat            B           = aij->B; /* offdiagonal part of the matrix */
534:   Mat_SeqAIJ     *a          = (Mat_SeqAIJ*)A->data;
535:   Mat_SeqAIJ     *b          = (Mat_SeqAIJ*)B->data;
536:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,col;
537:   PetscInt       *ailen      = a->ilen,*aj = a->j;
538:   PetscInt       *bilen      = b->ilen,*bj = b->j;
539:   PetscInt       am          = aij->A->rmap->n,j;
540:   PetscInt       diag_so_far = 0,dnz;
541:   PetscInt       offd_so_far = 0,onz;

544:   /* Iterate over all rows of the matrix */
545:   for (j=0; j<am; j++) {
546:     dnz = onz = 0;
547:     /*  Iterate over all non-zero columns of the current row */
548:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
549:       /* If column is in the diagonal */
550:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
551:         aj[diag_so_far++] = mat_j[col] - cstart;
552:         dnz++;
553:       } else { /* off-diagonal entries */
554:         bj[offd_so_far++] = mat_j[col];
555:         onz++;
556:       }
557:     }
558:     ailen[j] = dnz;
559:     bilen[j] = onz;
560:   }
561:   return(0);
562: }

564: /*
565:     This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
566:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
567:     No off-processor parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ().
568:     Also, mat->was_assembled has to be false, otherwise the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
569:     would not be correct and the more complex MatSetValues_MPIAIJ() has to be used.
570: */
571: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[],const PetscScalar mat_a[])
572: {
573:   Mat_MPIAIJ     *aij   = (Mat_MPIAIJ*)mat->data;
574:   Mat            A      = aij->A; /* diagonal part of the matrix */
575:   Mat            B      = aij->B; /* offdiagonal part of the matrix */
576:   Mat_SeqAIJ     *aijd  =(Mat_SeqAIJ*)(aij->A)->data,*aijo=(Mat_SeqAIJ*)(aij->B)->data;
577:   Mat_SeqAIJ     *a     = (Mat_SeqAIJ*)A->data;
578:   Mat_SeqAIJ     *b     = (Mat_SeqAIJ*)B->data;
579:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend;
580:   PetscInt       *ailen = a->ilen,*aj = a->j;
581:   PetscInt       *bilen = b->ilen,*bj = b->j;
582:   PetscInt       am     = aij->A->rmap->n,j;
583:   PetscInt       *full_diag_i=aijd->i,*full_offd_i=aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
584:   PetscInt       col,dnz_row,onz_row,rowstart_diag,rowstart_offd;
585:   PetscScalar    *aa = a->a,*ba = b->a;

588:   /* Iterate over all rows of the matrix */
589:   for (j=0; j<am; j++) {
590:     dnz_row = onz_row = 0;
591:     rowstart_offd = full_offd_i[j];
592:     rowstart_diag = full_diag_i[j];
593:     /*  Iterate over all non-zero columns of the current row */
594:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
595:       /* If column is in the diagonal */
596:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
597:         aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
598:         aa[rowstart_diag+dnz_row] = mat_a[col];
599:         dnz_row++;
600:       } else { /* off-diagonal entries */
601:         bj[rowstart_offd+onz_row] = mat_j[col];
602:         ba[rowstart_offd+onz_row] = mat_a[col];
603:         onz_row++;
604:       }
605:     }
606:     ailen[j] = dnz_row;
607:     bilen[j] = onz_row;
608:   }
609:   return(0);
610: }

612: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
613: {
614:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
616:   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
617:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;

620:   for (i=0; i<m; i++) {
621:     if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
622:     if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
623:     if (idxm[i] >= rstart && idxm[i] < rend) {
624:       row = idxm[i] - rstart;
625:       for (j=0; j<n; j++) {
626:         if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
627:         if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
628:         if (idxn[j] >= cstart && idxn[j] < cend) {
629:           col  = idxn[j] - cstart;
630:           MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
631:         } else {
632:           if (!aij->colmap) {
633:             MatCreateColmap_MPIAIJ_Private(mat);
634:           }
635: #if defined(PETSC_USE_CTABLE)
636:           PetscTableFind(aij->colmap,idxn[j]+1,&col);
637:           col--;
638: #else
639:           col = aij->colmap[idxn[j]] - 1;
640: #endif
641:           if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
642:           else {
643:             MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
644:           }
645:         }
646:       }
647:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
648:   }
649:   return(0);
650: }

652: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
653: {
654:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
656:   PetscInt       nstash,reallocs;

659:   if (aij->donotstash || mat->nooffprocentries) return(0);

661:   MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
662:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
663:   PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
664:   return(0);
665: }

667: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
668: {
669:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
671:   PetscMPIInt    n;
672:   PetscInt       i,j,rstart,ncols,flg;
673:   PetscInt       *row,*col;
674:   PetscBool      other_disassembled;
675:   PetscScalar    *val;

677:   /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */

680:   if (!aij->donotstash && !mat->nooffprocentries) {
681:     while (1) {
682:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
683:       if (!flg) break;

685:       for (i=0; i<n;) {
686:         /* Now identify the consecutive vals belonging to the same row */
687:         for (j=i,rstart=row[j]; j<n; j++) {
688:           if (row[j] != rstart) break;
689:         }
690:         if (j < n) ncols = j-i;
691:         else       ncols = n-i;
692:         /* Now assemble all these values with a single function call */
693:         MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);
694:         i    = j;
695:       }
696:     }
697:     MatStashScatterEnd_Private(&mat->stash);
698:   }
699: #if defined(PETSC_HAVE_DEVICE)
700:   if (mat->offloadmask == PETSC_OFFLOAD_CPU) aij->A->offloadmask = PETSC_OFFLOAD_CPU;
701:   /* We call MatBindToCPU() on aij->A and aij->B here, because if MatBindToCPU_MPIAIJ() is called before assembly, it cannot bind these. */
702:   if (mat->boundtocpu) {
703:     MatBindToCPU(aij->A,PETSC_TRUE);
704:     MatBindToCPU(aij->B,PETSC_TRUE);
705:   }
706: #endif
707:   MatAssemblyBegin(aij->A,mode);
708:   MatAssemblyEnd(aij->A,mode);

710:   /* determine if any processor has disassembled, if so we must
711:      also disassemble ourselves, in order that we may reassemble. */
712:   /*
713:      if the nonzero structure of submatrix B cannot change then we know that
714:      no processor disassembled and thus we can skip this step
715:   */
716:   if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
717:     MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
718:     if (mat->was_assembled && !other_disassembled) {
719: #if defined(PETSC_HAVE_DEVICE)
720:       aij->B->offloadmask = PETSC_OFFLOAD_BOTH; /* do not copy on the GPU when assembling inside MatDisAssemble_MPIAIJ */
721: #endif
722:       MatDisAssemble_MPIAIJ(mat);
723:     }
724:   }
725:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
726:     MatSetUpMultiply_MPIAIJ(mat);
727:   }
728:   MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
729: #if defined(PETSC_HAVE_DEVICE)
730:   if (mat->offloadmask == PETSC_OFFLOAD_CPU && aij->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) aij->B->offloadmask = PETSC_OFFLOAD_CPU;
731: #endif
732:   MatAssemblyBegin(aij->B,mode);
733:   MatAssemblyEnd(aij->B,mode);

735:   PetscFree2(aij->rowvalues,aij->rowindices);

737:   aij->rowvalues = NULL;

739:   VecDestroy(&aij->diag);

741:   /* if no new nonzero locations are allowed in the matrix then only set the matrix state the first time through */
742:   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
743:     PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
744:     MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
745:   }
746: #if defined(PETSC_HAVE_DEVICE)
747:   mat->offloadmask = PETSC_OFFLOAD_BOTH;
748: #endif
749:   return(0);
750: }

752: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
753: {
754:   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;

758:   MatZeroEntries(l->A);
759:   MatZeroEntries(l->B);
760:   return(0);
761: }

763: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
764: {
765:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ *) A->data;
766:   PetscObjectState sA, sB;
767:   PetscInt        *lrows;
768:   PetscInt         r, len;
769:   PetscBool        cong, lch, gch;
770:   PetscErrorCode   ierr;

773:   /* get locally owned rows */
774:   MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);
775:   MatHasCongruentLayouts(A,&cong);
776:   /* fix right hand side if needed */
777:   if (x && b) {
778:     const PetscScalar *xx;
779:     PetscScalar       *bb;

781:     if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
782:     VecGetArrayRead(x, &xx);
783:     VecGetArray(b, &bb);
784:     for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
785:     VecRestoreArrayRead(x, &xx);
786:     VecRestoreArray(b, &bb);
787:   }

789:   sA = mat->A->nonzerostate;
790:   sB = mat->B->nonzerostate;

792:   if (diag != 0.0 && cong) {
793:     MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
794:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
795:   } else if (diag != 0.0) { /* non-square or non-congruent layouts -> if keepnonzeropattern is false, we allow for new insertion */
796:     Mat_SeqAIJ *aijA = (Mat_SeqAIJ*)mat->A->data;
797:     Mat_SeqAIJ *aijB = (Mat_SeqAIJ*)mat->B->data;
798:     PetscInt   nnwA, nnwB;
799:     PetscBool  nnzA, nnzB;

801:     nnwA = aijA->nonew;
802:     nnwB = aijB->nonew;
803:     nnzA = aijA->keepnonzeropattern;
804:     nnzB = aijB->keepnonzeropattern;
805:     if (!nnzA) {
806:       PetscInfo(mat->A,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on diagonal block.\n");
807:       aijA->nonew = 0;
808:     }
809:     if (!nnzB) {
810:       PetscInfo(mat->B,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on off-diagonal block.\n");
811:       aijB->nonew = 0;
812:     }
813:     /* Must zero here before the next loop */
814:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
815:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
816:     for (r = 0; r < len; ++r) {
817:       const PetscInt row = lrows[r] + A->rmap->rstart;
818:       if (row >= A->cmap->N) continue;
819:       MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
820:     }
821:     aijA->nonew = nnwA;
822:     aijB->nonew = nnwB;
823:   } else {
824:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
825:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
826:   }
827:   PetscFree(lrows);
828:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
829:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

831:   /* reduce nonzerostate */
832:   lch = (PetscBool)(sA != mat->A->nonzerostate || sB != mat->B->nonzerostate);
833:   MPIU_Allreduce(&lch,&gch,1,MPIU_BOOL,MPI_LOR,PetscObjectComm((PetscObject)A));
834:   if (gch) A->nonzerostate++;
835:   return(0);
836: }

838: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
839: {
840:   Mat_MPIAIJ        *l = (Mat_MPIAIJ*)A->data;
841:   PetscErrorCode    ierr;
842:   PetscMPIInt       n = A->rmap->n;
843:   PetscInt          i,j,r,m,len = 0;
844:   PetscInt          *lrows,*owners = A->rmap->range;
845:   PetscMPIInt       p = 0;
846:   PetscSFNode       *rrows;
847:   PetscSF           sf;
848:   const PetscScalar *xx;
849:   PetscScalar       *bb,*mask;
850:   Vec               xmask,lmask;
851:   Mat_SeqAIJ        *aij = (Mat_SeqAIJ*)l->B->data;
852:   const PetscInt    *aj, *ii,*ridx;
853:   PetscScalar       *aa;

856:   /* Create SF where leaves are input rows and roots are owned rows */
857:   PetscMalloc1(n, &lrows);
858:   for (r = 0; r < n; ++r) lrows[r] = -1;
859:   PetscMalloc1(N, &rrows);
860:   for (r = 0; r < N; ++r) {
861:     const PetscInt idx   = rows[r];
862:     if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
863:     if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
864:       PetscLayoutFindOwner(A->rmap,idx,&p);
865:     }
866:     rrows[r].rank  = p;
867:     rrows[r].index = rows[r] - owners[p];
868:   }
869:   PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
870:   PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
871:   /* Collect flags for rows to be zeroed */
872:   PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
873:   PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
874:   PetscSFDestroy(&sf);
875:   /* Compress and put in row numbers */
876:   for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
877:   /* zero diagonal part of matrix */
878:   MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
879:   /* handle off diagonal part of matrix */
880:   MatCreateVecs(A,&xmask,NULL);
881:   VecDuplicate(l->lvec,&lmask);
882:   VecGetArray(xmask,&bb);
883:   for (i=0; i<len; i++) bb[lrows[i]] = 1;
884:   VecRestoreArray(xmask,&bb);
885:   VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
886:   VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
887:   VecDestroy(&xmask);
888:   if (x && b) { /* this code is buggy when the row and column layout don't match */
889:     PetscBool cong;

891:     MatHasCongruentLayouts(A,&cong);
892:     if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
893:     VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
894:     VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
895:     VecGetArrayRead(l->lvec,&xx);
896:     VecGetArray(b,&bb);
897:   }
898:   VecGetArray(lmask,&mask);
899:   /* remove zeroed rows of off diagonal matrix */
900:   ii = aij->i;
901:   for (i=0; i<len; i++) {
902:     PetscArrayzero(aij->a + ii[lrows[i]],ii[lrows[i]+1] - ii[lrows[i]]);
903:   }
904:   /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
905:   if (aij->compressedrow.use) {
906:     m    = aij->compressedrow.nrows;
907:     ii   = aij->compressedrow.i;
908:     ridx = aij->compressedrow.rindex;
909:     for (i=0; i<m; i++) {
910:       n  = ii[i+1] - ii[i];
911:       aj = aij->j + ii[i];
912:       aa = aij->a + ii[i];

914:       for (j=0; j<n; j++) {
915:         if (PetscAbsScalar(mask[*aj])) {
916:           if (b) bb[*ridx] -= *aa*xx[*aj];
917:           *aa = 0.0;
918:         }
919:         aa++;
920:         aj++;
921:       }
922:       ridx++;
923:     }
924:   } else { /* do not use compressed row format */
925:     m = l->B->rmap->n;
926:     for (i=0; i<m; i++) {
927:       n  = ii[i+1] - ii[i];
928:       aj = aij->j + ii[i];
929:       aa = aij->a + ii[i];
930:       for (j=0; j<n; j++) {
931:         if (PetscAbsScalar(mask[*aj])) {
932:           if (b) bb[i] -= *aa*xx[*aj];
933:           *aa = 0.0;
934:         }
935:         aa++;
936:         aj++;
937:       }
938:     }
939:   }
940:   if (x && b) {
941:     VecRestoreArray(b,&bb);
942:     VecRestoreArrayRead(l->lvec,&xx);
943:   }
944:   VecRestoreArray(lmask,&mask);
945:   VecDestroy(&lmask);
946:   PetscFree(lrows);

948:   /* only change matrix nonzero state if pattern was allowed to be changed */
949:   if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
950:     PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
951:     MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
952:   }
953:   return(0);
954: }
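
/*
   Matrix-vector product for the MPIAIJ layout: y = A_diag * x_local + B_offdiag * x_ghost.
   The VecScatter Mvctx gathers the needed off-process entries of x into a->lvec while the
   product with the local diagonal block proceeds, overlapping communication with computation.
*/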

956: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
957: {
958:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
960:   PetscInt       nt;
961:   VecScatter     Mvctx = a->Mvctx;

964:   VecGetLocalSize(xx,&nt);
965:   if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
966:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
967:   (*a->A->ops->mult)(a->A,xx,yy);
968:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
969:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
970:   return(0);
971: }

973: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
974: {
975:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

979:   MatMultDiagonalBlock(a->A,bb,xx);
980:   return(0);
981: }

983: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
984: {
985:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
987:   VecScatter     Mvctx = a->Mvctx;

990:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
991:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
992:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
993:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
994:   return(0);
995: }

997: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
998: {
999:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1003:   /* do nondiagonal part */
1004:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1005:   /* do local part */
1006:   (*a->A->ops->multtranspose)(a->A,xx,yy);
1007:   /* add partial results together */
1008:   VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1009:   VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1010:   return(0);
1011: }

1013: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool  *f)
1014: {
1015:   MPI_Comm       comm;
1016:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
1017:   Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
1018:   IS             Me,Notme;
1020:   PetscInt       M,N,first,last,*notme,i;
1021:   PetscBool      lf;
1022:   PetscMPIInt    size;

1025:   /* Easy test: the diagonal blocks must be transposes of each other */
1026:   Bij  = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
1027:   MatIsTranspose(Adia,Bdia,tol,&lf);
1028:   MPIU_Allreduce(&lf,f,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)Amat));
1029:   if (!*f) return(0);
1030:   PetscObjectGetComm((PetscObject)Amat,&comm);
1031:   MPI_Comm_size(comm,&size);
1032:   if (size == 1) return(0);

1034:   /* Hard test: off-diagonal block. This requires a call to MatCreateSubMatrices(). */
1035:   MatGetSize(Amat,&M,&N);
1036:   MatGetOwnershipRange(Amat,&first,&last);
1037:   PetscMalloc1(N-last+first,&notme);
1038:   for (i=0; i<first; i++) notme[i] = i;
1039:   for (i=last; i<M; i++) notme[i-last+first] = i;
1040:   ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1041:   ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1042:   MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1043:   Aoff = Aoffs[0];
1044:   MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1045:   Boff = Boffs[0];
1046:   MatIsTranspose(Aoff,Boff,tol,f);
1047:   MatDestroyMatrices(1,&Aoffs);
1048:   MatDestroyMatrices(1,&Boffs);
1049:   ISDestroy(&Me);
1050:   ISDestroy(&Notme);
1051:   PetscFree(notme);
1052:   return(0);
1053: }

1055: PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A,PetscReal tol,PetscBool  *f)
1056: {

1060:   MatIsTranspose_MPIAIJ(A,A,tol,f);
1061:   return(0);
1062: }

1064: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1065: {
1066:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1070:   /* do nondiagonal part */
1071:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1072:   /* do local part */
1073:   (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1074:   /* add partial results together */
1075:   VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1076:   VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1077:   return(0);
1078: }

1080: /*
1081:   This only works correctly for square matrices where the subblock A->A is the
1082:    diagonal block
1083: */
1084: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1085: {
1087:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1090:   if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1091:   if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
1092:   MatGetDiagonal(a->A,v);
1093:   return(0);
1094: }

1096: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1097: {
1098:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1102:   MatScale(a->A,aa);
1103:   MatScale(a->B,aa);
1104:   return(0);
1105: }

1107: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1108: {
1109:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

1113: #if defined(PETSC_USE_LOG)
1114:   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
1115: #endif
1116:   MatStashDestroy_Private(&mat->stash);
1117:   VecDestroy(&aij->diag);
1118:   MatDestroy(&aij->A);
1119:   MatDestroy(&aij->B);
1120: #if defined(PETSC_USE_CTABLE)
1121:   PetscTableDestroy(&aij->colmap);
1122: #else
1123:   PetscFree(aij->colmap);
1124: #endif
1125:   PetscFree(aij->garray);
1126:   VecDestroy(&aij->lvec);
1127:   VecScatterDestroy(&aij->Mvctx);
1128:   PetscFree2(aij->rowvalues,aij->rowindices);
1129:   PetscFree(aij->ld);
1130:   PetscFree(mat->data);

1132:   /* may be created by MatCreateMPIAIJSumSeqAIJSymbolic */
1133:   PetscObjectCompose((PetscObject)mat,"MatMergeSeqsToMPI",NULL);

1135:   PetscObjectChangeTypeName((PetscObject)mat,NULL);
1136:   PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1137:   PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1138:   PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1139:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1140:   PetscObjectComposeFunction((PetscObject)mat,"MatResetPreallocation_C",NULL);
1141:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1142:   PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1143:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpibaij_C",NULL);
1144:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1145: #if defined(PETSC_HAVE_CUDA)
1146:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijcusparse_C",NULL);
1147: #endif
1148: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1149:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijkokkos_C",NULL);
1150: #endif
1151:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpidense_C",NULL);
1152: #if defined(PETSC_HAVE_ELEMENTAL)
1153:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1154: #endif
1155: #if defined(PETSC_HAVE_SCALAPACK)
1156:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_scalapack_C",NULL);
1157: #endif
1158: #if defined(PETSC_HAVE_HYPRE)
1159:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);
1160:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_transpose_mpiaij_mpiaij_C",NULL);
1161: #endif
1162:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1163:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_is_mpiaij_C",NULL);
1164:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpiaij_mpiaij_C",NULL);
1165:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetUseScalableIncreaseOverlap_C",NULL);
1166:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijperm_C",NULL);
1167:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijsell_C",NULL);
1168: #if defined(PETSC_HAVE_MKL_SPARSE)
1169:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijmkl_C",NULL);
1170: #endif
1171:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijcrl_C",NULL);
1172:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1173:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisell_C",NULL);
1174:   return(0);
1175: }

1177: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1178: {
1179:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1180:   Mat_SeqAIJ        *A   = (Mat_SeqAIJ*)aij->A->data;
1181:   Mat_SeqAIJ        *B   = (Mat_SeqAIJ*)aij->B->data;
1182:   const PetscInt    *garray = aij->garray;
1183:   const PetscScalar *aa,*ba;
1184:   PetscInt          header[4],M,N,m,rs,cs,nz,cnt,i,ja,jb;
1185:   PetscInt          *rowlens;
1186:   PetscInt          *colidxs;
1187:   PetscScalar       *matvals;
1188:   PetscErrorCode    ierr;

1191:   PetscViewerSetUp(viewer);

1193:   M  = mat->rmap->N;
1194:   N  = mat->cmap->N;
1195:   m  = mat->rmap->n;
1196:   rs = mat->rmap->rstart;
1197:   cs = mat->cmap->rstart;
1198:   nz = A->nz + B->nz;

1200:   /* write matrix header */
1201:   header[0] = MAT_FILE_CLASSID;
1202:   header[1] = M; header[2] = N; header[3] = nz;
1203:   MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1204:   PetscViewerBinaryWrite(viewer,header,4,PETSC_INT);

1206:   /* fill in and store row lengths  */
1207:   PetscMalloc1(m,&rowlens);
1208:   for (i=0; i<m; i++) rowlens[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
1209:   PetscViewerBinaryWriteAll(viewer,rowlens,m,rs,M,PETSC_INT);
1210:   PetscFree(rowlens);

1212:   /* fill in and store column indices */
1213:   PetscMalloc1(nz,&colidxs);
1214:   for (cnt=0, i=0; i<m; i++) {
1215:     for (jb=B->i[i]; jb<B->i[i+1]; jb++) {
1216:       if (garray[B->j[jb]] > cs) break;
1217:       colidxs[cnt++] = garray[B->j[jb]];
1218:     }
1219:     for (ja=A->i[i]; ja<A->i[i+1]; ja++)
1220:       colidxs[cnt++] = A->j[ja] + cs;
1221:     for (; jb<B->i[i+1]; jb++)
1222:       colidxs[cnt++] = garray[B->j[jb]];
1223:   }
1224:   if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1225:   PetscViewerBinaryWriteAll(viewer,colidxs,nz,PETSC_DETERMINE,PETSC_DETERMINE,PETSC_INT);
1226:   PetscFree(colidxs);

1228:   /* fill in and store nonzero values */
1229:   MatSeqAIJGetArrayRead(aij->A,&aa);
1230:   MatSeqAIJGetArrayRead(aij->B,&ba);
1231:   PetscMalloc1(nz,&matvals);
1232:   for (cnt=0, i=0; i<m; i++) {
1233:     for (jb=B->i[i]; jb<B->i[i+1]; jb++) {
1234:       if (garray[B->j[jb]] > cs) break;
1235:       matvals[cnt++] = ba[jb];
1236:     }
1237:     for (ja=A->i[i]; ja<A->i[i+1]; ja++)
1238:       matvals[cnt++] = aa[ja];
1239:     for (; jb<B->i[i+1]; jb++)
1240:       matvals[cnt++] = ba[jb];
1241:   }
1242:   MatSeqAIJRestoreArrayRead(aij->A,&aa);
1243:   MatSeqAIJRestoreArrayRead(aij->B,&ba);
1244:   if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1245:   PetscViewerBinaryWriteAll(viewer,matvals,nz,PETSC_DETERMINE,PETSC_DETERMINE,PETSC_SCALAR);
1246:   PetscFree(matvals);

1248:   /* write block size option to the viewer's .info file */
1249:   MatView_Binary_BlockSizes(mat,viewer);
1250:   return(0);
1251: }

1253: #include <petscdraw.h>
1254: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1255: {
1256:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1257:   PetscErrorCode    ierr;
1258:   PetscMPIInt       rank = aij->rank,size = aij->size;
1259:   PetscBool         isdraw,iascii,isbinary;
1260:   PetscViewer       sviewer;
1261:   PetscViewerFormat format;

1264:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1265:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1266:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1267:   if (iascii) {
1268:     PetscViewerGetFormat(viewer,&format);
1269:     if (format == PETSC_VIEWER_LOAD_BALANCE) {
1270:       PetscInt i,nmax = 0,nmin = PETSC_MAX_INT,navg = 0,*nz,nzlocal = ((Mat_SeqAIJ*) (aij->A->data))->nz + ((Mat_SeqAIJ*) (aij->B->data))->nz;
1271:       PetscMalloc1(size,&nz);
1272:       MPI_Allgather(&nzlocal,1,MPIU_INT,nz,1,MPIU_INT,PetscObjectComm((PetscObject)mat));
1273:       for (i=0; i<(PetscInt)size; i++) {
1274:         nmax = PetscMax(nmax,nz[i]);
1275:         nmin = PetscMin(nmin,nz[i]);
1276:         navg += nz[i];
1277:       }
1278:       PetscFree(nz);
1279:       navg = navg/size;
1280:       PetscViewerASCIIPrintf(viewer,"Load Balance - Nonzeros: Min %D  avg %D  max %D\n",nmin,navg,nmax);
1281:       return(0);
1282:     }
1283:     PetscViewerGetFormat(viewer,&format);
1284:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1285:       MatInfo   info;
1286:       PetscInt *inodes=NULL;

1288:       MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1289:       MatGetInfo(mat,MAT_LOCAL,&info);
1290:       MatInodeGetInodeSizes(aij->A,NULL,&inodes,NULL);
1291:       PetscViewerASCIIPushSynchronized(viewer);
1292:       if (!inodes) {
1293:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, not using I-node routines\n",
1294:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1295:       } else {
1296:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, using I-node routines\n",
1297:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1298:       }
1299:       MatGetInfo(aij->A,MAT_LOCAL,&info);
1300:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1301:       MatGetInfo(aij->B,MAT_LOCAL,&info);
1302:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1303:       PetscViewerFlush(viewer);
1304:       PetscViewerASCIIPopSynchronized(viewer);
1305:       PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1306:       VecScatterView(aij->Mvctx,viewer);
1307:       return(0);
1308:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1309:       PetscInt inodecount,inodelimit,*inodes;
1310:       MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1311:       if (inodes) {
1312:         PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1313:       } else {
1314:         PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1315:       }
1316:       return(0);
1317:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1318:       return(0);
1319:     }
1320:   } else if (isbinary) {
1321:     if (size == 1) {
1322:       PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1323:       MatView(aij->A,viewer);
1324:     } else {
1325:       MatView_MPIAIJ_Binary(mat,viewer);
1326:     }
1327:     return(0);
1328:   } else if (iascii && size == 1) {
1329:     PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1330:     MatView(aij->A,viewer);
1331:     return(0);
1332:   } else if (isdraw) {
1333:     PetscDraw draw;
1334:     PetscBool isnull;
1335:     PetscViewerDrawGetDraw(viewer,0,&draw);
1336:     PetscDrawIsNull(draw,&isnull);
1337:     if (isnull) return(0);
1338:   }

1340:   { /* assemble the entire matrix onto first processor */
1341:     Mat A = NULL, Av;
1342:     IS  isrow,iscol;

1344:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1345:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1346:     MatCreateSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&A);
1347:     MatMPIAIJGetSeqAIJ(A,&Av,NULL,NULL);
1348: /*  The commented code uses MatCreateSubMatrices instead */
1349: /*
1350:     Mat *AA, A = NULL, Av;
1351:     IS  isrow,iscol;

1353:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1354:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1355:     MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA);
1356:     if (!rank) {
1357:        PetscObjectReference((PetscObject)AA[0]);
1358:        A    = AA[0];
1359:        Av   = AA[0];
1360:     }
1361:     MatDestroySubMatrices(1,&AA);
1362: */
1363:     ISDestroy(&iscol);
1364:     ISDestroy(&isrow);
1365:     /*
1366:        Everyone has to call to draw the matrix since the graphics waits are
1367:        synchronized across all processors that share the PetscDraw object
1368:     */
1369:     PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1370:     if (!rank) {
1371:       if (((PetscObject)mat)->name) {
1372:         PetscObjectSetName((PetscObject)Av,((PetscObject)mat)->name);
1373:       }
1374:       MatView_SeqAIJ(Av,sviewer);
1375:     }
1376:     PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1377:     PetscViewerFlush(viewer);
1378:     MatDestroy(&A);
1379:   }
1380:   return(0);
1381: }

1383: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1384: {
1386:   PetscBool      iascii,isdraw,issocket,isbinary;

1389:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1390:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1391:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1392:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1393:   if (iascii || isdraw || isbinary || issocket) {
1394:     MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1395:   }
1396:   return(0);
1397: }

1399: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1400: {
1401:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1403:   Vec            bb1 = NULL;
1404:   PetscBool      hasop;

1407:   if (flag == SOR_APPLY_UPPER) {
1408:     (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1409:     return(0);
1410:   }
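  /* The branches below combine process-local SOR sweeps with block-Jacobi coupling across processes:
     the ghost values of xx are gathered into mat->lvec, the right-hand side is corrected as
     bb1 = bb - B*lvec (lvec is scaled by -1 so that MatMultAdd can be used), and the local diagonal
     block A is then swept in the requested direction */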

1412:   if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
1413:     VecDuplicate(bb,&bb1);
1414:   }

1416:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1417:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1418:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1419:       its--;
1420:     }

1422:     while (its--) {
1423:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1424:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1426:       /* update rhs: bb1 = bb - B*x */
1427:       VecScale(mat->lvec,-1.0);
1428:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1430:       /* local sweep */
1431:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1432:     }
1433:   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1434:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1435:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1436:       its--;
1437:     }
1438:     while (its--) {
1439:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1440:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1442:       /* update rhs: bb1 = bb - B*x */
1443:       VecScale(mat->lvec,-1.0);
1444:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1446:       /* local sweep */
1447:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1448:     }
1449:   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1450:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1451:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1452:       its--;
1453:     }
1454:     while (its--) {
1455:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1456:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1458:       /* update rhs: bb1 = bb - B*x */
1459:       VecScale(mat->lvec,-1.0);
1460:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1462:       /* local sweep */
1463:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1464:     }
1465:   } else if (flag & SOR_EISENSTAT) {
1466:     Vec xx1;

1468:     VecDuplicate(bb,&xx1);
1469:     (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);

1471:     VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1472:     VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1473:     if (!mat->diag) {
1474:       MatCreateVecs(matin,&mat->diag,NULL);
1475:       MatGetDiagonal(matin,mat->diag);
1476:     }
1477:     MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1478:     if (hasop) {
1479:       MatMultDiagonalBlock(matin,xx,bb1);
1480:     } else {
1481:       VecPointwiseMult(bb1,mat->diag,xx);
1482:     }
1483:     VecAYPX(bb1,(omega-2.0)/omega,bb);

1485:     MatMultAdd(mat->B,mat->lvec,bb1,bb1);

1487:     /* local sweep */
1488:     (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1489:     VecAXPY(xx,1.0,xx1);
1490:     VecDestroy(&xx1);
1491:   } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");

1493:   VecDestroy(&bb1);

1495:   matin->factorerrortype = mat->A->factorerrortype;
1496:   return(0);
1497: }

1499: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1500: {
1501:   Mat            aA,aB,Aperm;
1502:   const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1503:   PetscScalar    *aa,*ba;
1504:   PetscInt       i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1505:   PetscSF        rowsf,sf;
1506:   IS             parcolp = NULL;
1507:   PetscBool      done;

1511:   MatGetLocalSize(A,&m,&n);
1512:   ISGetIndices(rowp,&rwant);
1513:   ISGetIndices(colp,&cwant);
1514:   PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);

1516:   /* Invert row permutation to find out where my rows should go */
1517:   PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1518:   PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1519:   PetscSFSetFromOptions(rowsf);
1520:   for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1521:   PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPI_REPLACE);
1522:   PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPI_REPLACE);

1524:   /* Invert column permutation to find out where my columns should go */
1525:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1526:   PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1527:   PetscSFSetFromOptions(sf);
1528:   for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1529:   PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPI_REPLACE);
1530:   PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPI_REPLACE);
1531:   PetscSFDestroy(&sf);

1533:   ISRestoreIndices(rowp,&rwant);
1534:   ISRestoreIndices(colp,&cwant);
1535:   MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);

1537:   /* Find out where my gcols should go */
1538:   MatGetSize(aB,NULL,&ng);
1539:   PetscMalloc1(ng,&gcdest);
1540:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1541:   PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1542:   PetscSFSetFromOptions(sf);
1543:   PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest,MPI_REPLACE);
1544:   PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest,MPI_REPLACE);
1545:   PetscSFDestroy(&sf);

1547:   PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1548:   MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1549:   MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1550:   for (i=0; i<m; i++) {
1551:     PetscInt    row = rdest[i];
1552:     PetscMPIInt rowner;
1553:     PetscLayoutFindOwner(A->rmap,row,&rowner);
1554:     for (j=ai[i]; j<ai[i+1]; j++) {
1555:       PetscInt    col = cdest[aj[j]];
1556:       PetscMPIInt cowner;
1557:       PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1558:       if (rowner == cowner) dnnz[i]++;
1559:       else onnz[i]++;
1560:     }
1561:     for (j=bi[i]; j<bi[i+1]; j++) {
1562:       PetscInt    col = gcdest[bj[j]];
1563:       PetscMPIInt cowner;
1564:       PetscLayoutFindOwner(A->cmap,col,&cowner);
1565:       if (rowner == cowner) dnnz[i]++;
1566:       else onnz[i]++;
1567:     }
1568:   }
1569:   PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz,MPI_REPLACE);
1570:   PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz,MPI_REPLACE);
1571:   PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz,MPI_REPLACE);
1572:   PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz,MPI_REPLACE);
1573:   PetscSFDestroy(&rowsf);

1575:   MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1576:   MatSeqAIJGetArray(aA,&aa);
1577:   MatSeqAIJGetArray(aB,&ba);
1578:   for (i=0; i<m; i++) {
1579:     PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1580:     PetscInt j0,rowlen;
1581:     rowlen = ai[i+1] - ai[i];
1582:     for (j0=j=0; j<rowlen; j0=j) { /* rowlen could exceed the work-array length m, so insert in batches */
1583:       for (; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1584:       MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1585:     }
1586:     rowlen = bi[i+1] - bi[i];
1587:     for (j0=j=0; j<rowlen; j0=j) {
1588:       for (; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1589:       MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1590:     }
1591:   }
1592:   MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1593:   MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1594:   MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1595:   MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1596:   MatSeqAIJRestoreArray(aA,&aa);
1597:   MatSeqAIJRestoreArray(aB,&ba);
1598:   PetscFree4(dnnz,onnz,tdnnz,tonnz);
1599:   PetscFree3(work,rdest,cdest);
1600:   PetscFree(gcdest);
1601:   if (parcolp) {ISDestroy(&colp);}
1602:   *B = Aperm;
1603:   return(0);
1604: }

1606: PetscErrorCode  MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1607: {
1608:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1612:   MatGetSize(aij->B,NULL,nghosts);
1613:   if (ghosts) *ghosts = aij->garray;
1614:   return(0);
1615: }

1617: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1618: {
1619:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1620:   Mat            A    = mat->A,B = mat->B;
1622:   PetscLogDouble isend[5],irecv[5];

1625:   info->block_size = 1.0;
1626:   MatGetInfo(A,MAT_LOCAL,info);

1628:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1629:   isend[3] = info->memory;  isend[4] = info->mallocs;

1631:   MatGetInfo(B,MAT_LOCAL,info);

1633:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1634:   isend[3] += info->memory;  isend[4] += info->mallocs;
1635:   if (flag == MAT_LOCAL) {
1636:     info->nz_used      = isend[0];
1637:     info->nz_allocated = isend[1];
1638:     info->nz_unneeded  = isend[2];
1639:     info->memory       = isend[3];
1640:     info->mallocs      = isend[4];
1641:   } else if (flag == MAT_GLOBAL_MAX) {
1642:     MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_MAX,PetscObjectComm((PetscObject)matin));

1644:     info->nz_used      = irecv[0];
1645:     info->nz_allocated = irecv[1];
1646:     info->nz_unneeded  = irecv[2];
1647:     info->memory       = irecv[3];
1648:     info->mallocs      = irecv[4];
1649:   } else if (flag == MAT_GLOBAL_SUM) {
1650:     MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_SUM,PetscObjectComm((PetscObject)matin));

1652:     info->nz_used      = irecv[0];
1653:     info->nz_allocated = irecv[1];
1654:     info->nz_unneeded  = irecv[2];
1655:     info->memory       = irecv[3];
1656:     info->mallocs      = irecv[4];
1657:   }
1658:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1659:   info->fill_ratio_needed = 0;
1660:   info->factor_mallocs    = 0;
1661:   return(0);
1662: }

1664: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1665: {
1666:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1670:   switch (op) {
1671:   case MAT_NEW_NONZERO_LOCATIONS:
1672:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1673:   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1674:   case MAT_KEEP_NONZERO_PATTERN:
1675:   case MAT_NEW_NONZERO_LOCATION_ERR:
1676:   case MAT_USE_INODES:
1677:   case MAT_IGNORE_ZERO_ENTRIES:
1678:   case MAT_FORM_EXPLICIT_TRANSPOSE:
1679:     MatCheckPreallocated(A,1);
1680:     MatSetOption(a->A,op,flg);
1681:     MatSetOption(a->B,op,flg);
1682:     break;
1683:   case MAT_ROW_ORIENTED:
1684:     MatCheckPreallocated(A,1);
1685:     a->roworiented = flg;

1687:     MatSetOption(a->A,op,flg);
1688:     MatSetOption(a->B,op,flg);
1689:     break;
1690:   case MAT_FORCE_DIAGONAL_ENTRIES:
1691:   case MAT_SORTED_FULL:
1692:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1693:     break;
1694:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1695:     a->donotstash = flg;
1696:     break;
1697:   /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1698:   case MAT_SPD:
1699:   case MAT_SYMMETRIC:
1700:   case MAT_STRUCTURALLY_SYMMETRIC:
1701:   case MAT_HERMITIAN:
1702:   case MAT_SYMMETRY_ETERNAL:
1703:     break;
1704:   case MAT_SUBMAT_SINGLEIS:
1705:     A->submat_singleis = flg;
1706:     break;
1707:   case MAT_STRUCTURE_ONLY:
1708:     /* The option is handled directly by MatSetOption() */
1709:     break;
1710:   default:
1711:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1712:   }
1713:   return(0);
1714: }

1716: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1717: {
1718:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1719:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1721:   PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1722:   PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1723:   PetscInt       *cmap,*idx_p;

1726:   if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1727:   mat->getrowactive = PETSC_TRUE;

1729:   if (!mat->rowvalues && (idx || v)) {
1730:     /*
1731:         allocate enough space to hold information from the longest row.
1732:     */
1733:     Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1734:     PetscInt   max = 1,tmp;
1735:     for (i=0; i<matin->rmap->n; i++) {
1736:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1737:       if (max < tmp) max = tmp;
1738:     }
1739:     PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1740:   }

1742:   if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1743:   lrow = row - rstart;

1745:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1746:   if (!v)   {pvA = NULL; pvB = NULL;}
1747:   if (!idx) {pcA = NULL; if (!v) pcB = NULL;}
1748:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1749:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1750:   nztot = nzA + nzB;

1752:   cmap = mat->garray;
1753:   if (v  || idx) {
1754:     if (nztot) {
1755:       /* Sort by increasing column numbers, assuming A and B already sorted */
1756:       PetscInt imark = -1;
1757:       if (v) {
1758:         *v = v_p = mat->rowvalues;
1759:         for (i=0; i<nzB; i++) {
1760:           if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1761:           else break;
1762:         }
1763:         imark = i;
1764:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1765:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1766:       }
1767:       if (idx) {
1768:         *idx = idx_p = mat->rowindices;
1769:         if (imark > -1) {
1770:           for (i=0; i<imark; i++) {
1771:             idx_p[i] = cmap[cworkB[i]];
1772:           }
1773:         } else {
1774:           for (i=0; i<nzB; i++) {
1775:             if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1776:             else break;
1777:           }
1778:           imark = i;
1779:         }
1780:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
1781:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
1782:       }
1783:     } else {
1784:       if (idx) *idx = NULL;
1785:       if (v)   *v   = NULL;
1786:     }
1787:   }
1788:   *nz  = nztot;
1789:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1790:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1791:   return(0);
1792: }

1794: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1795: {
1796:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1799:   if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1800:   aij->getrowactive = PETSC_FALSE;
1801:   return(0);
1802: }

1804: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1805: {
1806:   Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)mat->data;
1807:   Mat_SeqAIJ     *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1809:   PetscInt       i,j,cstart = mat->cmap->rstart;
1810:   PetscReal      sum = 0.0;
1811:   MatScalar      *v;

1814:   if (aij->size == 1) {
1815:     MatNorm(aij->A,type,norm);
1816:   } else {
1817:     if (type == NORM_FROBENIUS) {
1818:       v = amat->a;
1819:       for (i=0; i<amat->nz; i++) {
1820:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1821:       }
1822:       v = bmat->a;
1823:       for (i=0; i<bmat->nz; i++) {
1824:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1825:       }
1826:       MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1827:       *norm = PetscSqrtReal(*norm);
1828:       PetscLogFlops(2.0*amat->nz+2.0*bmat->nz);
1829:     } else if (type == NORM_1) { /* max column norm */
1830:       PetscReal *tmp,*tmp2;
1831:       PetscInt  *jj,*garray = aij->garray;
1832:       PetscCalloc1(mat->cmap->N+1,&tmp);
1833:       PetscMalloc1(mat->cmap->N+1,&tmp2);
1834:       *norm = 0.0;
1835:       v     = amat->a; jj = amat->j;
1836:       for (j=0; j<amat->nz; j++) {
1837:         tmp[cstart + *jj++] += PetscAbsScalar(*v);  v++;
1838:       }
1839:       v = bmat->a; jj = bmat->j;
1840:       for (j=0; j<bmat->nz; j++) {
1841:         tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1842:       }
1843:       MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1844:       for (j=0; j<mat->cmap->N; j++) {
1845:         if (tmp2[j] > *norm) *norm = tmp2[j];
1846:       }
1847:       PetscFree(tmp);
1848:       PetscFree(tmp2);
1849:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1850:     } else if (type == NORM_INFINITY) { /* max row norm */
1851:       PetscReal ntemp = 0.0;
1852:       for (j=0; j<aij->A->rmap->n; j++) {
1853:         v   = amat->a + amat->i[j];
1854:         sum = 0.0;
1855:         for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1856:           sum += PetscAbsScalar(*v); v++;
1857:         }
1858:         v = bmat->a + bmat->i[j];
1859:         for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1860:           sum += PetscAbsScalar(*v); v++;
1861:         }
1862:         if (sum > ntemp) ntemp = sum;
1863:       }
1864:       MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
1865:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1866:     } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
1867:   }
1868:   return(0);
1869: }

1871: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1872: {
1873:   Mat_MPIAIJ      *a    =(Mat_MPIAIJ*)A->data,*b;
1874:   Mat_SeqAIJ      *Aloc =(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data,*sub_B_diag;
1875:   PetscInt        M     = A->rmap->N,N=A->cmap->N,ma,na,mb,nb,row,*cols,*cols_tmp,*B_diag_ilen,i,ncol,A_diag_ncol;
1876:   const PetscInt  *ai,*aj,*bi,*bj,*B_diag_i;
1877:   PetscErrorCode  ierr;
1878:   Mat             B,A_diag,*B_diag;
1879:   const MatScalar *pbv,*bv;

1882:   ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
1883:   ai = Aloc->i; aj = Aloc->j;
1884:   bi = Bloc->i; bj = Bloc->j;
1885:   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1886:     PetscInt             *d_nnz,*g_nnz,*o_nnz;
1887:     PetscSFNode          *oloc;
1888:     PETSC_UNUSED PetscSF sf;

1890:     PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
1891:     /* compute d_nnz for preallocation */
1892:     PetscArrayzero(d_nnz,na);
1893:     for (i=0; i<ai[ma]; i++) {
1894:       d_nnz[aj[i]]++;
1895:     }
1896:     /* compute local off-diagonal contributions */
1897:     PetscArrayzero(g_nnz,nb);
1898:     for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
1899:     /* map those to global */
1900:     PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1901:     PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
1902:     PetscSFSetFromOptions(sf);
1903:     PetscArrayzero(o_nnz,na);
1904:     PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
1905:     PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
1906:     PetscSFDestroy(&sf);
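    /* After the reduce, o_nnz[c] holds, for each locally owned column c of A (that is, each local row
       of the transpose), the total number of nonzeros contributed to that column by the off-diagonal
       blocks of all ranks, which is exactly the off-diagonal preallocation needed for that row of B */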

1908:     MatCreate(PetscObjectComm((PetscObject)A),&B);
1909:     MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
1910:     MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
1911:     MatSetType(B,((PetscObject)A)->type_name);
1912:     MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
1913:     PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
1914:   } else {
1915:     B    = *matout;
1916:     MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
1917:   }

1919:   b           = (Mat_MPIAIJ*)B->data;
1920:   A_diag      = a->A;
1921:   B_diag      = &b->A;
1922:   sub_B_diag  = (Mat_SeqAIJ*)(*B_diag)->data;
1923:   A_diag_ncol = A_diag->cmap->N;
1924:   B_diag_ilen = sub_B_diag->ilen;
1925:   B_diag_i    = sub_B_diag->i;

1927:   /* Set ilen for diagonal of B */
1928:   for (i=0; i<A_diag_ncol; i++) {
1929:     B_diag_ilen[i] = B_diag_i[i+1] - B_diag_i[i];
1930:   }

1932:   /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
1933:      very quickly (i.e., without using MatSetValues()) because all writes are local. */
1934:   MatTranspose(A_diag,MAT_REUSE_MATRIX,B_diag);

1936:   /* copy over the B part */
1937:   PetscMalloc1(bi[mb],&cols);
1938:   MatSeqAIJGetArrayRead(a->B,&bv);
1939:   pbv  = bv;
1940:   row  = A->rmap->rstart;
1941:   for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
1942:   cols_tmp = cols;
1943:   for (i=0; i<mb; i++) {
1944:     ncol = bi[i+1]-bi[i];
1945:     MatSetValues(B,ncol,cols_tmp,1,&row,pbv,INSERT_VALUES);
1946:     row++;
1947:     pbv += ncol; cols_tmp += ncol;
1948:   }
1949:   PetscFree(cols);
1950:   MatSeqAIJRestoreArrayRead(a->B,&bv);

1952:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1953:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1954:   if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
1955:     *matout = B;
1956:   } else {
1957:     MatHeaderMerge(A,&B);
1958:   }
1959:   return(0);
1960: }

1962: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
1963: {
1964:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1965:   Mat            a    = aij->A,b = aij->B;
1967:   PetscInt       s1,s2,s3;

1970:   MatGetLocalSize(mat,&s2,&s3);
1971:   if (rr) {
1972:     VecGetLocalSize(rr,&s1);
1973:     if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1974:     /* Overlap communication with computation. */
1975:     VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1976:   }
1977:   if (ll) {
1978:     VecGetLocalSize(ll,&s1);
1979:     if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1980:     (*b->ops->diagonalscale)(b,ll,NULL);
1981:   }
1982:   /* scale  the diagonal block */
1983:   (*a->ops->diagonalscale)(a,ll,rr);

1985:   if (rr) {
1986:     /* Do a scatter end and then right scale the off-diagonal block */
1987:     VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1988:     (*b->ops->diagonalscale)(b,NULL,aij->lvec);
1989:   }
1990:   return(0);
1991: }

1993: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
1994: {
1995:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1999:   MatSetUnfactored(a->A);
2000:   return(0);
2001: }

2003: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool  *flag)
2004: {
2005:   Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2006:   Mat            a,b,c,d;
2007:   PetscBool      flg;

2011:   a = matA->A; b = matA->B;
2012:   c = matB->A; d = matB->B;

2014:   MatEqual(a,c,&flg);
2015:   if (flg) {
2016:     MatEqual(b,d,&flg);
2017:   }
2018:   MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2019:   return(0);
2020: }

2022: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2023: {
2025:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2026:   Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;

2029:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2030:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2031:     /* because of the column compression in the off-process part of the matrix a->B,
2032:        the number of columns in a->B and b->B may be different, hence we cannot call
2033:        MatCopy() directly on the two parts. If need be, a copy more efficient than
2034:        MatCopy_Basic() could be provided by first uncompressing the a->B matrices
2035:        and then copying the submatrices */
2036:     MatCopy_Basic(A,B,str);
2037:   } else {
2038:     MatCopy(a->A,b->A,str);
2039:     MatCopy(a->B,b->B,str);
2040:   }
2041:   PetscObjectStateIncrease((PetscObject)B);
2042:   return(0);
2043: }

2045: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2046: {

2050:   MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,NULL,PETSC_DEFAULT,NULL);
2051:   return(0);
2052: }

2054: /*
2055:    Computes the number of nonzeros per row needed for preallocation when X and Y
2056:    have different nonzero structure.
2057: */
2058: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2059: {
2060:   PetscInt       i,j,k,nzx,nzy;

2063:   /* Set the number of nonzeros in the new matrix */
2064:   for (i=0; i<m; i++) {
2065:     const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2066:     nzx = xi[i+1] - xi[i];
2067:     nzy = yi[i+1] - yi[i];
2068:     nnz[i] = 0;
2069:     for (j=0,k=0; j<nzx; j++) {                   /* Point in X */
2070:       for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2071:       if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++;             /* Skip duplicate */
2072:       nnz[i]++;
2073:     }
2074:     for (; k<nzy; k++) nnz[i]++;
2075:   }
2076:   return(0);
2077: }
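/* A small worked example of the merge above (illustrative values): if row i of X has global columns
   {0,3,7} and row i of Y has global columns {3,5,9}, both sorted, the loops count the union
   {0,3,5,7,9} exactly once, giving nnz[i] = 5; the shared column 3 is counted once because the
   "Skip duplicate" branch advances k past it */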

2079: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2080: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2081: {
2083:   PetscInt       m = Y->rmap->N;
2084:   Mat_SeqAIJ     *x = (Mat_SeqAIJ*)X->data;
2085:   Mat_SeqAIJ     *y = (Mat_SeqAIJ*)Y->data;

2088:   MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2089:   return(0);
2090: }

2092: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2093: {
2095:   Mat_MPIAIJ     *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;

2098:   if (str == SAME_NONZERO_PATTERN) {
2099:     MatAXPY(yy->A,a,xx->A,str);
2100:     MatAXPY(yy->B,a,xx->B,str);
2101:   } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */
2102:     MatAXPY_Basic(Y,a,X,str);
2103:   } else {
2104:     Mat      B;
2105:     PetscInt *nnz_d,*nnz_o;

2107:     PetscMalloc1(yy->A->rmap->N,&nnz_d);
2108:     PetscMalloc1(yy->B->rmap->N,&nnz_o);
2109:     MatCreate(PetscObjectComm((PetscObject)Y),&B);
2110:     PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2111:     MatSetLayouts(B,Y->rmap,Y->cmap);
2112:     MatSetType(B,((PetscObject)Y)->type_name);
2113:     MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2114:     MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2115:     MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2116:     MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2117:     MatHeaderReplace(Y,&B);
2118:     PetscFree(nnz_d);
2119:     PetscFree(nnz_o);
2120:   }
2121:   return(0);
2122: }
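/* Note on the DIFFERENT_NONZERO_PATTERN branch above: a fresh matrix B with the union sparsity pattern
   is preallocated using the preallocation helpers defined just before MatAXPY_MPIAIJ, filled by
   MatAXPY_BasicWithPreallocation(), and then swapped into Y with MatHeaderReplace() */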

2124: PETSC_INTERN PetscErrorCode MatConjugate_SeqAIJ(Mat);

2126: PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
2127: {
2128: #if defined(PETSC_USE_COMPLEX)
2130:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2133:   MatConjugate_SeqAIJ(aij->A);
2134:   MatConjugate_SeqAIJ(aij->B);
2135: #else
2137: #endif
2138:   return(0);
2139: }

2141: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2142: {
2143:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2147:   MatRealPart(a->A);
2148:   MatRealPart(a->B);
2149:   return(0);
2150: }

2152: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2153: {
2154:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2158:   MatImaginaryPart(a->A);
2159:   MatImaginaryPart(a->B);
2160:   return(0);
2161: }

2163: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A,Vec v,PetscInt idx[])
2164: {
2165:   Mat_MPIAIJ        *a = (Mat_MPIAIJ*)A->data;
2166:   PetscErrorCode    ierr;
2167:   PetscInt          i,*idxb = NULL,m = A->rmap->n;
2168:   PetscScalar       *va,*vv;
2169:   Vec               vB,vA;
2170:   const PetscScalar *vb;

2173:   VecCreateSeq(PETSC_COMM_SELF,m,&vA);
2174:   MatGetRowMaxAbs(a->A,vA,idx);

2176:   VecGetArrayWrite(vA,&va);
2177:   if (idx) {
2178:     for (i=0; i<m; i++) {
2179:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2180:     }
2181:   }

2183:   VecCreateSeq(PETSC_COMM_SELF,m,&vB);
2184:   PetscMalloc1(m,&idxb);
2185:   MatGetRowMaxAbs(a->B,vB,idxb);

2187:   VecGetArrayWrite(v,&vv);
2188:   VecGetArrayRead(vB,&vb);
2189:   for (i=0; i<m; i++) {
2190:     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2191:       vv[i] = vb[i];
2192:       if (idx) idx[i] = a->garray[idxb[i]];
2193:     } else {
2194:       vv[i] = va[i];
2195:       if (idx && PetscAbsScalar(va[i]) == PetscAbsScalar(vb[i]) && idxb[i] != -1 && idx[i] > a->garray[idxb[i]])
2196:         idx[i] = a->garray[idxb[i]];
2197:     }
2198:   }
2199:   VecRestoreArrayWrite(v,&vv);
2200:   VecRestoreArrayWrite(vA,&va);
2201:   VecRestoreArrayRead(vB,&vb);
2202:   PetscFree(idxb);
2203:   VecDestroy(&vA);
2204:   VecDestroy(&vB);
2205:   return(0);
2206: }

2208: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2209: {
2210:   Mat_MPIAIJ        *mat   = (Mat_MPIAIJ*) A->data;
2211:   PetscInt          m = A->rmap->n,n = A->cmap->n;
2212:   PetscInt          cstart = A->cmap->rstart,cend = A->cmap->rend;
2213:   PetscInt          *cmap  = mat->garray;
2214:   PetscInt          *diagIdx, *offdiagIdx;
2215:   Vec               diagV, offdiagV;
2216:   PetscScalar       *a, *diagA, *offdiagA;
2217:   const PetscScalar *ba,*bav;
2218:   PetscInt          r,j,col,ncols,*bi,*bj;
2219:   PetscErrorCode    ierr;
2220:   Mat               B = mat->B;
2221:   Mat_SeqAIJ        *b = (Mat_SeqAIJ*)B->data;

2224:   /* When one process holds the entire A and the other processes have no entries */
2225:   if (A->cmap->N == n) {
2226:     VecGetArrayWrite(v,&diagA);
2227:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,m,diagA,&diagV);
2228:     MatGetRowMinAbs(mat->A,diagV,idx);
2229:     VecDestroy(&diagV);
2230:     VecRestoreArrayWrite(v,&diagA);
2231:     return(0);
2232:   } else if (n == 0) {
2233:     if (m) {
2234:       VecGetArrayWrite(v,&a);
2235:       for (r = 0; r < m; r++) {a[r] = 0.0; if (idx) idx[r] = -1;}
2236:       VecRestoreArrayWrite(v,&a);
2237:     }
2238:     return(0);
2239:   }

2241:   PetscMalloc2(m,&diagIdx,m,&offdiagIdx);
2242:   VecCreateSeq(PETSC_COMM_SELF, m, &diagV);
2243:   VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV);
2244:   MatGetRowMinAbs(mat->A, diagV, diagIdx);

2246:   /* Get offdiagIdx[] for implicit 0.0 */
2247:   MatSeqAIJGetArrayRead(B,&bav);
2248:   ba   = bav;
2249:   bi   = b->i;
2250:   bj   = b->j;
2251:   VecGetArrayWrite(offdiagV, &offdiagA);
2252:   for (r = 0; r < m; r++) {
2253:     ncols = bi[r+1] - bi[r];
2254:     if (ncols == A->cmap->N - n) { /* Brow is dense */
2255:       offdiagA[r] = *ba; offdiagIdx[r] = cmap[0];
2256:     } else { /* Brow is sparse, so the implicit zeros already give a minimum absolute value of 0.0 */
2257:       offdiagA[r] = 0.0;

2259:       /* Find first hole in the cmap */
2260:       for (j=0; j<ncols; j++) {
2261:         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2262:         if (col > j && j < cstart) {
2263:           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2264:           break;
2265:         } else if (col > j + n && j >= cstart) {
2266:           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2267:           break;
2268:         }
2269:       }
2270:       if (j == ncols && ncols < A->cmap->N - n) {
2271:         /* a hole is outside compressed Bcols */
2272:         if (ncols == 0) {
2273:           if (cstart) {
2274:             offdiagIdx[r] = 0;
2275:           } else offdiagIdx[r] = cend;
2276:         } else { /* ncols > 0 */
2277:           offdiagIdx[r] = cmap[ncols-1] + 1;
2278:           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2279:         }
2280:       }
2281:     }

2283:     for (j=0; j<ncols; j++) {
2284:       if (PetscAbsScalar(offdiagA[r]) > PetscAbsScalar(*ba)) {offdiagA[r] = *ba; offdiagIdx[r] = cmap[*bj];}
2285:       ba++; bj++;
2286:     }
2287:   }

2289:   VecGetArrayWrite(v, &a);
2290:   VecGetArrayRead(diagV, (const PetscScalar**)&diagA);
2291:   for (r = 0; r < m; ++r) {
2292:     if (PetscAbsScalar(diagA[r]) < PetscAbsScalar(offdiagA[r])) {
2293:       a[r]   = diagA[r];
2294:       if (idx) idx[r] = cstart + diagIdx[r];
2295:     } else if (PetscAbsScalar(diagA[r]) == PetscAbsScalar(offdiagA[r])) {
2296:       a[r] = diagA[r];
2297:       if (idx) {
2298:         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2299:           idx[r] = cstart + diagIdx[r];
2300:         } else idx[r] = offdiagIdx[r];
2301:       }
2302:     } else {
2303:       a[r]   = offdiagA[r];
2304:       if (idx) idx[r] = offdiagIdx[r];
2305:     }
2306:   }
2307:   MatSeqAIJRestoreArrayRead(B,&bav);
2308:   VecRestoreArrayWrite(v, &a);
2309:   VecRestoreArrayRead(diagV, (const PetscScalar**)&diagA);
2310:   VecRestoreArrayWrite(offdiagV, &offdiagA);
2311:   VecDestroy(&diagV);
2312:   VecDestroy(&offdiagV);
2313:   PetscFree2(diagIdx, offdiagIdx);
2314:   return(0);
2315: }

2317: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A,Vec v,PetscInt idx[])
2318: {
2319:   Mat_MPIAIJ        *mat = (Mat_MPIAIJ*) A->data;
2320:   PetscInt          m = A->rmap->n,n = A->cmap->n;
2321:   PetscInt          cstart = A->cmap->rstart,cend = A->cmap->rend;
2322:   PetscInt          *cmap  = mat->garray;
2323:   PetscInt          *diagIdx, *offdiagIdx;
2324:   Vec               diagV, offdiagV;
2325:   PetscScalar       *a, *diagA, *offdiagA;
2326:   const PetscScalar *ba,*bav;
2327:   PetscInt          r,j,col,ncols,*bi,*bj;
2328:   PetscErrorCode    ierr;
2329:   Mat               B = mat->B;
2330:   Mat_SeqAIJ        *b = (Mat_SeqAIJ*)B->data;

2333:   /* When one process holds the entire A and the other processes have no entries */
2334:   if (A->cmap->N == n) {
2335:     VecGetArrayWrite(v,&diagA);
2336:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,m,diagA,&diagV);
2337:     MatGetRowMin(mat->A,diagV,idx);
2338:     VecDestroy(&diagV);
2339:     VecRestoreArrayWrite(v,&diagA);
2340:     return(0);
2341:   } else if (n == 0) {
2342:     if (m) {
2343:       VecGetArrayWrite(v,&a);
2344:       for (r = 0; r < m; r++) {a[r] = PETSC_MAX_REAL; if (idx) idx[r] = -1;}
2345:       VecRestoreArrayWrite(v,&a);
2346:     }
2347:     return(0);
2348:   }

2350:   PetscCalloc2(m,&diagIdx,m,&offdiagIdx);
2351:   VecCreateSeq(PETSC_COMM_SELF, m, &diagV);
2352:   VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV);
2353:   MatGetRowMin(mat->A, diagV, diagIdx);

2355:   /* Get offdiagIdx[] for implicit 0.0 */
2356:   MatSeqAIJGetArrayRead(B,&bav);
2357:   ba   = bav;
2358:   bi   = b->i;
2359:   bj   = b->j;
2360:   VecGetArrayWrite(offdiagV, &offdiagA);
2361:   for (r = 0; r < m; r++) {
2362:     ncols = bi[r+1] - bi[r];
2363:     if (ncols == A->cmap->N - n) { /* Brow is dense */
2364:       offdiagA[r] = *ba; offdiagIdx[r] = cmap[0];
2365:     } else { /* Brow is sparse, so the implicit zeros mean the row minimum is 0.0 or lower */
2366:       offdiagA[r] = 0.0;

2368:       /* Find first hole in the cmap */
2369:       for (j=0; j<ncols; j++) {
2370:         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2371:         if (col > j && j < cstart) {
2372:           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2373:           break;
2374:         } else if (col > j + n && j >= cstart) {
2375:           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2376:           break;
2377:         }
2378:       }
2379:       if (j == ncols && ncols < A->cmap->N - n) {
2380:         /* a hole is outside compressed Bcols */
2381:         if (ncols == 0) {
2382:           if (cstart) {
2383:             offdiagIdx[r] = 0;
2384:           } else offdiagIdx[r] = cend;
2385:         } else { /* ncols > 0 */
2386:           offdiagIdx[r] = cmap[ncols-1] + 1;
2387:           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2388:         }
2389:       }
2390:     }

2392:     for (j=0; j<ncols; j++) {
2393:       if (PetscRealPart(offdiagA[r]) > PetscRealPart(*ba)) {offdiagA[r] = *ba; offdiagIdx[r] = cmap[*bj];}
2394:       ba++; bj++;
2395:     }
2396:   }

2398:   VecGetArrayWrite(v, &a);
2399:   VecGetArrayRead(diagV, (const PetscScalar**)&diagA);
2400:   for (r = 0; r < m; ++r) {
2401:     if (PetscRealPart(diagA[r]) < PetscRealPart(offdiagA[r])) {
2402:       a[r]   = diagA[r];
2403:       if (idx) idx[r] = cstart + diagIdx[r];
2404:     } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) {
2405:       a[r] = diagA[r];
2406:       if (idx) {
2407:         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2408:           idx[r] = cstart + diagIdx[r];
2409:         } else idx[r] = offdiagIdx[r];
2410:       }
2411:     } else {
2412:       a[r]   = offdiagA[r];
2413:       if (idx) idx[r] = offdiagIdx[r];
2414:     }
2415:   }
2416:   MatSeqAIJRestoreArrayRead(B,&bav);
2417:   VecRestoreArrayWrite(v, &a);
2418:   VecRestoreArrayRead(diagV, (const PetscScalar**)&diagA);
2419:   VecRestoreArrayWrite(offdiagV, &offdiagA);
2420:   VecDestroy(&diagV);
2421:   VecDestroy(&offdiagV);
2422:   PetscFree2(diagIdx, offdiagIdx);
2423:   return(0);
2424: }

2426: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A,Vec v,PetscInt idx[])
2427: {
2428:   Mat_MPIAIJ        *mat = (Mat_MPIAIJ*)A->data;
2429:   PetscInt          m = A->rmap->n,n = A->cmap->n;
2430:   PetscInt          cstart = A->cmap->rstart,cend = A->cmap->rend;
2431:   PetscInt          *cmap  = mat->garray;
2432:   PetscInt          *diagIdx, *offdiagIdx;
2433:   Vec               diagV, offdiagV;
2434:   PetscScalar       *a, *diagA, *offdiagA;
2435:   const PetscScalar *ba,*bav;
2436:   PetscInt          r,j,col,ncols,*bi,*bj;
2437:   PetscErrorCode    ierr;
2438:   Mat               B = mat->B;
2439:   Mat_SeqAIJ        *b = (Mat_SeqAIJ*)B->data;

2442:   /* When one process holds the entire A and the other processes have no entries */
2443:   if (A->cmap->N == n) {
2444:     VecGetArrayWrite(v,&diagA);
2445:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,m,diagA,&diagV);
2446:     MatGetRowMax(mat->A,diagV,idx);
2447:     VecDestroy(&diagV);
2448:     VecRestoreArrayWrite(v,&diagA);
2449:     return(0);
2450:   } else if (n == 0) {
2451:     if (m) {
2452:       VecGetArrayWrite(v,&a);
2453:       for (r = 0; r < m; r++) {a[r] = PETSC_MIN_REAL; if (idx) idx[r] = -1;}
2454:       VecRestoreArrayWrite(v,&a);
2455:     }
2456:     return(0);
2457:   }

2459:   PetscMalloc2(m,&diagIdx,m,&offdiagIdx);
2460:   VecCreateSeq(PETSC_COMM_SELF, m, &diagV);
2461:   VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV);
2462:   MatGetRowMax(mat->A, diagV, diagIdx);

2464:   /* Get offdiagIdx[] for implicit 0.0 */
2465:   MatSeqAIJGetArrayRead(B,&bav);
2466:   ba   = bav;
2467:   bi   = b->i;
2468:   bj   = b->j;
2469:   VecGetArrayWrite(offdiagV, &offdiagA);
2470:   for (r = 0; r < m; r++) {
2471:     ncols = bi[r+1] - bi[r];
2472:     if (ncols == A->cmap->N - n) { /* Brow is dense */
2473:       offdiagA[r] = *ba; offdiagIdx[r] = cmap[0];
2474:     } else { /* Brow is sparse so already KNOW maximum is 0.0 or higher */
2475:       offdiagA[r] = 0.0;

2477:       /* Find first hole in the cmap */
2478:       for (j=0; j<ncols; j++) {
2479:         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2480:         if (col > j && j < cstart) {
2481:           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2482:           break;
2483:         } else if (col > j + n && j >= cstart) {
2484:           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2485:           break;
2486:         }
2487:       }
2488:       if (j == ncols && ncols < A->cmap->N - n) {
2489:         /* a hole is outside compressed Bcols */
2490:         if (ncols == 0) {
2491:           if (cstart) {
2492:             offdiagIdx[r] = 0;
2493:           } else offdiagIdx[r] = cend;
2494:         } else { /* ncols > 0 */
2495:           offdiagIdx[r] = cmap[ncols-1] + 1;
2496:           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2497:         }
2498:       }
2499:     }

2501:     for (j=0; j<ncols; j++) {
2502:       if (PetscRealPart(offdiagA[r]) < PetscRealPart(*ba)) {offdiagA[r] = *ba; offdiagIdx[r] = cmap[*bj];}
2503:       ba++; bj++;
2504:     }
2505:   }

2507:   VecGetArrayWrite(v,    &a);
2508:   VecGetArrayRead(diagV,(const PetscScalar**)&diagA);
2509:   for (r = 0; r < m; ++r) {
2510:     if (PetscRealPart(diagA[r]) > PetscRealPart(offdiagA[r])) {
2511:       a[r] = diagA[r];
2512:       if (idx) idx[r] = cstart + diagIdx[r];
2513:     } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) {
2514:       a[r] = diagA[r];
2515:       if (idx) {
2516:         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2517:           idx[r] = cstart + diagIdx[r];
2518:         } else idx[r] = offdiagIdx[r];
2519:       }
2520:     } else {
2521:       a[r] = offdiagA[r];
2522:       if (idx) idx[r] = offdiagIdx[r];
2523:     }
2524:   }
2525:   MatSeqAIJRestoreArrayRead(B,&bav);
2526:   VecRestoreArrayWrite(v,       &a);
2527:   VecRestoreArrayRead(diagV,   (const PetscScalar**)&diagA);
2528:   VecRestoreArrayWrite(offdiagV,&offdiagA);
2529:   VecDestroy(&diagV);
2530:   VecDestroy(&offdiagV);
2531:   PetscFree2(diagIdx, offdiagIdx);
2532:   return(0);
2533: }

2535: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2536: {
2538:   Mat            *dummy;

2541:   MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2542:   *newmat = *dummy;
2543:   PetscFree(dummy);
2544:   return(0);
2545: }

2547: PetscErrorCode  MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2548: {
2549:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*) A->data;

2553:   MatInvertBlockDiagonal(a->A,values);
2554:   A->factorerrortype = a->A->factorerrortype;
2555:   return(0);
2556: }

2558: static PetscErrorCode  MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2559: {
2561:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)x->data;

2564:   if (!x->assembled && !x->preallocated) SETERRQ(PetscObjectComm((PetscObject)x), PETSC_ERR_ARG_WRONGSTATE, "MatSetRandom on an unassembled and unpreallocated MATMPIAIJ is not allowed");
2565:   MatSetRandom(aij->A,rctx);
2566:   if (x->assembled) {
2567:     MatSetRandom(aij->B,rctx);
2568:   } else {
2569:     MatSetRandomSkipColumnRange_SeqAIJ_Private(aij->B,x->cmap->rstart,x->cmap->rend,rctx);
2570:   }
2571:   MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2572:   MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2573:   return(0);
2574: }

2576: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2577: {
2579:   if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2580:   else A->ops->increaseoverlap    = MatIncreaseOverlap_MPIAIJ;
2581:   return(0);
2582: }

2584: /*@
2585:    MatMPIAIJSetUseScalableIncreaseOverlap - Set whether the matrix uses a scalable algorithm to compute the overlap

2587:    Collective on Mat

2589:    Input Parameters:
2590: +    A - the matrix
2591: -    sc - PETSC_TRUE indicates use the scalable algorithm (default is not to use the scalable algorithm)

2593:    Level: advanced

2595: @*/
2596: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2597: {
2598:   PetscErrorCode       ierr;

2601:   PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));
2602:   return(0);
2603: }
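/* A minimal, self-contained usage sketch (illustrative only, not part of this file): it assembles a
   small tridiagonal MATMPIAIJ matrix, requests the scalable overlap algorithm, and then grows a
   one-entry index set with MatIncreaseOverlap(); the matrix size and the index set below are
   placeholders and error checking is omitted for brevity.

   #include <petscmat.h>
   int main(int argc,char **argv)
   {
     Mat      A;
     IS       is;
     PetscInt i,rstart,rend,N = 100;

     PetscInitialize(&argc,&argv,NULL,NULL);
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,N,N);
     MatSetType(A,MATMPIAIJ);
     MatMPIAIJSetPreallocation(A,3,NULL,2,NULL);
     MatGetOwnershipRange(A,&rstart,&rend);
     for (i=rstart; i<rend; i++) {
       if (i > 0)   MatSetValue(A,i,i-1,-1.0,INSERT_VALUES);
       MatSetValue(A,i,i,2.0,INSERT_VALUES);
       if (i < N-1) MatSetValue(A,i,i+1,-1.0,INSERT_VALUES);
     }
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

     MatMPIAIJSetUseScalableIncreaseOverlap(A,PETSC_TRUE);
     ISCreateStride(PETSC_COMM_SELF,1,rstart,1,&is);
     MatIncreaseOverlap(A,1,&is,1);

     ISDestroy(&is);
     MatDestroy(&A);
     return PetscFinalize();
   }
*/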

2605: PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2606: {
2607:   PetscErrorCode       ierr;
2608:   PetscBool            sc = PETSC_FALSE,flg;

2611:   PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");
2612:   if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2613:   PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);
2614:   if (flg) {
2615:     MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);
2616:   }
2617:   PetscOptionsTail();
2618:   return(0);
2619: }

2621: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2622: {
2624:   Mat_MPIAIJ     *maij = (Mat_MPIAIJ*)Y->data;
2625:   Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)maij->A->data;

2628:   if (!Y->preallocated) {
2629:     MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2630:   } else if (!aij->nz) {
2631:     PetscInt nonew = aij->nonew;
2632:     MatSeqAIJSetPreallocation(maij->A,1,NULL);
2633:     aij->nonew = nonew;
2634:   }
2635:   MatShift_Basic(Y,a);
2636:   return(0);
2637: }

2639: PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool  *missing,PetscInt *d)
2640: {
2641:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2645:   if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2646:   MatMissingDiagonal(a->A,missing,d);
2647:   if (d) {
2648:     PetscInt rstart;
2649:     MatGetOwnershipRange(A,&rstart,NULL);
2650:     *d += rstart;

2652:   }
2653:   return(0);
2654: }

2656: PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *diag)
2657: {
2658:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2662:   MatInvertVariableBlockDiagonal(a->A,nblocks,bsizes,diag);
2663:   return(0);
2664: }

2666: /* -------------------------------------------------------------------*/
2667: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2668:                                        MatGetRow_MPIAIJ,
2669:                                        MatRestoreRow_MPIAIJ,
2670:                                        MatMult_MPIAIJ,
2671:                                 /* 4*/ MatMultAdd_MPIAIJ,
2672:                                        MatMultTranspose_MPIAIJ,
2673:                                        MatMultTransposeAdd_MPIAIJ,
2674:                                        NULL,
2675:                                        NULL,
2676:                                        NULL,
2677:                                 /*10*/ NULL,
2678:                                        NULL,
2679:                                        NULL,
2680:                                        MatSOR_MPIAIJ,
2681:                                        MatTranspose_MPIAIJ,
2682:                                 /*15*/ MatGetInfo_MPIAIJ,
2683:                                        MatEqual_MPIAIJ,
2684:                                        MatGetDiagonal_MPIAIJ,
2685:                                        MatDiagonalScale_MPIAIJ,
2686:                                        MatNorm_MPIAIJ,
2687:                                 /*20*/ MatAssemblyBegin_MPIAIJ,
2688:                                        MatAssemblyEnd_MPIAIJ,
2689:                                        MatSetOption_MPIAIJ,
2690:                                        MatZeroEntries_MPIAIJ,
2691:                                 /*24*/ MatZeroRows_MPIAIJ,
2692:                                        NULL,
2693:                                        NULL,
2694:                                        NULL,
2695:                                        NULL,
2696:                                 /*29*/ MatSetUp_MPIAIJ,
2697:                                        NULL,
2698:                                        NULL,
2699:                                        MatGetDiagonalBlock_MPIAIJ,
2700:                                        NULL,
2701:                                 /*34*/ MatDuplicate_MPIAIJ,
2702:                                        NULL,
2703:                                        NULL,
2704:                                        NULL,
2705:                                        NULL,
2706:                                 /*39*/ MatAXPY_MPIAIJ,
2707:                                        MatCreateSubMatrices_MPIAIJ,
2708:                                        MatIncreaseOverlap_MPIAIJ,
2709:                                        MatGetValues_MPIAIJ,
2710:                                        MatCopy_MPIAIJ,
2711:                                 /*44*/ MatGetRowMax_MPIAIJ,
2712:                                        MatScale_MPIAIJ,
2713:                                        MatShift_MPIAIJ,
2714:                                        MatDiagonalSet_MPIAIJ,
2715:                                        MatZeroRowsColumns_MPIAIJ,
2716:                                 /*49*/ MatSetRandom_MPIAIJ,
2717:                                        NULL,
2718:                                        NULL,
2719:                                        NULL,
2720:                                        NULL,
2721:                                 /*54*/ MatFDColoringCreate_MPIXAIJ,
2722:                                        NULL,
2723:                                        MatSetUnfactored_MPIAIJ,
2724:                                        MatPermute_MPIAIJ,
2725:                                        NULL,
2726:                                 /*59*/ MatCreateSubMatrix_MPIAIJ,
2727:                                        MatDestroy_MPIAIJ,
2728:                                        MatView_MPIAIJ,
2729:                                        NULL,
2730:                                        NULL,
2731:                                 /*64*/ NULL,
2732:                                        MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2733:                                        NULL,
2734:                                        NULL,
2735:                                        NULL,
2736:                                 /*69*/ MatGetRowMaxAbs_MPIAIJ,
2737:                                        MatGetRowMinAbs_MPIAIJ,
2738:                                        NULL,
2739:                                        NULL,
2740:                                        NULL,
2741:                                        NULL,
2742:                                 /*75*/ MatFDColoringApply_AIJ,
2743:                                        MatSetFromOptions_MPIAIJ,
2744:                                        NULL,
2745:                                        NULL,
2746:                                        MatFindZeroDiagonals_MPIAIJ,
2747:                                 /*80*/ NULL,
2748:                                        NULL,
2749:                                        NULL,
2750:                                 /*83*/ MatLoad_MPIAIJ,
2751:                                        MatIsSymmetric_MPIAIJ,
2752:                                        NULL,
2753:                                        NULL,
2754:                                        NULL,
2755:                                        NULL,
2756:                                 /*89*/ NULL,
2757:                                        NULL,
2758:                                        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2759:                                        NULL,
2760:                                        NULL,
2761:                                 /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2762:                                        NULL,
2763:                                        NULL,
2764:                                        NULL,
2765:                                        MatBindToCPU_MPIAIJ,
2766:                                 /*99*/ MatProductSetFromOptions_MPIAIJ,
2767:                                        NULL,
2768:                                        NULL,
2769:                                        MatConjugate_MPIAIJ,
2770:                                        NULL,
2771:                                 /*104*/MatSetValuesRow_MPIAIJ,
2772:                                        MatRealPart_MPIAIJ,
2773:                                        MatImaginaryPart_MPIAIJ,
2774:                                        NULL,
2775:                                        NULL,
2776:                                 /*109*/NULL,
2777:                                        NULL,
2778:                                        MatGetRowMin_MPIAIJ,
2779:                                        NULL,
2780:                                        MatMissingDiagonal_MPIAIJ,
2781:                                 /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2782:                                        NULL,
2783:                                        MatGetGhosts_MPIAIJ,
2784:                                        NULL,
2785:                                        NULL,
2786:                                 /*119*/MatMultDiagonalBlock_MPIAIJ,
2787:                                        NULL,
2788:                                        NULL,
2789:                                        NULL,
2790:                                        MatGetMultiProcBlock_MPIAIJ,
2791:                                 /*124*/MatFindNonzeroRows_MPIAIJ,
2792:                                        MatGetColumnNorms_MPIAIJ,
2793:                                        MatInvertBlockDiagonal_MPIAIJ,
2794:                                        MatInvertVariableBlockDiagonal_MPIAIJ,
2795:                                        MatCreateSubMatricesMPI_MPIAIJ,
2796:                                 /*129*/NULL,
2797:                                        NULL,
2798:                                        NULL,
2799:                                        MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2800:                                        NULL,
2801:                                 /*134*/NULL,
2802:                                        NULL,
2803:                                        NULL,
2804:                                        NULL,
2805:                                        NULL,
2806:                                 /*139*/MatSetBlockSizes_MPIAIJ,
2807:                                        NULL,
2808:                                        NULL,
2809:                                        MatFDColoringSetUp_MPIXAIJ,
2810:                                        MatFindOffBlockDiagonalEntries_MPIAIJ,
2811:                                        MatCreateMPIMatConcatenateSeqMat_MPIAIJ,
2812:                                 /*145*/NULL,
2813:                                        NULL,
2814:                                        NULL
2815: };
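/* The table above is the function table installed for MATMPIAIJ: each slot corresponds to a MatOperation
   (the bracketed numbers are slot indices) and a NULL entry means the operation is not provided through
   this table for this type (it may be unsupported or supplied by other means) */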

2817: /* ----------------------------------------------------------------------------------------*/

2819: PetscErrorCode  MatStoreValues_MPIAIJ(Mat mat)
2820: {
2821:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2825:   MatStoreValues(aij->A);
2826:   MatStoreValues(aij->B);
2827:   return(0);
2828: }

2830: PetscErrorCode  MatRetrieveValues_MPIAIJ(Mat mat)
2831: {
2832:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2836:   MatRetrieveValues(aij->A);
2837:   MatRetrieveValues(aij->B);
2838:   return(0);
2839: }

2841: PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2842: {
2843:   Mat_MPIAIJ     *b;
2845:   PetscMPIInt    size;

2848:   PetscLayoutSetUp(B->rmap);
2849:   PetscLayoutSetUp(B->cmap);
2850:   b = (Mat_MPIAIJ*)B->data;

2852: #if defined(PETSC_USE_CTABLE)
2853:   PetscTableDestroy(&b->colmap);
2854: #else
2855:   PetscFree(b->colmap);
2856: #endif
2857:   PetscFree(b->garray);
2858:   VecDestroy(&b->lvec);
2859:   VecScatterDestroy(&b->Mvctx);

2861:   /* Because the off-diagonal block b->B may have been resized we simply destroy it and create a new one each time */
2862:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
2863:   MatDestroy(&b->B);
2864:   MatCreate(PETSC_COMM_SELF,&b->B);
2865:   MatSetSizes(b->B,B->rmap->n,size > 1 ? B->cmap->N : 0,B->rmap->n,size > 1 ? B->cmap->N : 0);
2866:   MatSetBlockSizesFromMats(b->B,B,B);
2867:   MatSetType(b->B,MATSEQAIJ);
2868:   PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);

2870:   if (!B->preallocated) {
2871:     MatCreate(PETSC_COMM_SELF,&b->A);
2872:     MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2873:     MatSetBlockSizesFromMats(b->A,B,B);
2874:     MatSetType(b->A,MATSEQAIJ);
2875:     PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2876:   }

2878:   MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2879:   MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2880:   B->preallocated  = PETSC_TRUE;
2881:   B->was_assembled = PETSC_FALSE;
2882:   B->assembled     = PETSC_FALSE;
2883:   return(0);
2884: }

2886: PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2887: {
2888:   Mat_MPIAIJ     *b;

2893:   PetscLayoutSetUp(B->rmap);
2894:   PetscLayoutSetUp(B->cmap);
2895:   b = (Mat_MPIAIJ*)B->data;

2897: #if defined(PETSC_USE_CTABLE)
2898:   PetscTableDestroy(&b->colmap);
2899: #else
2900:   PetscFree(b->colmap);
2901: #endif
2902:   PetscFree(b->garray);
2903:   VecDestroy(&b->lvec);
2904:   VecScatterDestroy(&b->Mvctx);

2906:   MatResetPreallocation(b->A);
2907:   MatResetPreallocation(b->B);
2908:   B->preallocated  = PETSC_TRUE;
2909:   B->was_assembled = PETSC_FALSE;
2910:   B->assembled = PETSC_FALSE;
2911:   return(0);
2912: }

2914: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2915: {
2916:   Mat            mat;
2917:   Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;

2921:   *newmat = NULL;
2922:   MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2923:   MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2924:   MatSetBlockSizesFromMats(mat,matin,matin);
2925:   MatSetType(mat,((PetscObject)matin)->type_name);
2926:   a       = (Mat_MPIAIJ*)mat->data;

2928:   mat->factortype   = matin->factortype;
2929:   mat->assembled    = matin->assembled;
2930:   mat->insertmode   = NOT_SET_VALUES;
2931:   mat->preallocated = matin->preallocated;

2933:   a->size         = oldmat->size;
2934:   a->rank         = oldmat->rank;
2935:   a->donotstash   = oldmat->donotstash;
2936:   a->roworiented  = oldmat->roworiented;
2937:   a->rowindices   = NULL;
2938:   a->rowvalues    = NULL;
2939:   a->getrowactive = PETSC_FALSE;

2941:   PetscLayoutReference(matin->rmap,&mat->rmap);
2942:   PetscLayoutReference(matin->cmap,&mat->cmap);

2944:   if (oldmat->colmap) {
2945: #if defined(PETSC_USE_CTABLE)
2946:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2947: #else
2948:     PetscMalloc1(mat->cmap->N,&a->colmap);
2949:     PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2950:     PetscArraycpy(a->colmap,oldmat->colmap,mat->cmap->N);
2951: #endif
2952:   } else a->colmap = NULL;
2953:   if (oldmat->garray) {
2954:     PetscInt len;
2955:     len  = oldmat->B->cmap->n;
2956:     PetscMalloc1(len+1,&a->garray);
2957:     PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2958:     if (len) { PetscArraycpy(a->garray,oldmat->garray,len); }
2959:   } else a->garray = NULL;

2961:   /* It may happen that MatDuplicate is called with a non-assembled matrix;
2962:      in fact, MatDuplicate only requires the matrix to be preallocated.
2963:      This may happen inside a DMCreateMatrix_Shell */
2964:   if (oldmat->lvec) {
2965:     VecDuplicate(oldmat->lvec,&a->lvec);
2966:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2967:   }
2968:   if (oldmat->Mvctx) {
2969:     VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2970:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);
2971:   }
2972:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2973:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2974:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2975:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2976:   PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2977:   *newmat = mat;
2978:   return(0);
2979: }

2981: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2982: {
2983:   PetscBool      isbinary, ishdf5;

2989:   /* force binary viewer to load .info file if it has not yet done so */
2990:   PetscViewerSetUp(viewer);
2991:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
2992:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERHDF5,  &ishdf5);
2993:   if (isbinary) {
2994:     MatLoad_MPIAIJ_Binary(newMat,viewer);
2995:   } else if (ishdf5) {
2996: #if defined(PETSC_HAVE_HDF5)
2997:     MatLoad_AIJ_HDF5(newMat,viewer);
2998: #else
2999:     SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
3000: #endif
3001:   } else {
3002:     SETERRQ2(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"Viewer type %s not yet supported for reading %s matrices",((PetscObject)viewer)->type_name,((PetscObject)newMat)->type_name);
3003:   }
3004:   return(0);
3005: }

3007: PetscErrorCode MatLoad_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
3008: {
3009:   PetscInt       header[4],M,N,m,nz,rows,cols,sum,i;
3010:   PetscInt       *rowidxs,*colidxs;
3011:   PetscScalar    *matvals;

3015:   PetscViewerSetUp(viewer);

3017:   /* read in matrix header */
3018:   PetscViewerBinaryRead(viewer,header,4,NULL,PETSC_INT);
3019:   if (header[0] != MAT_FILE_CLASSID) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Not a matrix object in file");
3020:   M  = header[1]; N = header[2]; nz = header[3];
3021:   if (M < 0) SETERRQ1(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Matrix row size (%D) in file is negative",M);
3022:   if (N < 0) SETERRQ1(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Matrix column size (%D) in file is negative",N);
3023:   if (nz < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MPIAIJ");

3025:   /* set block sizes from the viewer's .info file */
3026:   MatLoad_Binary_BlockSizes(mat,viewer);
3027:   /* set global sizes if not set already */
3028:   if (mat->rmap->N < 0) mat->rmap->N = M;
3029:   if (mat->cmap->N < 0) mat->cmap->N = N;
3030:   PetscLayoutSetUp(mat->rmap);
3031:   PetscLayoutSetUp(mat->cmap);

3033:   /* check if the matrix sizes are correct */
3034:   MatGetSize(mat,&rows,&cols);
3035:   if (M != rows || N != cols) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Matrix in file of different sizes (%D, %D) than the input matrix (%D, %D)",M,N,rows,cols);

3037:   /* read in row lengths and build row indices */
3038:   MatGetLocalSize(mat,&m,NULL);
3039:   PetscMalloc1(m+1,&rowidxs);
3040:   PetscViewerBinaryReadAll(viewer,rowidxs+1,m,PETSC_DECIDE,M,PETSC_INT);
3041:   rowidxs[0] = 0; for (i=0; i<m; i++) rowidxs[i+1] += rowidxs[i];
3042:   MPIU_Allreduce(&rowidxs[m],&sum,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)viewer));
3043:   if (sum != nz) SETERRQ2(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Inconsistent matrix data in file: nonzeros = %D, sum-row-lengths = %D\n",nz,sum);
3044:   /* read in column indices and matrix values */
3045:   PetscMalloc2(rowidxs[m],&colidxs,rowidxs[m],&matvals);
3046:   PetscViewerBinaryReadAll(viewer,colidxs,rowidxs[m],PETSC_DETERMINE,PETSC_DETERMINE,PETSC_INT);
3047:   PetscViewerBinaryReadAll(viewer,matvals,rowidxs[m],PETSC_DETERMINE,PETSC_DETERMINE,PETSC_SCALAR);
3048:   /* store matrix indices and values */
3049:   MatMPIAIJSetPreallocationCSR(mat,rowidxs,colidxs,matvals);
3050:   PetscFree(rowidxs);
3051:   PetscFree2(colidxs,matvals);
3052:   return(0);
3053: }
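
/*
   Illustrative usage sketch (not part of the routines above): loading an AIJ
   matrix from a PETSc binary file reaches MatLoad_MPIAIJ_Binary() when the
   matrix lives on a parallel communicator.  The file name "matrix.dat" is an
   assumption made for the example.
*/
PetscErrorCode MatLoadAIJExample(MPI_Comm comm,Mat *A)
{
  PetscViewer viewer;

  PetscViewerBinaryOpen(comm,"matrix.dat",FILE_MODE_READ,&viewer);
  MatCreate(comm,A);
  MatSetType(*A,MATAIJ);               /* MATMPIAIJ on more than one process */
  MatLoad(*A,viewer);                  /* dispatches to MatLoad_MPIAIJ() */
  PetscViewerDestroy(&viewer);
  return(0);
}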

3055: /* Not scalable because of ISAllGather() unless getting all columns. */
3056: PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
3057: {
3059:   IS             iscol_local;
3060:   PetscBool      isstride;
3061:   PetscMPIInt    lisstride=0,gisstride;

3064:   /* check if we are grabbing all columns */
3065:   PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);

3067:   if (isstride) {
3068:     PetscInt  start,len,mstart,mlen;
3069:     ISStrideGetInfo(iscol,&start,NULL);
3070:     ISGetLocalSize(iscol,&len);
3071:     MatGetOwnershipRangeColumn(mat,&mstart,&mlen);
3072:     if (mstart == start && mlen-mstart == len) lisstride = 1;
3073:   }

3075:   MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
3076:   if (gisstride) {
3077:     PetscInt N;
3078:     MatGetSize(mat,NULL,&N);
3079:     ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol_local);
3080:     ISSetIdentity(iscol_local);
3081:     PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");
3082:   } else {
3083:     PetscInt cbs;
3084:     ISGetBlockSize(iscol,&cbs);
3085:     ISAllGather(iscol,&iscol_local);
3086:     ISSetBlockSize(iscol_local,cbs);
3087:   }

3089:   *isseq = iscol_local;
3090:   return(0);
3091: }

3093: /*
3094:  Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
3095:  (see MatCreateSubMatrix_MPIAIJ_nonscalable)

3097:  Input Parameters:
3098:    mat - matrix
3099:    isrow - parallel row index set; its local indices are a subset of local rows of mat,
3100:            i.e., mat->rstart <= isrow[i] < mat->rend
3101:    iscol - parallel column index set; its local indices are a subset of local columns of mat,
3102:            i.e., mat->cstart <= iscol[i] < mat->cend
3103:  Output Parameters:
3104:    isrow_d,iscol_d - sequential row and column index sets for retrieving mat->A
3105:    iscol_o - sequential column index set for retrieving mat->B
3106:    garray - column map; garray[i] indicates global location of iscol_o[i] in iscol
3107:  */
3108: PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat,IS isrow,IS iscol,IS *isrow_d,IS *iscol_d,IS *iscol_o,const PetscInt *garray[])
3109: {
3111:   Vec            x,cmap;
3112:   const PetscInt *is_idx;
3113:   PetscScalar    *xarray,*cmaparray;
3114:   PetscInt       ncols,isstart,*idx,m,rstart,*cmap1,count;
3115:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3116:   Mat            B=a->B;
3117:   Vec            lvec=a->lvec,lcmap;
3118:   PetscInt       i,cstart,cend,Bn=B->cmap->N;
3119:   MPI_Comm       comm;
3120:   VecScatter     Mvctx=a->Mvctx;

3123:   PetscObjectGetComm((PetscObject)mat,&comm);
3124:   ISGetLocalSize(iscol,&ncols);

3126:   /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3127:   MatCreateVecs(mat,&x,NULL);
3128:   VecSet(x,-1.0);
3129:   VecDuplicate(x,&cmap);
3130:   VecSet(cmap,-1.0);

3132:   /* Get start indices */
3133:   MPI_Scan(&ncols,&isstart,1,MPIU_INT,MPI_SUM,comm);
3134:   isstart -= ncols;
3135:   MatGetOwnershipRangeColumn(mat,&cstart,&cend);

3137:   ISGetIndices(iscol,&is_idx);
3138:   VecGetArray(x,&xarray);
3139:   VecGetArray(cmap,&cmaparray);
3140:   PetscMalloc1(ncols,&idx);
3141:   for (i=0; i<ncols; i++) {
3142:     xarray[is_idx[i]-cstart]    = (PetscScalar)is_idx[i];
3143:     cmaparray[is_idx[i]-cstart] = i + isstart;      /* global index of iscol[i] */
3144:     idx[i]                      = is_idx[i]-cstart; /* local index of iscol[i]  */
3145:   }
3146:   VecRestoreArray(x,&xarray);
3147:   VecRestoreArray(cmap,&cmaparray);
3148:   ISRestoreIndices(iscol,&is_idx);

3150:   /* Get iscol_d */
3151:   ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,iscol_d);
3152:   ISGetBlockSize(iscol,&i);
3153:   ISSetBlockSize(*iscol_d,i);

3155:   /* Get isrow_d */
3156:   ISGetLocalSize(isrow,&m);
3157:   rstart = mat->rmap->rstart;
3158:   PetscMalloc1(m,&idx);
3159:   ISGetIndices(isrow,&is_idx);
3160:   for (i=0; i<m; i++) idx[i] = is_idx[i]-rstart;
3161:   ISRestoreIndices(isrow,&is_idx);

3163:   ISCreateGeneral(PETSC_COMM_SELF,m,idx,PETSC_OWN_POINTER,isrow_d);
3164:   ISGetBlockSize(isrow,&i);
3165:   ISSetBlockSize(*isrow_d,i);

3167:   /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3168:   VecScatterBegin(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3169:   VecScatterEnd(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);

3171:   VecDuplicate(lvec,&lcmap);

3173:   VecScatterBegin(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3174:   VecScatterEnd(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);

3176:   /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3177:   /* off-process column indices */
3178:   count = 0;
3179:   PetscMalloc1(Bn,&idx);
3180:   PetscMalloc1(Bn,&cmap1);

3182:   VecGetArray(lvec,&xarray);
3183:   VecGetArray(lcmap,&cmaparray);
3184:   for (i=0; i<Bn; i++) {
3185:     if (PetscRealPart(xarray[i]) > -1.0) {
3186:       idx[count]     = i;                   /* local column index in off-diagonal part B */
3187:       cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]);  /* column index in submat */
3188:       count++;
3189:     }
3190:   }
3191:   VecRestoreArray(lvec,&xarray);
3192:   VecRestoreArray(lcmap,&cmaparray);

3194:   ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,iscol_o);
3195:   /* cannot ensure iscol_o has same blocksize as iscol! */

3197:   PetscFree(idx);
3198:   *garray = cmap1;

3200:   VecDestroy(&x);
3201:   VecDestroy(&cmap);
3202:   VecDestroy(&lcmap);
3203:   return(0);
3204: }

3206: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3207: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *submat)
3208: {
3210:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)mat->data,*asub;
3211:   Mat            M = NULL;
3212:   MPI_Comm       comm;
3213:   IS             iscol_d,isrow_d,iscol_o;
3214:   Mat            Asub = NULL,Bsub = NULL;
3215:   PetscInt       n;

3218:   PetscObjectGetComm((PetscObject)mat,&comm);

3220:   if (call == MAT_REUSE_MATRIX) {
3221:     /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3222:     PetscObjectQuery((PetscObject)*submat,"isrow_d",(PetscObject*)&isrow_d);
3223:     if (!isrow_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow_d passed in was not used before, cannot reuse");

3225:     PetscObjectQuery((PetscObject)*submat,"iscol_d",(PetscObject*)&iscol_d);
3226:     if (!iscol_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_d passed in was not used before, cannot reuse");

3228:     PetscObjectQuery((PetscObject)*submat,"iscol_o",(PetscObject*)&iscol_o);
3229:     if (!iscol_o) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_o passed in was not used before, cannot reuse");

3231:     /* Update diagonal and off-diagonal portions of submat */
3232:     asub = (Mat_MPIAIJ*)(*submat)->data;
3233:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->A);
3234:     ISGetLocalSize(iscol_o,&n);
3235:     if (n) {
3236:       MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->B);
3237:     }
3238:     MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3239:     MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);

3241:   } else { /* call == MAT_INITIAL_MATRIX */
3242:     const PetscInt *garray;
3243:     PetscInt        BsubN;

3245:     /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3246:     ISGetSeqIS_SameColDist_Private(mat,isrow,iscol,&isrow_d,&iscol_d,&iscol_o,&garray);

3248:     /* Create local submatrices Asub and Bsub */
3249:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Asub);
3250:     MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Bsub);

3252:     /* Create submatrix M */
3253:     MatCreateMPIAIJWithSeqAIJ(comm,Asub,Bsub,garray,&M);

3255:     /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3256:     asub = (Mat_MPIAIJ*)M->data;

3258:     ISGetLocalSize(iscol_o,&BsubN);
3259:     n = asub->B->cmap->N;
3260:     if (BsubN > n) {
3261:       /* This case can be tested using ~petsc/src/tao/bound/tutorials/runplate2_3 */
3262:       const PetscInt *idx;
3263:       PetscInt       i,j,*idx_new,*subgarray = asub->garray;
3264:       PetscInfo2(M,"submatrix Bn %D != BsubN %D, update iscol_o\n",n,BsubN);

3266:       PetscMalloc1(n,&idx_new);
3267:       j = 0;
3268:       ISGetIndices(iscol_o,&idx);
3269:       for (i=0; i<n; i++) {
3270:         if (j >= BsubN) break;
3271:         while (subgarray[i] > garray[j]) j++;

3273:         if (subgarray[i] == garray[j]) {
3274:           idx_new[i] = idx[j++];
3275:         } else SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"subgarray[%D]=%D cannot be smaller than garray[%D]=%D",i,subgarray[i],j,garray[j]);
3276:       }
3277:       ISRestoreIndices(iscol_o,&idx);

3279:       ISDestroy(&iscol_o);
3280:       ISCreateGeneral(PETSC_COMM_SELF,n,idx_new,PETSC_OWN_POINTER,&iscol_o);

3282:     } else if (BsubN < n) {
3283:       SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Columns of Bsub (%D) cannot be smaller than columns of B (%D)",BsubN,asub->B->cmap->N);
3284:     }

3286:     PetscFree(garray);
3287:     *submat = M;

3289:     /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3290:     PetscObjectCompose((PetscObject)M,"isrow_d",(PetscObject)isrow_d);
3291:     ISDestroy(&isrow_d);

3293:     PetscObjectCompose((PetscObject)M,"iscol_d",(PetscObject)iscol_d);
3294:     ISDestroy(&iscol_d);

3296:     PetscObjectCompose((PetscObject)M,"iscol_o",(PetscObject)iscol_o);
3297:     ISDestroy(&iscol_o);
3298:   }
3299:   return(0);
3300: }

3302: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3303: {
3305:   IS             iscol_local=NULL,isrow_d;
3306:   PetscInt       csize;
3307:   PetscInt       n,i,j,start,end;
3308:   PetscBool      sameRowDist=PETSC_FALSE,sameDist[2],tsameDist[2];
3309:   MPI_Comm       comm;

3312:   /* If isrow has same processor distribution as mat,
3313:      call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3314:   if (call == MAT_REUSE_MATRIX) {
3315:     PetscObjectQuery((PetscObject)*newmat,"isrow_d",(PetscObject*)&isrow_d);
3316:     if (isrow_d) {
3317:       sameRowDist  = PETSC_TRUE;
3318:       tsameDist[1] = PETSC_TRUE; /* sameColDist */
3319:     } else {
3320:       PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);
3321:       if (iscol_local) {
3322:         sameRowDist  = PETSC_TRUE;
3323:         tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3324:       }
3325:     }
3326:   } else {
3327:     /* Check if isrow has same processor distribution as mat */
3328:     sameDist[0] = PETSC_FALSE;
3329:     ISGetLocalSize(isrow,&n);
3330:     if (!n) {
3331:       sameDist[0] = PETSC_TRUE;
3332:     } else {
3333:       ISGetMinMax(isrow,&i,&j);
3334:       MatGetOwnershipRange(mat,&start,&end);
3335:       if (i >= start && j < end) {
3336:         sameDist[0] = PETSC_TRUE;
3337:       }
3338:     }

3340:     /* Check if iscol has same processor distribution as mat */
3341:     sameDist[1] = PETSC_FALSE;
3342:     ISGetLocalSize(iscol,&n);
3343:     if (!n) {
3344:       sameDist[1] = PETSC_TRUE;
3345:     } else {
3346:       ISGetMinMax(iscol,&i,&j);
3347:       MatGetOwnershipRangeColumn(mat,&start,&end);
3348:       if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3349:     }

3351:     PetscObjectGetComm((PetscObject)mat,&comm);
3352:     MPIU_Allreduce(&sameDist,&tsameDist,2,MPIU_BOOL,MPI_LAND,comm);
3353:     sameRowDist = tsameDist[0];
3354:   }

3356:   if (sameRowDist) {
3357:     if (tsameDist[1]) { /* sameRowDist & sameColDist */
3358:       /* isrow and iscol have same processor distribution as mat */
3359:       MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat,isrow,iscol,call,newmat);
3360:       return(0);
3361:     } else { /* sameRowDist */
3362:       /* isrow has same processor distribution as mat */
3363:       if (call == MAT_INITIAL_MATRIX) {
3364:         PetscBool sorted;
3365:         ISGetSeqIS_Private(mat,iscol,&iscol_local);
3366:         ISGetLocalSize(iscol_local,&n); /* local size of iscol_local = global columns of newmat */
3367:         ISGetSize(iscol,&i);
3368:         if (n != i) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"n %D != size of iscol %D",n,i);

3370:         ISSorted(iscol_local,&sorted);
3371:         if (sorted) {
3372:           /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
3373:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,iscol_local,MAT_INITIAL_MATRIX,newmat);
3374:           return(0);
3375:         }
3376:       } else { /* call == MAT_REUSE_MATRIX */
3377:         IS iscol_sub;
3378:         PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3379:         if (iscol_sub) {
3380:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,NULL,call,newmat);
3381:           return(0);
3382:         }
3383:       }
3384:     }
3385:   }

3387:   /* General case: iscol -> iscol_local which has global size of iscol */
3388:   if (call == MAT_REUSE_MATRIX) {
3389:     PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3390:     if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3391:   } else {
3392:     if (!iscol_local) {
3393:       ISGetSeqIS_Private(mat,iscol,&iscol_local);
3394:     }
3395:   }

3397:   ISGetLocalSize(iscol,&csize);
3398:   MatCreateSubMatrix_MPIAIJ_nonscalable(mat,isrow,iscol_local,csize,call,newmat);

3400:   if (call == MAT_INITIAL_MATRIX) {
3401:     PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3402:     ISDestroy(&iscol_local);
3403:   }
3404:   return(0);
3405: }
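
/*
   Illustrative usage sketch of the public entry point that dispatches to
   MatCreateSubMatrix_MPIAIJ() above.  The particular choice of index sets
   (first half of the locally owned rows, all locally owned columns) is an
   assumption made for the example; both sets share the layout of A, so the
   SameRowColDist path is taken and the composed index sets are reused on the
   second call.
*/
PetscErrorCode MatCreateSubMatrixExample(Mat A,Mat *Asub)
{
  IS       isrow,iscol;
  PetscInt rstart,rend,cstart,cend;
  MPI_Comm comm;

  PetscObjectGetComm((PetscObject)A,&comm);
  MatGetOwnershipRange(A,&rstart,&rend);
  MatGetOwnershipRangeColumn(A,&cstart,&cend);
  ISCreateStride(comm,(rend-rstart)/2,rstart,1,&isrow);  /* first half of my rows */
  ISCreateStride(comm,cend-cstart,cstart,1,&iscol);      /* my columns, so all columns are kept */

  MatCreateSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,Asub);
  MatCreateSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,Asub);  /* reuses the composed isrow_d/iscol_d/iscol_o */

  ISDestroy(&isrow);
  ISDestroy(&iscol);
  return(0);
}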

3407: /*@C
3408:      MatCreateMPIAIJWithSeqAIJ - creates a MPIAIJ matrix using SeqAIJ matrices that contain the "diagonal"
3409:          and "off-diagonal" part of the matrix in CSR format.

3411:    Collective

3413:    Input Parameters:
3414: +  comm - MPI communicator
3415: .  A - "diagonal" portion of matrix
3416: .  B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3417: -  garray - global index of B columns

3419:    Output Parameter:
3420: .   mat - the matrix, with input A as its local diagonal matrix
3421:    Level: advanced

3423:    Notes:
3424:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3425:        A becomes part of output mat, B is destroyed by this routine. The user cannot use A and B anymore.

3427: .seealso: MatCreateMPIAIJWithSplitArrays()
3428: @*/
3429: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm,Mat A,Mat B,const PetscInt garray[],Mat *mat)
3430: {
3431:   PetscErrorCode    ierr;
3432:   Mat_MPIAIJ        *maij;
3433:   Mat_SeqAIJ        *b=(Mat_SeqAIJ*)B->data,*bnew;
3434:   PetscInt          *oi=b->i,*oj=b->j,i,nz,col;
3435:   const PetscScalar *oa;
3436:   Mat               Bnew;
3437:   PetscInt          m,n,N;

3440:   MatCreate(comm,mat);
3441:   MatGetSize(A,&m,&n);
3442:   if (m != B->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Am %D != Bm %D",m,B->rmap->N);
3443:   if (A->rmap->bs != B->rmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A row bs %D != B row bs %D",A->rmap->bs,B->rmap->bs);
3444:   /* remove check below; When B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be same as A */
3445:   /* if (A->cmap->bs != B->cmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %D != B column bs %D",A->cmap->bs,B->cmap->bs); */

3447:   /* Get global columns of mat */
3448:   MPIU_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,comm);

3450:   MatSetSizes(*mat,m,n,PETSC_DECIDE,N);
3451:   MatSetType(*mat,MATMPIAIJ);
3452:   MatSetBlockSizes(*mat,A->rmap->bs,A->cmap->bs);
3453:   maij = (Mat_MPIAIJ*)(*mat)->data;

3455:   (*mat)->preallocated = PETSC_TRUE;

3457:   PetscLayoutSetUp((*mat)->rmap);
3458:   PetscLayoutSetUp((*mat)->cmap);

3460:   /* Set A as diagonal portion of *mat */
3461:   maij->A = A;

3463:   nz = oi[m];
3464:   for (i=0; i<nz; i++) {
3465:     col   = oj[i];
3466:     oj[i] = garray[col];
3467:   }

3469:   /* Set Bnew as off-diagonal portion of *mat */
3470:   MatSeqAIJGetArrayRead(B,&oa);
3471:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,N,oi,oj,(PetscScalar*)oa,&Bnew);
3472:   MatSeqAIJRestoreArrayRead(B,&oa);
3473:   bnew        = (Mat_SeqAIJ*)Bnew->data;
3474:   bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3475:   maij->B     = Bnew;

3477:   if (B->rmap->N != Bnew->rmap->N) SETERRQ2(PETSC_COMM_SELF,0,"BN %D != BnewN %D",B->rmap->N,Bnew->rmap->N);

3479:   b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3480:   b->free_a       = PETSC_FALSE;
3481:   b->free_ij      = PETSC_FALSE;
3482:   MatDestroy(&B);

3484:   bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3485:   bnew->free_a       = PETSC_TRUE;
3486:   bnew->free_ij      = PETSC_TRUE;

3488:   /* condense columns of maij->B */
3489:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
3490:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
3491:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
3492:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
3493:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3494:   return(0);
3495: }
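
/*
   Minimal usage sketch for MatCreateMPIAIJWithSeqAIJ(); everything concrete
   here (exactly two processes, a 4x4 global matrix, identity-like blocks) is
   an assumption made for the example.  A holds the local 2x2 "diagonal"
   block; B uses local column numbering 0..1 and garray[] maps those columns
   to the global columns owned by the other process.  A and B are taken over
   by the new matrix (B is destroyed), as documented above.
*/
PetscErrorCode MatCreateWithSeqAIJExample(MPI_Comm comm,Mat *C)
{
  Mat         A,B;
  PetscMPIInt rank;
  PetscInt    row,garray[2];

  MPI_Comm_rank(comm,&rank);                       /* sketch assumes exactly two processes */

  MatCreateSeqAIJ(PETSC_COMM_SELF,2,2,1,NULL,&A);  /* local "diagonal" block */
  MatCreateSeqAIJ(PETSC_COMM_SELF,2,2,1,NULL,&B);  /* local "off-diagonal" block */
  for (row=0; row<2; row++) {
    MatSetValue(A,row,row,1.0,INSERT_VALUES);
    MatSetValue(B,row,row,2.0,INSERT_VALUES);      /* columns here are B's local columns */
  }
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY); MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY); MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

  garray[0] = rank ? 0 : 2;                        /* global columns owned by the other rank */
  garray[1] = rank ? 1 : 3;

  MatCreateMPIAIJWithSeqAIJ(comm,A,B,garray,C);
  return(0);
}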

3497: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);

3499: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat,IS isrow,IS iscol,IS iscol_local,MatReuse call,Mat *newmat)
3500: {
3502:   PetscInt       i,m,n,rstart,row,rend,nz,j,bs,cbs;
3503:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3504:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3505:   Mat            M,Msub,B=a->B;
3506:   MatScalar      *aa;
3507:   Mat_SeqAIJ     *aij;
3508:   PetscInt       *garray = a->garray,*colsub,Ncols;
3509:   PetscInt       count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3510:   IS             iscol_sub,iscmap;
3511:   const PetscInt *is_idx,*cmap;
3512:   PetscBool      allcolumns=PETSC_FALSE;
3513:   MPI_Comm       comm;

3516:   PetscObjectGetComm((PetscObject)mat,&comm);
3517:   if (call == MAT_REUSE_MATRIX) {
3518:     PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3519:     if (!iscol_sub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"SubIScol passed in was not used before, cannot reuse");
3520:     ISGetLocalSize(iscol_sub,&count);

3522:     PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);
3523:     if (!iscmap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Subcmap passed in was not used before, cannot reuse");

3525:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);
3526:     if (!Msub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");

3528:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);

3530:   } else { /* call == MAT_INITIAL_MATRIX */
3531:     PetscBool flg;

3533:     ISGetLocalSize(iscol,&n);
3534:     ISGetSize(iscol,&Ncols);

3536:     /* (1) iscol -> nonscalable iscol_local */
3537:     /* Check for special case: each processor gets entire matrix columns */
3538:     ISIdentity(iscol_local,&flg);
3539:     if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3540:     MPIU_Allreduce(MPI_IN_PLACE,&allcolumns,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)mat));
3541:     if (allcolumns) {
3542:       iscol_sub = iscol_local;
3543:       PetscObjectReference((PetscObject)iscol_local);
3544:       ISCreateStride(PETSC_COMM_SELF,n,0,1,&iscmap);

3546:     } else {
3547:       /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted, it can have duplicate indices */
3548:       PetscInt *idx,*cmap1,k;
3549:       PetscMalloc1(Ncols,&idx);
3550:       PetscMalloc1(Ncols,&cmap1);
3551:       ISGetIndices(iscol_local,&is_idx);
3552:       count = 0;
3553:       k     = 0;
3554:       for (i=0; i<Ncols; i++) {
3555:         j = is_idx[i];
3556:         if (j >= cstart && j < cend) {
3557:           /* diagonal part of mat */
3558:           idx[count]     = j;
3559:           cmap1[count++] = i; /* column index in submat */
3560:         } else if (Bn) {
3561:           /* off-diagonal part of mat */
3562:           if (j == garray[k]) {
3563:             idx[count]     = j;
3564:             cmap1[count++] = i;  /* column index in submat */
3565:           } else if (j > garray[k]) {
3566:             while (j > garray[k] && k < Bn-1) k++;
3567:             if (j == garray[k]) {
3568:               idx[count]     = j;
3569:               cmap1[count++] = i; /* column index in submat */
3570:             }
3571:           }
3572:         }
3573:       }
3574:       ISRestoreIndices(iscol_local,&is_idx);

3576:       ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_OWN_POINTER,&iscol_sub);
3577:       ISGetBlockSize(iscol,&cbs);
3578:       ISSetBlockSize(iscol_sub,cbs);

3580:       ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local),count,cmap1,PETSC_OWN_POINTER,&iscmap);
3581:     }

3583:     /* (3) Create sequential Msub */
3584:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,allcolumns,&Msub);
3585:   }

3587:   ISGetLocalSize(iscol_sub,&count);
3588:   aij  = (Mat_SeqAIJ*)(Msub)->data;
3589:   ii   = aij->i;
3590:   ISGetIndices(iscmap,&cmap);

3592:   /*
3593:       m - number of local rows
3594:       Ncols - number of columns (same on all processors)
3595:       rstart - first row in new global matrix generated
3596:   */
3597:   MatGetSize(Msub,&m,NULL);

3599:   if (call == MAT_INITIAL_MATRIX) {
3600:     /* (4) Create parallel newmat */
3601:     PetscMPIInt    rank,size;
3602:     PetscInt       csize;

3604:     MPI_Comm_size(comm,&size);
3605:     MPI_Comm_rank(comm,&rank);

3607:     /*
3608:         Determine the number of non-zeros in the diagonal and off-diagonal
3609:         portions of the matrix in order to do correct preallocation
3610:     */

3612:     /* first get start and end of "diagonal" columns */
3613:     ISGetLocalSize(iscol,&csize);
3614:     if (csize == PETSC_DECIDE) {
3615:       ISGetSize(isrow,&mglobal);
3616:       if (mglobal == Ncols) { /* square matrix */
3617:         nlocal = m;
3618:       } else {
3619:         nlocal = Ncols/size + ((Ncols % size) > rank);
3620:       }
3621:     } else {
3622:       nlocal = csize;
3623:     }
3624:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3625:     rstart = rend - nlocal;
3626:     if (rank == size - 1 && rend != Ncols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,Ncols);

3628:     /* next, compute all the lengths */
3629:     jj    = aij->j;
3630:     PetscMalloc1(2*m+1,&dlens);
3631:     olens = dlens + m;
3632:     for (i=0; i<m; i++) {
3633:       jend = ii[i+1] - ii[i];
3634:       olen = 0;
3635:       dlen = 0;
3636:       for (j=0; j<jend; j++) {
3637:         if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3638:         else dlen++;
3639:         jj++;
3640:       }
3641:       olens[i] = olen;
3642:       dlens[i] = dlen;
3643:     }

3645:     ISGetBlockSize(isrow,&bs);
3646:     ISGetBlockSize(iscol,&cbs);

3648:     MatCreate(comm,&M);
3649:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,Ncols);
3650:     MatSetBlockSizes(M,bs,cbs);
3651:     MatSetType(M,((PetscObject)mat)->type_name);
3652:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3653:     PetscFree(dlens);

3655:   } else { /* call == MAT_REUSE_MATRIX */
3656:     M    = *newmat;
3657:     MatGetLocalSize(M,&i,NULL);
3658:     if (i != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3659:     MatZeroEntries(M);
3660:     /*
3661:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3662:        rather than the slower MatSetValues().
3663:     */
3664:     M->was_assembled = PETSC_TRUE;
3665:     M->assembled     = PETSC_FALSE;
3666:   }

3668:   /* (5) Set values of Msub to *newmat */
3669:   PetscMalloc1(count,&colsub);
3670:   MatGetOwnershipRange(M,&rstart,NULL);

3672:   jj   = aij->j;
3673:   MatSeqAIJGetArrayRead(Msub,(const PetscScalar**)&aa);
3674:   for (i=0; i<m; i++) {
3675:     row = rstart + i;
3676:     nz  = ii[i+1] - ii[i];
3677:     for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3678:     MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);
3679:     jj += nz; aa += nz;
3680:   }
3681:   MatSeqAIJRestoreArrayRead(Msub,(const PetscScalar**)&aa);
3682:   ISRestoreIndices(iscmap,&cmap);

3684:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3685:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);

3687:   PetscFree(colsub);

3689:   /* save Msub, iscol_sub and iscmap used in processor for next request */
3690:   if (call == MAT_INITIAL_MATRIX) {
3691:     *newmat = M;
3692:     PetscObjectCompose((PetscObject)(*newmat),"SubMatrix",(PetscObject)Msub);
3693:     MatDestroy(&Msub);

3695:     PetscObjectCompose((PetscObject)(*newmat),"SubIScol",(PetscObject)iscol_sub);
3696:     ISDestroy(&iscol_sub);

3698:     PetscObjectCompose((PetscObject)(*newmat),"Subcmap",(PetscObject)iscmap);
3699:     ISDestroy(&iscmap);

3701:     if (iscol_local) {
3702:       PetscObjectCompose((PetscObject)(*newmat),"ISAllGather",(PetscObject)iscol_local);
3703:       ISDestroy(&iscol_local);
3704:     }
3705:   }
3706:   return(0);
3707: }

3709: /*
3710:     Not great since it makes two copies of the submatrix: first a SeqAIJ
3711:   locally, and then the final result by concatenating the local matrices.
3712:   Writing it directly would be much like MatCreateSubMatrices_MPIAIJ()

3714:   Note: This requires a sequential iscol with all indices.
3715: */
3716: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3717: {
3719:   PetscMPIInt    rank,size;
3720:   PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3721:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3722:   Mat            M,Mreuse;
3723:   MatScalar      *aa,*vwork;
3724:   MPI_Comm       comm;
3725:   Mat_SeqAIJ     *aij;
3726:   PetscBool      colflag,allcolumns=PETSC_FALSE;

3729:   PetscObjectGetComm((PetscObject)mat,&comm);
3730:   MPI_Comm_rank(comm,&rank);
3731:   MPI_Comm_size(comm,&size);

3733:   /* Check for special case: each processor gets entire matrix columns */
3734:   ISIdentity(iscol,&colflag);
3735:   ISGetLocalSize(iscol,&n);
3736:   if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3737:   MPIU_Allreduce(MPI_IN_PLACE,&allcolumns,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)mat));

3739:   if (call ==  MAT_REUSE_MATRIX) {
3740:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3741:     if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3742:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,allcolumns,&Mreuse);
3743:   } else {
3744:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,allcolumns,&Mreuse);
3745:   }

3747:   /*
3748:       m - number of local rows
3749:       n - number of columns (same on all processors)
3750:       rstart - first row in new global matrix generated
3751:   */
3752:   MatGetSize(Mreuse,&m,&n);
3753:   MatGetBlockSizes(Mreuse,&bs,&cbs);
3754:   if (call == MAT_INITIAL_MATRIX) {
3755:     aij = (Mat_SeqAIJ*)(Mreuse)->data;
3756:     ii  = aij->i;
3757:     jj  = aij->j;

3759:     /*
3760:         Determine the number of non-zeros in the diagonal and off-diagonal
3761:         portions of the matrix in order to do correct preallocation
3762:     */

3764:     /* first get start and end of "diagonal" columns */
3765:     if (csize == PETSC_DECIDE) {
3766:       ISGetSize(isrow,&mglobal);
3767:       if (mglobal == n) { /* square matrix */
3768:         nlocal = m;
3769:       } else {
3770:         nlocal = n/size + ((n % size) > rank);
3771:       }
3772:     } else {
3773:       nlocal = csize;
3774:     }
3775:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3776:     rstart = rend - nlocal;
3777:     if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);

3779:     /* next, compute all the lengths */
3780:     PetscMalloc1(2*m+1,&dlens);
3781:     olens = dlens + m;
3782:     for (i=0; i<m; i++) {
3783:       jend = ii[i+1] - ii[i];
3784:       olen = 0;
3785:       dlen = 0;
3786:       for (j=0; j<jend; j++) {
3787:         if (*jj < rstart || *jj >= rend) olen++;
3788:         else dlen++;
3789:         jj++;
3790:       }
3791:       olens[i] = olen;
3792:       dlens[i] = dlen;
3793:     }
3794:     MatCreate(comm,&M);
3795:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3796:     MatSetBlockSizes(M,bs,cbs);
3797:     MatSetType(M,((PetscObject)mat)->type_name);
3798:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3799:     PetscFree(dlens);
3800:   } else {
3801:     PetscInt ml,nl;

3803:     M    = *newmat;
3804:     MatGetLocalSize(M,&ml,&nl);
3805:     if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3806:     MatZeroEntries(M);
3807:     /*
3808:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3809:        rather than the slower MatSetValues().
3810:     */
3811:     M->was_assembled = PETSC_TRUE;
3812:     M->assembled     = PETSC_FALSE;
3813:   }
3814:   MatGetOwnershipRange(M,&rstart,&rend);
3815:   aij  = (Mat_SeqAIJ*)(Mreuse)->data;
3816:   ii   = aij->i;
3817:   jj   = aij->j;

3819:   /* trigger copy to CPU if needed */
3820:   MatSeqAIJGetArrayRead(Mreuse,(const PetscScalar**)&aa);
3821:   for (i=0; i<m; i++) {
3822:     row   = rstart + i;
3823:     nz    = ii[i+1] - ii[i];
3824:     cwork = jj; jj += nz;
3825:     vwork = aa; aa += nz;
3826:     MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3827:   }
3828:   MatSeqAIJRestoreArrayRead(Mreuse,(const PetscScalar**)&aa);

3830:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3831:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3832:   *newmat = M;

3834:   /* save submatrix used in processor for next request */
3835:   if (call ==  MAT_INITIAL_MATRIX) {
3836:     PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3837:     MatDestroy(&Mreuse);
3838:   }
3839:   return(0);
3840: }

3842: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3843: {
3844:   PetscInt       m,cstart, cend,j,nnz,i,d;
3845:   PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3846:   const PetscInt *JJ;
3848:   PetscBool      nooffprocentries;

3851:   if (Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0, it is %D",Ii[0]);

3853:   PetscLayoutSetUp(B->rmap);
3854:   PetscLayoutSetUp(B->cmap);
3855:   m      = B->rmap->n;
3856:   cstart = B->cmap->rstart;
3857:   cend   = B->cmap->rend;
3858:   rstart = B->rmap->rstart;

3860:   PetscCalloc2(m,&d_nnz,m,&o_nnz);

3862:   if (PetscDefined(USE_DEBUG)) {
3863:     for (i=0; i<m; i++) {
3864:       nnz = Ii[i+1]- Ii[i];
3865:       JJ  = J + Ii[i];
3866:       if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nnz);
3867:       if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3868:       if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3869:     }
3870:   }

3872:   for (i=0; i<m; i++) {
3873:     nnz     = Ii[i+1]- Ii[i];
3874:     JJ      = J + Ii[i];
3875:     nnz_max = PetscMax(nnz_max,nnz);
3876:     d       = 0;
3877:     for (j=0; j<nnz; j++) {
3878:       if (cstart <= JJ[j] && JJ[j] < cend) d++;
3879:     }
3880:     d_nnz[i] = d;
3881:     o_nnz[i] = nnz - d;
3882:   }
3883:   MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3884:   PetscFree2(d_nnz,o_nnz);

3886:   for (i=0; i<m; i++) {
3887:     ii   = i + rstart;
3888:     MatSetValues_MPIAIJ(B,1,&ii,Ii[i+1] - Ii[i],J+Ii[i], v ? v + Ii[i] : NULL,INSERT_VALUES);
3889:   }
3890:   nooffprocentries    = B->nooffprocentries;
3891:   B->nooffprocentries = PETSC_TRUE;
3892:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3893:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3894:   B->nooffprocentries = nooffprocentries;

3896:   MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3897:   return(0);
3898: }

3900: /*@
3901:    MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3902:    (the default parallel PETSc format).

3904:    Collective

3906:    Input Parameters:
3907: +  B - the matrix
3908: .  i - the indices into j for the start of each local row (starts with zero)
3909: .  j - the column indices for each local row (starts with zero)
3910: -  v - optional values in the matrix

3912:    Level: developer

3914:    Notes:
3915:        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3916:      thus you CANNOT change the matrix entries by changing the values of v[] after you have
3917:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

3919:        The i and j indices are 0 based, and the i indices are offsets into the local j (and v) arrays.

3921:        The format used for the sparse matrix input is equivalent to a
3922:     row-major ordering, i.e., for the following matrix, the input data expected is
3923:     as shown:

3925: $        1 0 0
3926: $        2 0 3     P0
3927: $       -------
3928: $        4 5 6     P1
3929: $
3930: $     Process0 [P0]: rows_owned=[0,1]
3931: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
3932: $        j =  {0,0,2}  [size = 3]
3933: $        v =  {1,2,3}  [size = 3]
3934: $
3935: $     Process1 [P1]: rows_owned=[2]
3936: $        i =  {0,3}    [size = nrow+1  = 1+1]
3937: $        j =  {0,1,2}  [size = 3]
3938: $        v =  {4,5,6}  [size = 3]

3940: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
3941:           MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3942: @*/
3943: PetscErrorCode  MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3944: {

3948:   PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
3949:   return(0);
3950: }
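
/*
   Sketch of the two-process, 3x3 example given in the comment above
   (process 0 owns rows 0-1, process 1 owns row 2).  The arrays are exactly
   the i, j, v triples listed there; the assumption is that the code runs on
   exactly two processes.
*/
PetscErrorCode MatMPIAIJPreallocationCSRExample(MPI_Comm comm,Mat *B)
{
  PetscMPIInt       rank;
  const PetscInt    i0[] = {0,1,3}, j0[] = {0,0,2};   /* process 0 rows */
  const PetscScalar v0[] = {1,2,3};
  const PetscInt    i1[] = {0,3},   j1[] = {0,1,2};   /* process 1 row  */
  const PetscScalar v1[] = {4,5,6};

  MPI_Comm_rank(comm,&rank);
  MatCreate(comm,B);
  MatSetSizes(*B,rank ? 1 : 2,PETSC_DECIDE,3,3);
  MatSetType(*B,MATMPIAIJ);
  MatMPIAIJSetPreallocationCSR(*B,rank ? i1 : i0,rank ? j1 : j0,rank ? v1 : v0);
  /* the matrix is preallocated, filled and assembled after this call */
  return(0);
}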

3952: /*@C
3953:    MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3954:    (the default parallel PETSc format).  For good matrix assembly performance
3955:    the user should preallocate the matrix storage by setting the parameters
3956:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3957:    performance can be increased by more than a factor of 50.

3959:    Collective

3961:    Input Parameters:
3962: +  B - the matrix
3963: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3964:            (same value is used for all local rows)
3965: .  d_nnz - array containing the number of nonzeros in the various rows of the
3966:            DIAGONAL portion of the local submatrix (possibly different for each row)
3967:            or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
3968:            The size of this array is equal to the number of local rows, i.e., 'm'.
3969:            For matrices that will be factored, you must leave room for (and set)
3970:            the diagonal entry even if it is zero.
3971: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
3972:            submatrix (same value is used for all local rows).
3973: -  o_nnz - array containing the number of nonzeros in the various rows of the
3974:            OFF-DIAGONAL portion of the local submatrix (possibly different for
3975:            each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
3976:            structure. The size of this array is equal to the number
3977:            of local rows, i.e., 'm'.

3979:    If the *_nnz parameter is given then the *_nz parameter is ignored

3981:    The AIJ format (also called the Yale sparse matrix format or
3982:    compressed row storage (CSR)), is fully compatible with standard Fortran 77
3983:    storage.  The stored row and column indices begin with zero.
3984:    See Users-Manual: ch_mat for details.

3986:    The parallel matrix is partitioned such that the first m0 rows belong to
3987:    process 0, the next m1 rows belong to process 1, the next m2 rows belong
3988:    to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.

3990:    The DIAGONAL portion of the local submatrix of a processor can be defined
3991:    as the submatrix which is obtained by extracting the part corresponding to
3992:    the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
3993:    first row that belongs to the processor, r2 is the last row belonging to
3994:    this processor, and c1-c2 is the range of indices of the local part of a
3995:    vector suitable for applying the matrix to.  This is an mxn matrix.  In the
3996:    common case of a square matrix, the row and column ranges are the same and
3997:    the DIAGONAL part is also square. The remaining portion of the local
3998:    submatrix, of size m x (N-n), constitutes the OFF-DIAGONAL portion.

4000:    If o_nnz and d_nnz are specified, then o_nz and d_nz are ignored.

4002:    You can call MatGetInfo() to get information on how effective the preallocation was;
4003:    for example the fields mallocs, nz_allocated, nz_used, nz_unneeded.
4004:    You can also run with the option -info and look for messages with the string
4005:    malloc in them to see if additional memory allocation was needed.

4007:    Example usage:

4009:    Consider the following 8x8 matrix with 34 non-zero values, that is
4010:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4011:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4012:    as follows:

4014: .vb
4015:             1  2  0  |  0  3  0  |  0  4
4016:     Proc0   0  5  6  |  7  0  0  |  8  0
4017:             9  0 10  | 11  0  0  | 12  0
4018:     -------------------------------------
4019:            13  0 14  | 15 16 17  |  0  0
4020:     Proc1   0 18  0  | 19 20 21  |  0  0
4021:             0  0  0  | 22 23  0  | 24  0
4022:     -------------------------------------
4023:     Proc2  25 26 27  |  0  0 28  | 29  0
4024:            30  0  0  | 31 32 33  |  0 34
4025: .ve

4027:    This can be represented as a collection of submatrices as:

4029: .vb
4030:       A B C
4031:       D E F
4032:       G H I
4033: .ve

4035:    Where the submatrices A,B,C are owned by proc0, D,E,F are
4036:    owned by proc1, G,H,I are owned by proc2.

4038:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4039:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4040:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4042:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4043:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4044:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4045:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4046:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4047:    matrix, and [DF] as another SeqAIJ matrix.

4049:    When d_nz, o_nz parameters are specified, d_nz storage elements are
4050:    allocated for every row of the local diagonal submatrix, and o_nz
4051:    storage locations are allocated for every row of the OFF-DIAGONAL submat.
4052:    One way to choose d_nz and o_nz is to use the max nonzeros per local
4053:    row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4054:    In this case, the values of d_nz,o_nz are:
4055: .vb
4056:      proc0 : d_nz = 2, o_nz = 2
4057:      proc1 : d_nz = 3, o_nz = 2
4058:      proc2 : d_nz = 1, o_nz = 4
4059: .ve
4060:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4061:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4062:    for proc2, i.e., we are using 12+15+10=37 storage locations to store
4063:    34 values.

4065:    When d_nnz, o_nnz parameters are specified, the storage is specified
4066:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4067:    In the above case the values for d_nnz,o_nnz are:
4068: .vb
4069:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4070:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4071:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4072: .ve
4073:    Here the space allocated is the sum of all the above values, i.e., 34, and
4074:    hence preallocation is perfect.

4076:    Level: intermediate

4078: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
4079:           MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
4080: @*/
4081: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
4082: {

4088:   PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
4089:   return(0);
4090: }
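
/*
   Sketch of the three-process, 8x8 example in the comment above, using the
   exact d_nnz/o_nnz rows listed there.  It only preallocates; the 34 values
   would still be inserted afterwards with MatSetValues().  The assumption is
   that the code runs on exactly three processes.
*/
PetscErrorCode MatMPIAIJPreallocationExample(MPI_Comm comm,Mat *A)
{
  PetscMPIInt    rank;
  const PetscInt m[]  = {3,3,2};
  const PetscInt d0[] = {2,2,2}, o0[] = {2,2,2};
  const PetscInt d1[] = {3,3,2}, o1[] = {2,1,1};
  const PetscInt d2[] = {1,1},   o2[] = {4,4};
  const PetscInt *dnnz[] = {d0,d1,d2}, *onnz[] = {o0,o1,o2};

  MPI_Comm_rank(comm,&rank);
  MatCreate(comm,A);
  MatSetSizes(*A,m[rank],m[rank],8,8);
  MatSetType(*A,MATMPIAIJ);
  MatMPIAIJSetPreallocation(*A,0,dnnz[rank],0,onnz[rank]);
  return(0);
}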

4092: /*@
4093:      MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain the local rows in standard
4094:          CSR format.

4096:    Collective

4098:    Input Parameters:
4099: +  comm - MPI communicator
4100: .  m - number of local rows (Cannot be PETSC_DECIDE)
4101: .  n - This value should be the same as the local size used in creating the
4102:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
4103:        calculated if N is given). For square matrices n is almost always m.
4104: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4105: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4106: .   i - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
4107: .   j - column indices
4108: -   a - matrix values

4110:    Output Parameter:
4111: .   mat - the matrix

4113:    Level: intermediate

4115:    Notes:
4116:        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
4117:      thus you CANNOT change the matrix entries by changing the values of a[] after you have
4118:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

4120:        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.

4122:        The format used for the sparse matrix input is equivalent to a
4123:     row-major ordering, i.e., for the following matrix, the input data expected is
4124:     as shown

4126:        Once you have created the matrix you can update it with new numerical values using MatUpdateMPIAIJWithArrays().

4128: $        1 0 0
4129: $        2 0 3     P0
4130: $       -------
4131: $        4 5 6     P1
4132: $
4133: $     Process0 [P0]: rows_owned=[0,1]
4134: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
4135: $        j =  {0,0,2}  [size = 3]
4136: $        v =  {1,2,3}  [size = 3]
4137: $
4138: $     Process1 [P1]: rows_owned=[2]
4139: $        i =  {0,3}    [size = nrow+1  = 1+1]
4140: $        j =  {0,1,2}  [size = 3]
4141: $        v =  {4,5,6}  [size = 3]

4143: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4144:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4145: @*/
4146: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4147: {

4151:   if (i && i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4152:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4153:   MatCreate(comm,mat);
4154:   MatSetSizes(*mat,m,n,M,N);
4155:   /* MatSetBlockSizes(M,bs,cbs); */
4156:   MatSetType(*mat,MATMPIAIJ);
4157:   MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
4158:   return(0);
4159: }
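
/*
   Sketch of the same two-process, 3x3 CSR example as above, using the one-call
   constructor instead of the MatCreate()/MatSetType()/
   MatMPIAIJSetPreallocationCSR() sequence; it assumes exactly two processes.
*/
PetscErrorCode MatCreateMPIAIJWithArraysExample(MPI_Comm comm,Mat *A)
{
  PetscMPIInt       rank;
  const PetscInt    i0[] = {0,1,3}, j0[] = {0,0,2};
  const PetscScalar v0[] = {1,2,3};
  const PetscInt    i1[] = {0,3},   j1[] = {0,1,2};
  const PetscScalar v1[] = {4,5,6};

  MPI_Comm_rank(comm,&rank);
  MatCreateMPIAIJWithArrays(comm,rank ? 1 : 2,PETSC_DECIDE,3,3,
                            rank ? i1 : i0,rank ? j1 : j0,rank ? v1 : v0,A);
  return(0);
}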

4161: /*@
4162:      MatUpdateMPIAIJWithArrays - updates an MPI AIJ matrix using arrays that contain the local rows in standard
4163:          CSR format. Only the numerical values are updated; the other arrays must be identical to those used when the matrix was created

4165:    Collective

4167:    Input Parameters:
4168: +  mat - the matrix
4169: .  m - number of local rows (Cannot be PETSC_DECIDE)
4170: .  n - This value should be the same as the local size used in creating the
4171:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
4172:        calculated if N is given). For square matrices n is almost always m.
4173: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4174: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4175: .  Ii - row indices; that is Ii[0] = 0, Ii[row] = Ii[row-1] + number of elements in that row of the matrix
4176: .  J - column indices
4177: -  v - matrix values

4179:    Level: intermediate

4181: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4182:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4183: @*/
4184: PetscErrorCode MatUpdateMPIAIJWithArrays(Mat mat,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
4185: {
4187:   PetscInt       cstart,nnz,i,j;
4188:   PetscInt       *ld;
4189:   PetscBool      nooffprocentries;
4190:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*)mat->data;
4191:   Mat_SeqAIJ     *Ad  = (Mat_SeqAIJ*)Aij->A->data, *Ao  = (Mat_SeqAIJ*)Aij->B->data;
4192:   PetscScalar    *ad = Ad->a, *ao = Ao->a;
4193:   const PetscInt *Adi = Ad->i;
4194:   PetscInt       ldi,Iii,md;

4197:   if (Ii[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4198:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4199:   if (m != mat->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of rows cannot change from call to MatUpdateMPIAIJWithArrays()");
4200:   if (n != mat->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of columns cannot change from call to MatUpdateMPIAIJWithArrays()");

4202:   cstart = mat->cmap->rstart;
4203:   if (!Aij->ld) {
4204:     /* count number of entries below block diagonal */
4205:     PetscCalloc1(m,&ld);
4206:     Aij->ld = ld;
4207:     for (i=0; i<m; i++) {
4208:       nnz  = Ii[i+1]- Ii[i];
4209:       j     = 0;
4210:       while (j < nnz && J[j] < cstart) {j++;}
4211:       J    += nnz;
4212:       ld[i] = j;
4213:     }
4214:   } else {
4215:     ld = Aij->ld;
4216:   }

4218:   for (i=0; i<m; i++) {
4219:     nnz  = Ii[i+1]- Ii[i];
4220:     Iii  = Ii[i];
4221:     ldi  = ld[i];
4222:     md   = Adi[i+1]-Adi[i];
4223:     PetscArraycpy(ao,v + Iii,ldi);
4224:     PetscArraycpy(ad,v + Iii + ldi,md);
4225:     PetscArraycpy(ao + ldi,v + Iii + ldi + md,nnz - ldi - md);
4226:     ad  += md;
4227:     ao  += nnz - md;
4228:   }
4229:   nooffprocentries      = mat->nooffprocentries;
4230:   mat->nooffprocentries = PETSC_TRUE;
4231:   PetscObjectStateIncrease((PetscObject)Aij->A);
4232:   PetscObjectStateIncrease((PetscObject)Aij->B);
4233:   PetscObjectStateIncrease((PetscObject)mat);
4234:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
4235:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
4236:   mat->nooffprocentries = nooffprocentries;
4237:   return(0);
4238: }
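
/*
   Sketch of refreshing the numerical values of a matrix previously built with
   MatCreateMPIAIJWithArrays(); i and j must be identical to the arrays used at
   creation, only the values change.  Doubling the old values here is an
   arbitrary illustration.
*/
PetscErrorCode MatUpdateMPIAIJWithArraysExample(Mat A,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar vold[])
{
  PetscInt    k,nz = i[m];
  PetscScalar *vnew;

  PetscMalloc1(nz,&vnew);
  for (k=0; k<nz; k++) vnew[k] = 2.0*vold[k];     /* new numerical values, same sparsity */
  MatUpdateMPIAIJWithArrays(A,m,n,M,N,i,j,vnew);
  PetscFree(vnew);
  return(0);
}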

4240: /*@C
4241:    MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
4242:    (the default parallel PETSc format).  For good matrix assembly performance
4243:    the user should preallocate the matrix storage by setting the parameters
4244:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
4245:    performance can be increased by more than a factor of 50.

4247:    Collective

4249:    Input Parameters:
4250: +  comm - MPI communicator
4251: .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
4252:            This value should be the same as the local size used in creating the
4253:            y vector for the matrix-vector product y = Ax.
4254: .  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
4255:        This value should be the same as the local size used in creating the
4256:        x vector for the matrix-vector product y = Ax. For square matrices n is almost always m.
4257: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4258: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4259: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4260:            (same value is used for all local rows)
4261: .  d_nnz - array containing the number of nonzeros in the various rows of the
4262:            DIAGONAL portion of the local submatrix (possibly different for each row)
4263:            or NULL, if d_nz is used to specify the nonzero structure.
4264:            The size of this array is equal to the number of local rows, i.e 'm'.
4265: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4266:            submatrix (same value is used for all local rows).
4267: -  o_nnz - array containing the number of nonzeros in the various rows of the
4268:            OFF-DIAGONAL portion of the local submatrix (possibly different for
4269:            each row) or NULL, if o_nz is used to specify the nonzero
4270:            structure. The size of this array is equal to the number
4271:            of local rows, i.e 'm'.

4273:    Output Parameter:
4274: .  A - the matrix

4276:    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
4277:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
4278:    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

4280:    Notes:
4281:    If the *_nnz parameter is given then the *_nz parameter is ignored

4283:    m,n,M,N parameters specify the size of the matrix, and its partitioning across
4284:    processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
4285:    storage requirements for this matrix.

4287:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
4288:    processor then it must be used on all processors that share the object for
4289:    that argument.

4291:    The user MUST specify either the local or global matrix dimensions
4292:    (possibly both).

4294:    The parallel matrix is partitioned across processors such that the
4295:    first m0 rows belong to process 0, the next m1 rows belong to
4296:    process 1, the next m2 rows belong to process 2, etc., where
4297:    m0,m1,m2,... are the input parameter 'm', i.e. each processor stores
4298:    values corresponding to an [m x N] submatrix.

4300:    The columns are logically partitioned with the n0 columns belonging
4301:    to 0th partition, the next n1 columns belonging to the next
4302:    partition etc.. where n0,n1,n2... are the input parameter 'n'.

4304:    The DIAGONAL portion of the local submatrix on any given processor
4305:    is the submatrix corresponding to the rows and columns m,n
4306:    corresponding to the given processor, i.e. the diagonal matrix on
4307:    process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1],
4308:    etc. The remaining portion of the local submatrix [m x (N-n)]
4309:    constitutes the OFF-DIAGONAL portion. The example below better
4310:    illustrates this concept.

4312:    For a square global matrix we define each processor's diagonal portion
4313:    to be its local rows and the corresponding columns (a square submatrix);
4314:    each processor's off-diagonal portion encompasses the remainder of the
4315:    local matrix (a rectangular submatrix).

4317:    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.

4319:    When calling this routine with a single process communicator, a matrix of
4320:    type SEQAIJ is returned.  If a matrix of type MPIAIJ is desired for this
4321:    type of communicator, use the construction mechanism
4322: .vb
4323:      MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
4324: .ve

4331:    By default, this format uses inodes (identical nodes) when possible.
4332:    We search for consecutive rows with the same nonzero structure, thereby
4333:    reusing matrix information to achieve increased efficiency.

4335:    Options Database Keys:
4336: +  -mat_no_inode  - Do not use inodes
4337: -  -mat_inode_limit <limit> - Sets inode limit (max limit=5)



4341:    Example usage:

4343:    Consider the following 8x8 matrix with 34 non-zero values, that is
4344:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4345:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4346:    as follows

4348: .vb
4349:             1  2  0  |  0  3  0  |  0  4
4350:     Proc0   0  5  6  |  7  0  0  |  8  0
4351:             9  0 10  | 11  0  0  | 12  0
4352:     -------------------------------------
4353:            13  0 14  | 15 16 17  |  0  0
4354:     Proc1   0 18  0  | 19 20 21  |  0  0
4355:             0  0  0  | 22 23  0  | 24  0
4356:     -------------------------------------
4357:     Proc2  25 26 27  |  0  0 28  | 29  0
4358:            30  0  0  | 31 32 33  |  0 34
4359: .ve

4361:    This can be represented as a collection of submatrices as

4363: .vb
4364:       A B C
4365:       D E F
4366:       G H I
4367: .ve

4369:    where the submatrices A,B,C are owned by proc0, D,E,F are
4370:    owned by proc1, G,H,I are owned by proc2.

4372:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4373:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4374:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4376:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4377:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4378:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4379:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4380:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4381:    matrix, and [DF] as another SeqAIJ matrix.

4383:    When d_nz, o_nz parameters are specified, d_nz storage elements are
4384:    allocated for every row of the local diagonal submatrix, and o_nz
4385:    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4386:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per local
4387:    row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4388:    In this case, the values of d_nz,o_nz are
4389: .vb
4390:      proc0 : d_nz = 2, o_nz = 2
4391:      proc1 : d_nz = 3, o_nz = 2
4392:      proc2 : d_nz = 1, o_nz = 4
4393: .ve
4394:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4395:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4396:    for proc2, i.e. we are using 12+15+10=37 storage locations to store
4397:    34 values.

4399:    When d_nnz, o_nnz parameters are specified, the storage is specified
4400:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4401:    In the above case the values for d_nnz,o_nnz are
4402: .vb
4403:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4404:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4405:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4406: .ve
4407:    Here the space allocated is the sum of all the above values, i.e. 34, and
4408:    hence the preallocation is perfect.
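
   As an illustrative sketch (not part of the original manual page; the variable names comm and A are
   assumed to be set up by the caller), the call made on proc0 in the example above would be
.vb
     PetscInt d_nnz[3] = {2,2,2}, o_nnz[3] = {2,2,2};
     MatCreateAIJ(comm,3,3,8,8,0,d_nnz,0,o_nnz,&A);   // d_nz and o_nz are ignored since d_nnz and o_nnz are given
.ve
   with proc1 and proc2 passing their own local sizes and per-row counts.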

4410:    Level: intermediate

4412: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4413:           MATMPIAIJ, MatCreateMPIAIJWithArrays()
4414: @*/
4415: PetscErrorCode  MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4416: {
4418:   PetscMPIInt    size;

4421:   MatCreate(comm,A);
4422:   MatSetSizes(*A,m,n,M,N);
4423:   MPI_Comm_size(comm,&size);
4424:   if (size > 1) {
4425:     MatSetType(*A,MATMPIAIJ);
4426:     MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
4427:   } else {
4428:     MatSetType(*A,MATSEQAIJ);
4429:     MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
4430:   }
4431:   return(0);
4432: }

4434: /*@C
4435:   MatMPIAIJGetSeqAIJ - Returns the local piece of this distributed matrix

4437:   Not collective

4439:   Input Parameter:
4440: . A - The MPIAIJ matrix

4442:   Output Parameters:
4443: + Ad - The local diagonal block as a SeqAIJ matrix
4444: . Ao - The local off-diagonal block as a SeqAIJ matrix
4445: - colmap - An array mapping local column numbers of Ao to global column numbers of the parallel matrix

4447:   Note: The rows in Ad and Ao are in [0, Nr), where Nr is the number of local rows on this process. The columns
4448:   in Ad are in [0, Nc), where Nc is the number of local columns. The columns in Ao are in [0, Nco), where Nco is
4449:   the number of nonzero columns in the local off-diagonal piece of the matrix A. The array colmap maps these
4450:   local column numbers to global column numbers in the original matrix.
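
  For example (an illustrative sketch, not part of the original manual page; the matrix A and the local row
  index i are assumed to be defined by the caller), the global column indices of a local row of Ao can be
  recovered with
.vb
     Mat            Ad,Ao;
     const PetscInt *colmap,*cols;
     PetscInt       ncols,j,gcol;

     MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&colmap);
     MatGetRow(Ao,i,&ncols,&cols,NULL);
     for (j=0; j<ncols; j++) gcol = colmap[cols[j]];   // global column of the j-th entry in local row i of Ao
     MatRestoreRow(Ao,i,&ncols,&cols,NULL);
.ve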

4452:   Level: intermediate

4454: .seealso: MatMPIAIJGetLocalMat(), MatMPIAIJGetLocalMatCondensed(), MatCreateAIJ(), MATMPIAIJ, MATSEQAIJ
4455: @*/
4456: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4457: {
4458:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
4459:   PetscBool      flg;

4463:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&flg);
4464:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input");
4465:   if (Ad)     *Ad     = a->A;
4466:   if (Ao)     *Ao     = a->B;
4467:   if (colmap) *colmap = a->garray;
4468:   return(0);
4469: }

4471: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4472: {
4474:   PetscInt       m,N,i,rstart,nnz,Ii;
4475:   PetscInt       *indx;
4476:   PetscScalar    *values;

4479:   MatGetSize(inmat,&m,&N);
4480:   if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4481:     PetscInt       *dnz,*onz,sum,bs,cbs;

4483:     if (n == PETSC_DECIDE) {
4484:       PetscSplitOwnership(comm,&n,&N);
4485:     }
4486:     /* Check sum(n) = N */
4487:     MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
4488:     if (sum != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns %D != global columns %D",sum,N);

4490:     MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
4491:     rstart -= m;

4493:     MatPreallocateInitialize(comm,m,n,dnz,onz);
4494:     for (i=0; i<m; i++) {
4495:       MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4496:       MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
4497:       MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4498:     }

4500:     MatCreate(comm,outmat);
4501:     MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4502:     MatGetBlockSizes(inmat,&bs,&cbs);
4503:     MatSetBlockSizes(*outmat,bs,cbs);
4504:     MatSetType(*outmat,MATAIJ);
4505:     MatSeqAIJSetPreallocation(*outmat,0,dnz);
4506:     MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
4507:     MatPreallocateFinalize(dnz,onz);
4508:     MatSetOption(*outmat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
4509:   }

4511:   /* numeric phase */
4512:   MatGetOwnershipRange(*outmat,&rstart,NULL);
4513:   for (i=0; i<m; i++) {
4514:     MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4515:     Ii   = i + rstart;
4516:     MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4517:     MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4518:   }
4519:   MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4520:   MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4521:   return(0);
4522: }

4524: PetscErrorCode MatFileSplit(Mat A,char *outfile)
4525: {
4526:   PetscErrorCode    ierr;
4527:   PetscMPIInt       rank;
4528:   PetscInt          m,N,i,rstart,nnz;
4529:   size_t            len;
4530:   const PetscInt    *indx;
4531:   PetscViewer       out;
4532:   char              *name;
4533:   Mat               B;
4534:   const PetscScalar *values;

4537:   MatGetLocalSize(A,&m,NULL);
4538:   MatGetSize(A,NULL,&N);
4539:   /* Should this be the type of the diagonal block of A? */
4540:   MatCreate(PETSC_COMM_SELF,&B);
4541:   MatSetSizes(B,m,N,m,N);
4542:   MatSetBlockSizesFromMats(B,A,A);
4543:   MatSetType(B,MATSEQAIJ);
4544:   MatSeqAIJSetPreallocation(B,0,NULL);
4545:   MatGetOwnershipRange(A,&rstart,NULL);
4546:   for (i=0; i<m; i++) {
4547:     MatGetRow(A,i+rstart,&nnz,&indx,&values);
4548:     MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
4549:     MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
4550:   }
4551:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4552:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

4554:   MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
4555:   PetscStrlen(outfile,&len);
4556:   PetscMalloc1(len+6,&name);
4557:   PetscSNPrintf(name,len+6,"%s.%d",outfile,rank);
4558:   PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
4559:   PetscFree(name);
4560:   MatView(B,out);
4561:   PetscViewerDestroy(&out);
4562:   MatDestroy(&B);
4563:   return(0);
4564: }

4566: static PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(void *data)
4567: {
4568:   PetscErrorCode      ierr;
4569:   Mat_Merge_SeqsToMPI *merge = (Mat_Merge_SeqsToMPI *)data;

4572:   if (!merge) return(0);
4573:   PetscFree(merge->id_r);
4574:   PetscFree(merge->len_s);
4575:   PetscFree(merge->len_r);
4576:   PetscFree(merge->bi);
4577:   PetscFree(merge->bj);
4578:   PetscFree(merge->buf_ri[0]);
4579:   PetscFree(merge->buf_ri);
4580:   PetscFree(merge->buf_rj[0]);
4581:   PetscFree(merge->buf_rj);
4582:   PetscFree(merge->coi);
4583:   PetscFree(merge->coj);
4584:   PetscFree(merge->owners_co);
4585:   PetscLayoutDestroy(&merge->rowmap);
4586:   PetscFree(merge);
4587:   return(0);
4588: }

4590: #include <../src/mat/utils/freespace.h>
4591: #include <petscbt.h>

4593: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4594: {
4595:   PetscErrorCode      ierr;
4596:   MPI_Comm            comm;
4597:   Mat_SeqAIJ          *a  =(Mat_SeqAIJ*)seqmat->data;
4598:   PetscMPIInt         size,rank,taga,*len_s;
4599:   PetscInt            N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4600:   PetscInt            proc,m;
4601:   PetscInt            **buf_ri,**buf_rj;
4602:   PetscInt            k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4603:   PetscInt            nrows,**buf_ri_k,**nextrow,**nextai;
4604:   MPI_Request         *s_waits,*r_waits;
4605:   MPI_Status          *status;
4606:   MatScalar           *aa=a->a;
4607:   MatScalar           **abuf_r,*ba_i;
4608:   Mat_Merge_SeqsToMPI *merge;
4609:   PetscContainer      container;

4612:   PetscObjectGetComm((PetscObject)mpimat,&comm);
4613:   PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);

4615:   MPI_Comm_size(comm,&size);
4616:   MPI_Comm_rank(comm,&rank);

4618:   PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4619:   if (!container) SETERRQ(PetscObjectComm((PetscObject)mpimat),PETSC_ERR_PLIB,"Mat not created from MatCreateMPIAIJSumSeqAIJSymbolic");
4620:   PetscContainerGetPointer(container,(void**)&merge);

4622:   bi     = merge->bi;
4623:   bj     = merge->bj;
4624:   buf_ri = merge->buf_ri;
4625:   buf_rj = merge->buf_rj;

4627:   PetscMalloc1(size,&status);
4628:   owners = merge->rowmap->range;
4629:   len_s  = merge->len_s;

4631:   /* send and recv matrix values */
4632:   /*-----------------------------*/
4633:   PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4634:   PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);

4636:   PetscMalloc1(merge->nsend+1,&s_waits);
4637:   for (proc=0,k=0; proc<size; proc++) {
4638:     if (!len_s[proc]) continue;
4639:     i    = owners[proc];
4640:     MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4641:     k++;
4642:   }

4644:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4645:   if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4646:   PetscFree(status);

4648:   PetscFree(s_waits);
4649:   PetscFree(r_waits);

4651:   /* insert mat values of mpimat */
4652:   /*----------------------------*/
4653:   PetscMalloc1(N,&ba_i);
4654:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4656:   for (k=0; k<merge->nrecv; k++) {
4657:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4658:     nrows       = *(buf_ri_k[k]);
4659:     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
4660:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4661:   }

4663:   /* set values of ba */
4664:   m = merge->rowmap->n;
4665:   for (i=0; i<m; i++) {
4666:     arow = owners[rank] + i;
4667:     bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
4668:     bnzi = bi[i+1] - bi[i];
4669:     PetscArrayzero(ba_i,bnzi);

4671:     /* add local non-zero vals of this proc's seqmat into ba */
4672:     anzi   = ai[arow+1] - ai[arow];
4673:     aj     = a->j + ai[arow];
4674:     aa     = a->a + ai[arow];
4675:     nextaj = 0;
4676:     for (j=0; nextaj<anzi; j++) {
4677:       if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4678:         ba_i[j] += aa[nextaj++];
4679:       }
4680:     }

4682:     /* add received vals into ba */
4683:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4684:       /* i-th row */
4685:       if (i == *nextrow[k]) {
4686:         anzi   = *(nextai[k]+1) - *nextai[k];
4687:         aj     = buf_rj[k] + *(nextai[k]);
4688:         aa     = abuf_r[k] + *(nextai[k]);
4689:         nextaj = 0;
4690:         for (j=0; nextaj<anzi; j++) {
4691:           if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4692:             ba_i[j] += aa[nextaj++];
4693:           }
4694:         }
4695:         nextrow[k]++; nextai[k]++;
4696:       }
4697:     }
4698:     MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4699:   }
4700:   MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4701:   MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);

4703:   PetscFree(abuf_r[0]);
4704:   PetscFree(abuf_r);
4705:   PetscFree(ba_i);
4706:   PetscFree3(buf_ri_k,nextrow,nextai);
4707:   PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4708:   return(0);
4709: }

4711: PetscErrorCode  MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4712: {
4713:   PetscErrorCode      ierr;
4714:   Mat                 B_mpi;
4715:   Mat_SeqAIJ          *a=(Mat_SeqAIJ*)seqmat->data;
4716:   PetscMPIInt         size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4717:   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
4718:   PetscInt            M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4719:   PetscInt            len,proc,*dnz,*onz,bs,cbs;
4720:   PetscInt            k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4721:   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4722:   MPI_Request         *si_waits,*sj_waits,*ri_waits,*rj_waits;
4723:   MPI_Status          *status;
4724:   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
4725:   PetscBT             lnkbt;
4726:   Mat_Merge_SeqsToMPI *merge;
4727:   PetscContainer      container;

4730:   PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);

4732:   /* make sure it is a PETSc comm */
4733:   PetscCommDuplicate(comm,&comm,NULL);
4734:   MPI_Comm_size(comm,&size);
4735:   MPI_Comm_rank(comm,&rank);

4737:   PetscNew(&merge);
4738:   PetscMalloc1(size,&status);

4740:   /* determine row ownership */
4741:   /*---------------------------------------------------------*/
4742:   PetscLayoutCreate(comm,&merge->rowmap);
4743:   PetscLayoutSetLocalSize(merge->rowmap,m);
4744:   PetscLayoutSetSize(merge->rowmap,M);
4745:   PetscLayoutSetBlockSize(merge->rowmap,1);
4746:   PetscLayoutSetUp(merge->rowmap);
4747:   PetscMalloc1(size,&len_si);
4748:   PetscMalloc1(size,&merge->len_s);

4750:   m      = merge->rowmap->n;
4751:   owners = merge->rowmap->range;

4753:   /* determine the number of messages to send, their lengths */
4754:   /*---------------------------------------------------------*/
4755:   len_s = merge->len_s;

4757:   len          = 0; /* length of buf_si[] */
4758:   merge->nsend = 0;
4759:   for (proc=0; proc<size; proc++) {
4760:     len_si[proc] = 0;
4761:     if (proc == rank) {
4762:       len_s[proc] = 0;
4763:     } else {
4764:       len_si[proc] = owners[proc+1] - owners[proc] + 1;
4765:       len_s[proc]  = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4766:     }
4767:     if (len_s[proc]) {
4768:       merge->nsend++;
4769:       nrows = 0;
4770:       for (i=owners[proc]; i<owners[proc+1]; i++) {
4771:         if (ai[i+1] > ai[i]) nrows++;
4772:       }
4773:       len_si[proc] = 2*(nrows+1);
4774:       len         += len_si[proc];
4775:     }
4776:   }

4778:   /* determine the number and length of messages to receive for ij-structure */
4779:   /*-------------------------------------------------------------------------*/
4780:   PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4781:   PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);

4783:   /* post the Irecv of j-structure */
4784:   /*-------------------------------*/
4785:   PetscCommGetNewTag(comm,&tagj);
4786:   PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);

4788:   /* post the Isend of j-structure */
4789:   /*--------------------------------*/
4790:   PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);

4792:   for (proc=0, k=0; proc<size; proc++) {
4793:     if (!len_s[proc]) continue;
4794:     i    = owners[proc];
4795:     MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4796:     k++;
4797:   }

4799:   /* receives and sends of j-structure are complete */
4800:   /*------------------------------------------------*/
4801:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4802:   if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}

4804:   /* send and recv i-structure */
4805:   /*---------------------------*/
4806:   PetscCommGetNewTag(comm,&tagi);
4807:   PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);

4809:   PetscMalloc1(len+1,&buf_s);
4810:   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
4811:   for (proc=0,k=0; proc<size; proc++) {
4812:     if (!len_s[proc]) continue;
4813:     /* form outgoing message for i-structure:
4814:          buf_si[0]:                 nrows to be sent
4815:                [1:nrows]:           row index (global)
4816:                [nrows+1:2*nrows+1]: i-structure index
4817:     */
4818:     /*-------------------------------------------*/
4819:     nrows       = len_si[proc]/2 - 1;
4820:     buf_si_i    = buf_si + nrows+1;
4821:     buf_si[0]   = nrows;
4822:     buf_si_i[0] = 0;
4823:     nrows       = 0;
4824:     for (i=owners[proc]; i<owners[proc+1]; i++) {
4825:       anzi = ai[i+1] - ai[i];
4826:       if (anzi) {
4827:         buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4828:         buf_si[nrows+1]   = i-owners[proc]; /* local row index */
4829:         nrows++;
4830:       }
4831:     }
4832:     MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4833:     k++;
4834:     buf_si += len_si[proc];
4835:   }

4837:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4838:   if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}

4840:   PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4841:   for (i=0; i<merge->nrecv; i++) {
4842:     PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4843:   }

4845:   PetscFree(len_si);
4846:   PetscFree(len_ri);
4847:   PetscFree(rj_waits);
4848:   PetscFree2(si_waits,sj_waits);
4849:   PetscFree(ri_waits);
4850:   PetscFree(buf_s);
4851:   PetscFree(status);

4853:   /* compute a local seq matrix in each processor */
4854:   /*----------------------------------------------*/
4855:   /* allocate bi array and free space for accumulating nonzero column info */
4856:   PetscMalloc1(m+1,&bi);
4857:   bi[0] = 0;

4859:   /* create and initialize a linked list */
4860:   nlnk = N+1;
4861:   PetscLLCreate(N,N,nlnk,lnk,lnkbt);

4863:   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4864:   len  = ai[owners[rank+1]] - ai[owners[rank]];
4865:   PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);

4867:   current_space = free_space;

4869:   /* determine symbolic info for each local row */
4870:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4872:   for (k=0; k<merge->nrecv; k++) {
4873:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4874:     nrows       = *buf_ri_k[k];
4875:     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
4876:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4877:   }

4879:   MatPreallocateInitialize(comm,m,n,dnz,onz);
4880:   len  = 0;
4881:   for (i=0; i<m; i++) {
4882:     bnzi = 0;
4883:     /* add local non-zero cols of this proc's seqmat into lnk */
4884:     arow  = owners[rank] + i;
4885:     anzi  = ai[arow+1] - ai[arow];
4886:     aj    = a->j + ai[arow];
4887:     PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4888:     bnzi += nlnk;
4889:     /* add received col data into lnk */
4890:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4891:       if (i == *nextrow[k]) { /* i-th row */
4892:         anzi  = *(nextai[k]+1) - *nextai[k];
4893:         aj    = buf_rj[k] + *nextai[k];
4894:         PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4895:         bnzi += nlnk;
4896:         nextrow[k]++; nextai[k]++;
4897:       }
4898:     }
4899:     if (len < bnzi) len = bnzi;  /* =max(bnzi) */

4901:     /* if free space is not available, make more free space */
4902:     if (current_space->local_remaining<bnzi) {
4903:       PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);
4904:       nspacedouble++;
4905:     }
4906:     /* copy data into free space, then initialize lnk */
4907:     PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4908:     MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);

4910:     current_space->array           += bnzi;
4911:     current_space->local_used      += bnzi;
4912:     current_space->local_remaining -= bnzi;

4914:     bi[i+1] = bi[i] + bnzi;
4915:   }

4917:   PetscFree3(buf_ri_k,nextrow,nextai);

4919:   PetscMalloc1(bi[m]+1,&bj);
4920:   PetscFreeSpaceContiguous(&free_space,bj);
4921:   PetscLLDestroy(lnk,lnkbt);

4923:   /* create symbolic parallel matrix B_mpi */
4924:   /*---------------------------------------*/
4925:   MatGetBlockSizes(seqmat,&bs,&cbs);
4926:   MatCreate(comm,&B_mpi);
4927:   if (n==PETSC_DECIDE) {
4928:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
4929:   } else {
4930:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4931:   }
4932:   MatSetBlockSizes(B_mpi,bs,cbs);
4933:   MatSetType(B_mpi,MATMPIAIJ);
4934:   MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
4935:   MatPreallocateFinalize(dnz,onz);
4936:   MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);

4938:   /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4939:   B_mpi->assembled  = PETSC_FALSE;
4940:   merge->bi         = bi;
4941:   merge->bj         = bj;
4942:   merge->buf_ri     = buf_ri;
4943:   merge->buf_rj     = buf_rj;
4944:   merge->coi        = NULL;
4945:   merge->coj        = NULL;
4946:   merge->owners_co  = NULL;

4948:   PetscCommDestroy(&comm);

4950:   /* attach the supporting struct to B_mpi for reuse */
4951:   PetscContainerCreate(PETSC_COMM_SELF,&container);
4952:   PetscContainerSetPointer(container,merge);
4953:   PetscContainerSetUserDestroy(container,MatDestroy_MPIAIJ_SeqsToMPI);
4954:   PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
4955:   PetscContainerDestroy(&container);
4956:   *mpimat = B_mpi;

4958:   PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
4959:   return(0);
4960: }

4962: /*@C
4963:       MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
4964:                  matrices from each processor

4966:     Collective

4968:    Input Parameters:
4969: +    comm - the communicators the parallel matrix will live on
4970: .    seqmat - the input sequential matrices
4971: .    m - number of local rows (or PETSC_DECIDE)
4972: .    n - number of local columns (or PETSC_DECIDE)
4973: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

4975:    Output Parameter:
4976: .    mpimat - the parallel matrix generated

4978:     Level: advanced

4980:    Notes:
4981:      The dimensions of the sequential matrix in each processor MUST be the same.
4982:      The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4983:      destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
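
     A typical calling sequence (an illustrative sketch, not part of the original manual page; comm, seqmat,
     and mpimat are assumed to be set up by the caller) is
.vb
     MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&mpimat);
     // ... change the numerical values of seqmat without changing its nonzero pattern ...
     MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&mpimat);
.ve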
4984: @*/
4985: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4986: {
4988:   PetscMPIInt    size;

4991:   MPI_Comm_size(comm,&size);
4992:   if (size == 1) {
4993:     PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4994:     if (scall == MAT_INITIAL_MATRIX) {
4995:       MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
4996:     } else {
4997:       MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
4998:     }
4999:     PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
5000:     return(0);
5001:   }
5002:   PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
5003:   if (scall == MAT_INITIAL_MATRIX) {
5004:     MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
5005:   }
5006:   MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
5007:   PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
5008:   return(0);
5009: }

5011: /*@
5012:      MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
5013:           mlocal rows and n columns, where mlocal is the row count obtained with MatGetLocalSize() and n is the global column count obtained
5014:           with MatGetSize()

5016:     Not Collective

5018:    Input Parameters:
5019: +    A - the matrix
5020: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5022:    Output Parameter:
5023: .    A_loc - the local sequential matrix generated

5025:     Level: developer

5027:    Notes:
5028:      When the communicator associated with A has size 1 and MAT_INITIAL_MATRIX is requested, the matrix returned is the diagonal part of A.
5029:      If MAT_REUSE_MATRIX is requested with comm size 1, MatCopy(Adiag,*A_loc,SAME_NONZERO_PATTERN) is called.
5030:      This means that one can preallocate the proper sequential matrix first and then call this routine with MAT_REUSE_MATRIX to safely
5031:      modify the values of the returned A_loc.
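
     A typical calling sequence (an illustrative sketch, not part of the original manual page; A is assumed
     to be an assembled MATMPIAIJ matrix) is
.vb
     Mat A_loc;

     MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);   // gather the local rows of A into one SeqAIJ matrix
     // ... the values of A change, its nonzero pattern does not ...
     MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);     // refresh the values of A_loc in place
     MatDestroy(&A_loc);
.ve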

5033: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed(), MatMPIAIJGetLocalMatMerge()
5034: @*/
5035: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
5036: {
5037:   PetscErrorCode    ierr;
5038:   Mat_MPIAIJ        *mpimat=(Mat_MPIAIJ*)A->data;
5039:   Mat_SeqAIJ        *mat,*a,*b;
5040:   PetscInt          *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
5041:   const PetscScalar *aa,*ba,*aav,*bav;
5042:   PetscScalar       *ca,*cam;
5043:   PetscMPIInt       size;
5044:   PetscInt          am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
5045:   PetscInt          *ci,*cj,col,ncols_d,ncols_o,jo;
5046:   PetscBool         match;

5049:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&match);
5050:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5051:   MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
5052:   if (size == 1) {
5053:     if (scall == MAT_INITIAL_MATRIX) {
5054:       PetscObjectReference((PetscObject)mpimat->A);
5055:       *A_loc = mpimat->A;
5056:     } else if (scall == MAT_REUSE_MATRIX) {
5057:       MatCopy(mpimat->A,*A_loc,SAME_NONZERO_PATTERN);
5058:     }
5059:     return(0);
5060:   }

5062:   PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
5063:   a = (Mat_SeqAIJ*)(mpimat->A)->data;
5064:   b = (Mat_SeqAIJ*)(mpimat->B)->data;
5065:   ai = a->i; aj = a->j; bi = b->i; bj = b->j;
5066:   MatSeqAIJGetArrayRead(mpimat->A,&aav);
5067:   MatSeqAIJGetArrayRead(mpimat->B,&bav);
5068:   aa   = aav;
5069:   ba   = bav;
5070:   if (scall == MAT_INITIAL_MATRIX) {
5071:     PetscMalloc1(1+am,&ci);
5072:     ci[0] = 0;
5073:     for (i=0; i<am; i++) {
5074:       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
5075:     }
5076:     PetscMalloc1(1+ci[am],&cj);
5077:     PetscMalloc1(1+ci[am],&ca);
5078:     k    = 0;
5079:     for (i=0; i<am; i++) {
5080:       ncols_o = bi[i+1] - bi[i];
5081:       ncols_d = ai[i+1] - ai[i];
5082:       /* off-diagonal portion of A */
5083:       for (jo=0; jo<ncols_o; jo++) {
5084:         col = cmap[*bj];
5085:         if (col >= cstart) break;
5086:         cj[k]   = col; bj++;
5087:         ca[k++] = *ba++;
5088:       }
5089:       /* diagonal portion of A */
5090:       for (j=0; j<ncols_d; j++) {
5091:         cj[k]   = cstart + *aj++;
5092:         ca[k++] = *aa++;
5093:       }
5094:       /* off-diagonal portion of A */
5095:       for (j=jo; j<ncols_o; j++) {
5096:         cj[k]   = cmap[*bj++];
5097:         ca[k++] = *ba++;
5098:       }
5099:     }
5100:     /* put together the new matrix */
5101:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
5102:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5103:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5104:     mat          = (Mat_SeqAIJ*)(*A_loc)->data;
5105:     mat->free_a  = PETSC_TRUE;
5106:     mat->free_ij = PETSC_TRUE;
5107:     mat->nonew   = 0;
5108:   } else if (scall == MAT_REUSE_MATRIX) {
5109:     mat=(Mat_SeqAIJ*)(*A_loc)->data;
5110: #if defined(PETSC_HAVE_DEVICE)
5111:     (*A_loc)->offloadmask = PETSC_OFFLOAD_CPU;
5112: #endif
5113:     ci = mat->i; cj = mat->j; cam = mat->a;
5114:     for (i=0; i<am; i++) {
5115:       /* off-diagonal portion of A */
5116:       ncols_o = bi[i+1] - bi[i];
5117:       for (jo=0; jo<ncols_o; jo++) {
5118:         col = cmap[*bj];
5119:         if (col >= cstart) break;
5120:         *cam++ = *ba++; bj++;
5121:       }
5122:       /* diagonal portion of A */
5123:       ncols_d = ai[i+1] - ai[i];
5124:       for (j=0; j<ncols_d; j++) *cam++ = *aa++;
5125:       /* off-diagonal portion of A */
5126:       for (j=jo; j<ncols_o; j++) {
5127:         *cam++ = *ba++; bj++;
5128:       }
5129:     }
5130:   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5131:   MatSeqAIJRestoreArrayRead(mpimat->A,&aav);
5132:   MatSeqAIJRestoreArrayRead(mpimat->B,&bav);
5133:   PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5134:   return(0);
5135: }

5137: /*@
5138:      MatMPIAIJGetLocalMatMerge - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
5139:           mlocal rows and n columns, where n is the sum of the number of columns of the diagonal and off-diagonal parts

5141:     Not Collective

5143:    Input Parameters:
5144: +    A - the matrix
5145: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5147:    Output Parameter:
5148: +    glob - sequential IS with global indices associated with the columns of the local sequential matrix generated (can be NULL)
5149: -    A_loc - the local sequential matrix generated

5151:     Level: developer

5153:    Notes:
5154:      This is different from MatMPIAIJGetLocalMat() since the first columns in the returned matrix are those associated with the diagonal part, followed by those associated with the off-diagonal part (in its local ordering)
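
     For example (an illustrative sketch, not part of the original manual page; A is assumed to be an
     assembled MATMPIAIJ matrix), the global column associated with each local column of A_loc can be read
     from glob:
.vb
     Mat            A_loc;
     IS             glob;
     const PetscInt *gidx;

     MatMPIAIJGetLocalMatMerge(A,MAT_INITIAL_MATRIX,&glob,&A_loc);
     ISGetIndices(glob,&gidx);       // gidx[j] is the global column of local column j of A_loc
     ISRestoreIndices(glob,&gidx);
     ISDestroy(&glob);
     MatDestroy(&A_loc);
.ve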

5156: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat(), MatMPIAIJGetLocalMatCondensed()

5158: @*/
5159: PetscErrorCode MatMPIAIJGetLocalMatMerge(Mat A,MatReuse scall,IS *glob,Mat *A_loc)
5160: {
5162:   Mat            Ao,Ad;
5163:   const PetscInt *cmap;
5164:   PetscMPIInt    size;
5165:   PetscErrorCode (*f)(Mat,MatReuse,IS*,Mat*);

5168:   MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&cmap);
5169:   MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
5170:   if (size == 1) {
5171:     if (scall == MAT_INITIAL_MATRIX) {
5172:       PetscObjectReference((PetscObject)Ad);
5173:       *A_loc = Ad;
5174:     } else if (scall == MAT_REUSE_MATRIX) {
5175:       MatCopy(Ad,*A_loc,SAME_NONZERO_PATTERN);
5176:     }
5177:     if (glob) { ISCreateStride(PetscObjectComm((PetscObject)Ad),Ad->cmap->n,Ad->cmap->rstart,1,glob); }
5178:     return(0);
5179:   }
5180:   PetscObjectQueryFunction((PetscObject)A,"MatMPIAIJGetLocalMatMerge_C",&f);
5181:   PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
5182:   if (f) {
5183:     (*f)(A,scall,glob,A_loc);
5184:   } else {
5185:     Mat_SeqAIJ        *a = (Mat_SeqAIJ*)Ad->data;
5186:     Mat_SeqAIJ        *b = (Mat_SeqAIJ*)Ao->data;
5187:     Mat_SeqAIJ        *c;
5188:     PetscInt          *ai = a->i, *aj = a->j;
5189:     PetscInt          *bi = b->i, *bj = b->j;
5190:     PetscInt          *ci,*cj;
5191:     const PetscScalar *aa,*ba;
5192:     PetscScalar       *ca;
5193:     PetscInt          i,j,am,dn,on;

5195:     MatGetLocalSize(Ad,&am,&dn);
5196:     MatGetLocalSize(Ao,NULL,&on);
5197:     MatSeqAIJGetArrayRead(Ad,&aa);
5198:     MatSeqAIJGetArrayRead(Ao,&ba);
5199:     if (scall == MAT_INITIAL_MATRIX) {
5200:       PetscInt k;
5201:       PetscMalloc1(1+am,&ci);
5202:       PetscMalloc1(ai[am]+bi[am],&cj);
5203:       PetscMalloc1(ai[am]+bi[am],&ca);
5204:       ci[0] = 0;
5205:       for (i=0,k=0; i<am; i++) {
5206:         const PetscInt ncols_o = bi[i+1] - bi[i];
5207:         const PetscInt ncols_d = ai[i+1] - ai[i];
5208:         ci[i+1] = ci[i] + ncols_o + ncols_d;
5209:         /* diagonal portion of A */
5210:         for (j=0; j<ncols_d; j++,k++) {
5211:           cj[k] = *aj++;
5212:           ca[k] = *aa++;
5213:         }
5214:         /* off-diagonal portion of A */
5215:         for (j=0; j<ncols_o; j++,k++) {
5216:           cj[k] = dn + *bj++;
5217:           ca[k] = *ba++;
5218:         }
5219:       }
5220:       /* put together the new matrix */
5221:       MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,dn+on,ci,cj,ca,A_loc);
5222:       /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5223:       /* Since these are PETSc arrays, change flags to free them as necessary. */
5224:       c          = (Mat_SeqAIJ*)(*A_loc)->data;
5225:       c->free_a  = PETSC_TRUE;
5226:       c->free_ij = PETSC_TRUE;
5227:       c->nonew   = 0;
5228:       MatSetType(*A_loc,((PetscObject)Ad)->type_name);
5229:     } else if (scall == MAT_REUSE_MATRIX) {
5230: #if defined(PETSC_HAVE_DEVICE)
5231:       (*A_loc)->offloadmask = PETSC_OFFLOAD_CPU;
5232: #endif
5233:       c  = (Mat_SeqAIJ*)(*A_loc)->data;
5234:       ca = c->a;
5235:       for (i=0; i<am; i++) {
5236:         const PetscInt ncols_d = ai[i+1] - ai[i];
5237:         const PetscInt ncols_o = bi[i+1] - bi[i];
5238:         /* diagonal portion of A */
5239:         for (j=0; j<ncols_d; j++) *ca++ = *aa++;
5240:         /* off-diagonal portion of A */
5241:         for (j=0; j<ncols_o; j++) *ca++ = *ba++;
5242:       }
5243:     } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5244:     MatSeqAIJRestoreArrayRead(Ad,&aa);
5245:     MatSeqAIJRestoreArrayRead(Ao,&ba);
5246:     if (glob) {
5247:       PetscInt cst, *gidx;

5249:       MatGetOwnershipRangeColumn(A,&cst,NULL);
5250:       PetscMalloc1(dn+on,&gidx);
5251:       for (i=0; i<dn; i++) gidx[i]    = cst + i;
5252:       for (i=0; i<on; i++) gidx[i+dn] = cmap[i];
5253:       ISCreateGeneral(PetscObjectComm((PetscObject)Ad),dn+on,gidx,PETSC_OWN_POINTER,glob);
5254:     }
5255:   }
5256:   PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5257:   return(0);
5258: }

5260: /*@C
5261:      MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns

5263:     Not Collective

5265:    Input Parameters:
5266: +    A - the matrix
5267: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5268: -    row, col - index sets of rows and columns to extract (or NULL)

5270:    Output Parameter:
5271: .    A_loc - the local sequential matrix generated
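
   For example (an illustrative sketch, not part of the original manual page; A is assumed to be an assembled
   MATMPIAIJ matrix), passing NULL for both index sets extracts all local rows and all of their nonzero columns:
.vb
     Mat A_loc;

     MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_loc);
     // ... the values of A change, its nonzero pattern does not ...
     MatMPIAIJGetLocalMatCondensed(A,MAT_REUSE_MATRIX,NULL,NULL,&A_loc);
     MatDestroy(&A_loc);
.ve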

5273:     Level: developer

5275: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()

5277: @*/
5278: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
5279: {
5280:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5282:   PetscInt       i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
5283:   IS             isrowa,iscola;
5284:   Mat            *aloc;
5285:   PetscBool      match;

5288:   PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
5289:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5290:   PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
5291:   if (!row) {
5292:     start = A->rmap->rstart; end = A->rmap->rend;
5293:     ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
5294:   } else {
5295:     isrowa = *row;
5296:   }
5297:   if (!col) {
5298:     start = A->cmap->rstart;
5299:     cmap  = a->garray;
5300:     nzA   = a->A->cmap->n;
5301:     nzB   = a->B->cmap->n;
5302:     PetscMalloc1(nzA+nzB, &idx);
5303:     ncols = 0;
5304:     for (i=0; i<nzB; i++) {
5305:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5306:       else break;
5307:     }
5308:     imark = i;
5309:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;
5310:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
5311:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
5312:   } else {
5313:     iscola = *col;
5314:   }
5315:   if (scall != MAT_INITIAL_MATRIX) {
5316:     PetscMalloc1(1,&aloc);
5317:     aloc[0] = *A_loc;
5318:   }
5319:   MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
5320:   if (!col) { /* attach global id of condensed columns */
5321:     PetscObjectCompose((PetscObject)aloc[0],"_petsc_GetLocalMatCondensed_iscol",(PetscObject)iscola);
5322:   }
5323:   *A_loc = aloc[0];
5324:   PetscFree(aloc);
5325:   if (!row) {
5326:     ISDestroy(&isrowa);
5327:   }
5328:   if (!col) {
5329:     ISDestroy(&iscola);
5330:   }
5331:   PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
5332:   return(0);
5333: }

5335: /*
5336:  * Create a sequential AIJ matrix based on row indices; the whole row (all of its columns) is extracted once a row index is matched.
5337:  * Rows can be local or remote. The routine is designed to be scalable in memory so that nothing is based
5338:  * on a global size.
5339:  * */
5340: PetscErrorCode MatCreateSeqSubMatrixWithRows_Private(Mat P,IS rows,Mat *P_oth)
5341: {
5342:   Mat_MPIAIJ               *p=(Mat_MPIAIJ*)P->data;
5343:   Mat_SeqAIJ               *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data,*p_oth;
5344:   PetscInt                 plocalsize,nrows,*ilocal,*oilocal,i,lidx,*nrcols,*nlcols,ncol;
5345:   PetscMPIInt              owner;
5346:   PetscSFNode              *iremote,*oiremote;
5347:   const PetscInt           *lrowindices;
5348:   PetscErrorCode           ierr;
5349:   PetscSF                  sf,osf;
5350:   PetscInt                 pcstart,*roffsets,*loffsets,*pnnz,j;
5351:   PetscInt                 ontotalcols,dntotalcols,ntotalcols,nout;
5352:   MPI_Comm                 comm;
5353:   ISLocalToGlobalMapping   mapping;

5356:   PetscObjectGetComm((PetscObject)P,&comm);
5357:   /* plocalsize is the number of roots
5358:    * nrows is the number of leaves
5359:    * */
5360:   MatGetLocalSize(P,&plocalsize,NULL);
5361:   ISGetLocalSize(rows,&nrows);
5362:   PetscCalloc1(nrows,&iremote);
5363:   ISGetIndices(rows,&lrowindices);
5364:   for (i=0;i<nrows;i++) {
5365:     /* Find a remote index and an owner for a row
5366:      * The row could be local or remote
5367:      * */
5368:     owner = 0;
5369:     lidx  = 0;
5370:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,&lidx);
5371:     iremote[i].index = lidx;
5372:     iremote[i].rank  = owner;
5373:   }
5374:   /* Create SF to communicate how many nonzero columns for each row */
5375:   PetscSFCreate(comm,&sf);
5376:   /* SF will figure out the number of nonzero columns for each row, and their
5377:    * offsets
5378:    * */
5379:   PetscSFSetGraph(sf,plocalsize,nrows,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5380:   PetscSFSetFromOptions(sf);
5381:   PetscSFSetUp(sf);

5383:   PetscCalloc1(2*(plocalsize+1),&roffsets);
5384:   PetscCalloc1(2*plocalsize,&nrcols);
5385:   PetscCalloc1(nrows,&pnnz);
5386:   roffsets[0] = 0;
5387:   roffsets[1] = 0;
5388:   for (i=0;i<plocalsize;i++) {
5389:     /* diag */
5390:     nrcols[i*2+0] = pd->i[i+1] - pd->i[i];
5391:     /* off diag */
5392:     nrcols[i*2+1] = po->i[i+1] - po->i[i];
5393:     /* compute offsets so that we know the relative location of each row */
5394:     roffsets[(i+1)*2+0] = roffsets[i*2+0] + nrcols[i*2+0];
5395:     roffsets[(i+1)*2+1] = roffsets[i*2+1] + nrcols[i*2+1];
5396:   }
5397:   PetscCalloc1(2*nrows,&nlcols);
5398:   PetscCalloc1(2*nrows,&loffsets);
5399:   /* 'r' means root, and 'l' means leaf */
5400:   PetscSFBcastBegin(sf,MPIU_2INT,nrcols,nlcols,MPI_REPLACE);
5401:   PetscSFBcastBegin(sf,MPIU_2INT,roffsets,loffsets,MPI_REPLACE);
5402:   PetscSFBcastEnd(sf,MPIU_2INT,nrcols,nlcols,MPI_REPLACE);
5403:   PetscSFBcastEnd(sf,MPIU_2INT,roffsets,loffsets,MPI_REPLACE);
5404:   PetscSFDestroy(&sf);
5405:   PetscFree(roffsets);
5406:   PetscFree(nrcols);
5407:   dntotalcols = 0;
5408:   ontotalcols = 0;
5409:   ncol = 0;
5410:   for (i=0;i<nrows;i++) {
5411:     pnnz[i] = nlcols[i*2+0] + nlcols[i*2+1];
5412:     ncol = PetscMax(pnnz[i],ncol);
5413:     /* diag */
5414:     dntotalcols += nlcols[i*2+0];
5415:     /* off diag */
5416:     ontotalcols += nlcols[i*2+1];
5417:   }
5418:   /* We do not need to figure out the exact number of columns
5419:    * since all the calculations will be done by going through the raw data
5420:    * */
5421:   MatCreateSeqAIJ(PETSC_COMM_SELF,nrows,ncol,0,pnnz,P_oth);
5422:   MatSetUp(*P_oth);
5423:   PetscFree(pnnz);
5424:   p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5425:   /* diag */
5426:   PetscCalloc1(dntotalcols,&iremote);
5427:   /* off diag */
5428:   PetscCalloc1(ontotalcols,&oiremote);
5429:   /* diag */
5430:   PetscCalloc1(dntotalcols,&ilocal);
5431:   /* off diag */
5432:   PetscCalloc1(ontotalcols,&oilocal);
5433:   dntotalcols = 0;
5434:   ontotalcols = 0;
5435:   ntotalcols  = 0;
5436:   for (i=0;i<nrows;i++) {
5437:     owner = 0;
5438:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,NULL);
5439:     /* Set iremote for diag matrix */
5440:     for (j=0;j<nlcols[i*2+0];j++) {
5441:       iremote[dntotalcols].index   = loffsets[i*2+0] + j;
5442:       iremote[dntotalcols].rank    = owner;
5443:       /* P_oth is seqAIJ so ilocal needs to point to the first part of memory */
5444:       ilocal[dntotalcols++]        = ntotalcols++;
5445:     }
5446:     /* off diag */
5447:     for (j=0;j<nlcols[i*2+1];j++) {
5448:       oiremote[ontotalcols].index   = loffsets[i*2+1] + j;
5449:       oiremote[ontotalcols].rank    = owner;
5450:       oilocal[ontotalcols++]        = ntotalcols++;
5451:     }
5452:   }
5453:   ISRestoreIndices(rows,&lrowindices);
5454:   PetscFree(loffsets);
5455:   PetscFree(nlcols);
5456:   PetscSFCreate(comm,&sf);
5457:   /* P serves as roots and P_oth as leaves
5458:    * Diag matrix
5459:    * */
5460:   PetscSFSetGraph(sf,pd->i[plocalsize],dntotalcols,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5461:   PetscSFSetFromOptions(sf);
5462:   PetscSFSetUp(sf);

5464:   PetscSFCreate(comm,&osf);
5465:   /* Off diag */
5466:   PetscSFSetGraph(osf,po->i[plocalsize],ontotalcols,oilocal,PETSC_OWN_POINTER,oiremote,PETSC_OWN_POINTER);
5467:   PetscSFSetFromOptions(osf);
5468:   PetscSFSetUp(osf);
5469:   /* We operate on the matrix internal data to save memory */
5470:   PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a,MPI_REPLACE);
5471:   PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a,MPI_REPLACE);
5472:   MatGetOwnershipRangeColumn(P,&pcstart,NULL);
5473:   /* Convert to global indices for diag matrix */
5474:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] += pcstart;
5475:   PetscSFBcastBegin(sf,MPIU_INT,pd->j,p_oth->j,MPI_REPLACE);
5476:   /* We want P_oth to store global indices */
5477:   ISLocalToGlobalMappingCreate(comm,1,p->B->cmap->n,p->garray,PETSC_COPY_VALUES,&mapping);
5478:   /* Use memory scalable approach */
5479:   ISLocalToGlobalMappingSetType(mapping,ISLOCALTOGLOBALMAPPINGHASH);
5480:   ISLocalToGlobalMappingApply(mapping,po->i[plocalsize],po->j,po->j);
5481:   PetscSFBcastBegin(osf,MPIU_INT,po->j,p_oth->j,MPI_REPLACE);
5482:   PetscSFBcastEnd(sf,MPIU_INT,pd->j,p_oth->j,MPI_REPLACE);
5483:   /* Convert back to local indices */
5484:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] -= pcstart;
5485:   PetscSFBcastEnd(osf,MPIU_INT,po->j,p_oth->j,MPI_REPLACE);
5486:   nout = 0;
5487:   ISGlobalToLocalMappingApply(mapping,IS_GTOLM_DROP,po->i[plocalsize],po->j,&nout,po->j);
5488:   if (nout != po->i[plocalsize]) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP,"n %D does not equal nout %D \n",po->i[plocalsize],nout);
5489:   ISLocalToGlobalMappingDestroy(&mapping);
5490:   /* Exchange values */
5491:   PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a,MPI_REPLACE);
5492:   PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a,MPI_REPLACE);
5493:   /* Stop PETSc from shrinking memory */
5494:   for (i=0;i<nrows;i++) p_oth->ilen[i] = p_oth->imax[i];
5495:   MatAssemblyBegin(*P_oth,MAT_FINAL_ASSEMBLY);
5496:   MatAssemblyEnd(*P_oth,MAT_FINAL_ASSEMBLY);
5497:   /* Attach PetscSF objects to P_oth so that we can reuse them later */
5498:   PetscObjectCompose((PetscObject)*P_oth,"diagsf",(PetscObject)sf);
5499:   PetscObjectCompose((PetscObject)*P_oth,"offdiagsf",(PetscObject)osf);
5500:   PetscSFDestroy(&sf);
5501:   PetscSFDestroy(&osf);
5502:   return(0);
5503: }

5505: /*
5506:  * Creates a SeqAIJ matrix by taking rows of B that equal to nonzero columns of local A
5507:  * This supports MPIAIJ and MAIJ
5508:  * */
5509: PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat A,Mat P,PetscInt dof,MatReuse reuse,Mat *P_oth)
5510: {
5511:   Mat_MPIAIJ            *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
5512:   Mat_SeqAIJ            *p_oth;
5513:   Mat_SeqAIJ            *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
5514:   IS                    rows,map;
5515:   PetscHMapI            hamp;
5516:   PetscInt              i,htsize,*rowindices,off,*mapping,key,count;
5517:   MPI_Comm              comm;
5518:   PetscSF               sf,osf;
5519:   PetscBool             has;
5520:   PetscErrorCode        ierr;

5523:   PetscObjectGetComm((PetscObject)A,&comm);
5524:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,P,0,0);
5525:   /* If it is the first time, create an index set of off-diag nonzero columns of A,
5526:    *  and then create a submatrix (that often is an overlapping matrix)
5527:    * */
5528:   if (reuse == MAT_INITIAL_MATRIX) {
5529:     /* Use a hash table to figure out unique keys */
5530:     PetscHMapICreate(&hamp);
5531:     PetscHMapIResize(hamp,a->B->cmap->n);
5532:     PetscCalloc1(a->B->cmap->n,&mapping);
5533:     count = 0;
5534:     /* Assume that a->garray is sorted, otherwise the following does not make sense */
5535:     for (i=0;i<a->B->cmap->n;i++) {
5536:       key  = a->garray[i]/dof;
5537:       PetscHMapIHas(hamp,key,&has);
5538:       if (!has) {
5539:         mapping[i] = count;
5540:         PetscHMapISet(hamp,key,count++);
5541:       } else {
5542:         /* Current 'i' has the same value as the previous step */
5543:         mapping[i] = count-1;
5544:       }
5545:     }
5546:     ISCreateGeneral(comm,a->B->cmap->n,mapping,PETSC_OWN_POINTER,&map);
5547:     PetscHMapIGetSize(hamp,&htsize);
5548:     if (htsize!=count) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP," Size of hash map %D is inconsistent with count %D \n",htsize,count);
5549:     PetscCalloc1(htsize,&rowindices);
5550:     off = 0;
5551:     PetscHMapIGetKeys(hamp,&off,rowindices);
5552:     PetscHMapIDestroy(&hamp);
5553:     PetscSortInt(htsize,rowindices);
5554:     ISCreateGeneral(comm,htsize,rowindices,PETSC_OWN_POINTER,&rows);
5555:     /* In case the matrix was already created but the user wants to recreate it */
5556:     MatDestroy(P_oth);
5557:     MatCreateSeqSubMatrixWithRows_Private(P,rows,P_oth);
5558:     PetscObjectCompose((PetscObject)*P_oth,"aoffdiagtopothmapping",(PetscObject)map);
5559:     ISDestroy(&map);
5560:     ISDestroy(&rows);
5561:   } else if (reuse == MAT_REUSE_MATRIX) {
5562:     /* If the matrix was already created, we simply update the values using the SF objects
5563:      * that were attached to the matrix earlier.
5564:      *  */
5565:     PetscObjectQuery((PetscObject)*P_oth,"diagsf",(PetscObject*)&sf);
5566:     PetscObjectQuery((PetscObject)*P_oth,"offdiagsf",(PetscObject*)&osf);
5567:     if (!sf || !osf) SETERRQ(comm,PETSC_ERR_ARG_NULL,"Matrix is not initialized yet");
5568:     p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5569:     /* Update values in place */
5570:     PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a,MPI_REPLACE);
5571:     PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a,MPI_REPLACE);
5572:     PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a,MPI_REPLACE);
5573:     PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a,MPI_REPLACE);
5574:   } else SETERRQ(comm,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown reuse type");
5575:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,P,0,0);
5576:   return(0);
5577: }

5579: /*@C
5580:     MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of local A

5582:     Collective on Mat

5584:    Input Parameters:
5585: +    A,B - the matrices in mpiaij format
5586: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5587: -    rowb, colb - index sets of rows and columns of B to extract (or NULL)

5589:    Output Parameter:
5590: +    rowb, colb - index sets of rows and columns of B to extract
5591: -    B_seq - the sequential matrix generated

5593:     Level: developer

5595: @*/
5596: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
5597: {
5598:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5600:   PetscInt       *idx,i,start,ncols,nzA,nzB,*cmap,imark;
5601:   IS             isrowb,iscolb;
5602:   Mat            *bseq=NULL;

5605:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5606:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5607:   }
5608:   PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);

5610:   if (scall == MAT_INITIAL_MATRIX) {
5611:     start = A->cmap->rstart;
5612:     cmap  = a->garray;
5613:     nzA   = a->A->cmap->n;
5614:     nzB   = a->B->cmap->n;
5615:     PetscMalloc1(nzA+nzB, &idx);
5616:     ncols = 0;
5617:     for (i=0; i<nzB; i++) {  /* row < local row index */
5618:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5619:       else break;
5620:     }
5621:     imark = i;
5622:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
5623:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5624:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
5625:     ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
5626:   } else {
5627:     if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5628:     isrowb  = *rowb; iscolb = *colb;
5629:     PetscMalloc1(1,&bseq);
5630:     bseq[0] = *B_seq;
5631:   }
5632:   MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
5633:   *B_seq = bseq[0];
5634:   PetscFree(bseq);
5635:   if (!rowb) {
5636:     ISDestroy(&isrowb);
5637:   } else {
5638:     *rowb = isrowb;
5639:   }
5640:   if (!colb) {
5641:     ISDestroy(&iscolb);
5642:   } else {
5643:     *colb = iscolb;
5644:   }
5645:   PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
5646:   return(0);
5647: }

5649: /*
5650:     MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
5651:     of the OFF-DIAGONAL portion of the local part of A

5653:     Collective on Mat

5655:    Input Parameters:
5656: +    A,B - the matrices in mpiaij format
5657: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5659:    Output Parameters:
5660: +    startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5661: .    startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5662: .    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5663: -    B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N

5665:     Developer Notes: This directly accesses information inside the VecScatter associated with the matrix-vector product
5666:      for this matrix. This is not desirable.

5668:     Level: developer
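
    Example usage (a minimal sketch with placeholder MPIAIJ matrices A and B; the cleanup
    mirrors what MatDestroy_MatMatMPIAIJBACKEND() does with these buffers):

       PetscInt  *startsj_s = NULL,*startsj_r = NULL;
       MatScalar *bufa = NULL;
       Mat       B_oth = NULL;

       MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_INITIAL_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);
       /* ... later, when the values of B change but not its nonzero pattern ... */
       MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_REUSE_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);
       PetscFree2(startsj_s,startsj_r);
       PetscFree(bufa);
       MatDestroy(&B_oth);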

5670: */
5671: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
5672: {
5673:   PetscErrorCode         ierr;
5674:   Mat_MPIAIJ             *a=(Mat_MPIAIJ*)A->data;
5675:   Mat_SeqAIJ             *b_oth;
5676:   VecScatter             ctx;
5677:   MPI_Comm               comm;
5678:   const PetscMPIInt      *rprocs,*sprocs;
5679:   const PetscInt         *srow,*rstarts,*sstarts;
5680:   PetscInt               *rowlen,*bufj,*bufJ,ncols = 0,aBn=a->B->cmap->n,row,*b_othi,*b_othj,*rvalues=NULL,*svalues=NULL,*cols,sbs,rbs;
5681:   PetscInt               i,j,k=0,l,ll,nrecvs,nsends,nrows,*rstartsj = NULL,*sstartsj,len;
5682:   PetscScalar            *b_otha,*bufa,*bufA,*vals = NULL;
5683:   MPI_Request            *rwaits = NULL,*swaits = NULL;
5684:   MPI_Status             rstatus;
5685:   PetscMPIInt            size,tag,rank,nsends_mpi,nrecvs_mpi;
5686:   PETSC_UNUSED PetscMPIInt jj;

5689:   PetscObjectGetComm((PetscObject)A,&comm);
5690:   MPI_Comm_size(comm,&size);

5692:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5693:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%d, %d) != (%d,%d)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5694:   }
5695:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
5696:   MPI_Comm_rank(comm,&rank);

5698:   if (size == 1) {
5699:     startsj_s = NULL;
5700:     bufa_ptr  = NULL;
5701:     *B_oth    = NULL;
5702:     return(0);
5703:   }

5705:   ctx = a->Mvctx;
5706:   tag = ((PetscObject)ctx)->tag;

5708:   VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&srow,&sprocs,&sbs);
5709:   /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5710:   VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL/*indices not needed*/,&rprocs,&rbs);
5711:   PetscMPIIntCast(nsends,&nsends_mpi);
5712:   PetscMPIIntCast(nrecvs,&nrecvs_mpi);
5713:   PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);

5715:   if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5716:   if (scall == MAT_INITIAL_MATRIX) {
5717:     /* i-array */
5718:     /*---------*/
5719:     /*  post receives */
5720:     if (nrecvs) {PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);} /* rstarts can be NULL when nrecvs=0 */
5721:     for (i=0; i<nrecvs; i++) {
5722:       rowlen = rvalues + rstarts[i]*rbs;
5723:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
5724:       MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5725:     }

5727:     /* pack the outgoing message */
5728:     PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);

5730:     sstartsj[0] = 0;
5731:     rstartsj[0] = 0;
5732:     len         = 0; /* total length of j or a array to be sent */
5733:     if (nsends) {
5734:       k    = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */
5735:       PetscMalloc1(sbs*(sstarts[nsends]-sstarts[0]),&svalues);
5736:     }
5737:     for (i=0; i<nsends; i++) {
5738:       rowlen = svalues + (sstarts[i]-sstarts[0])*sbs;
5739:       nrows  = sstarts[i+1]-sstarts[i]; /* num of block rows */
5740:       for (j=0; j<nrows; j++) {
5741:         row = srow[k] + B->rmap->range[rank]; /* global row idx */
5742:         for (l=0; l<sbs; l++) {
5743:           MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL); /* rowlength */

5745:           rowlen[j*sbs+l] = ncols;

5747:           len += ncols;
5748:           MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);
5749:         }
5750:         k++;
5751:       }
5752:       MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);

5754:       sstartsj[i+1] = len;  /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5755:     }
5756:     /* recvs and sends of i-array are completed */
5757:     i = nrecvs;
5758:     while (i--) {
5759:       MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5760:     }
5761:     if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5762:     PetscFree(svalues);

5764:     /* allocate buffers for sending j and a arrays */
5765:     PetscMalloc1(len+1,&bufj);
5766:     PetscMalloc1(len+1,&bufa);

5768:     /* create i-array of B_oth */
5769:     PetscMalloc1(aBn+2,&b_othi);

5771:     b_othi[0] = 0;
5772:     len       = 0; /* total length of j or a array to be received */
5773:     k         = 0;
5774:     for (i=0; i<nrecvs; i++) {
5775:       rowlen = rvalues + (rstarts[i]-rstarts[0])*rbs;
5776:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of rows to be received */
5777:       for (j=0; j<nrows; j++) {
5778:         b_othi[k+1] = b_othi[k] + rowlen[j];
5779:         PetscIntSumError(rowlen[j],len,&len);
5780:         k++;
5781:       }
5782:       rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5783:     }
5784:     PetscFree(rvalues);

5786:     /* allocate space for j and a arrays of B_oth */
5787:     PetscMalloc1(b_othi[aBn]+1,&b_othj);
5788:     PetscMalloc1(b_othi[aBn]+1,&b_otha);

5790:     /* j-array */
5791:     /*---------*/
5792:     /*  post receives of j-array */
5793:     for (i=0; i<nrecvs; i++) {
5794:       nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5795:       MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5796:     }

5798:     /* pack the outgoing message j-array */
5799:     if (nsends) k = sstarts[0];
5800:     for (i=0; i<nsends; i++) {
5801:       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5802:       bufJ  = bufj+sstartsj[i];
5803:       for (j=0; j<nrows; j++) {
5804:         row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5805:         for (ll=0; ll<sbs; ll++) {
5806:           MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5807:           for (l=0; l<ncols; l++) {
5808:             *bufJ++ = cols[l];
5809:           }
5810:           MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5811:         }
5812:       }
5813:       MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
5814:     }

5816:     /* recvs and sends of j-array are completed */
5817:     i = nrecvs;
5818:     while (i--) {
5819:       MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5820:     }
5821:     if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5822:   } else if (scall == MAT_REUSE_MATRIX) {
5823:     sstartsj = *startsj_s;
5824:     rstartsj = *startsj_r;
5825:     bufa     = *bufa_ptr;
5826:     b_oth    = (Mat_SeqAIJ*)(*B_oth)->data;
5827:     b_otha   = b_oth->a;
5828: #if defined(PETSC_HAVE_DEVICE)
5829:     (*B_oth)->offloadmask = PETSC_OFFLOAD_CPU;
5830: #endif
5831:   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE, "Unknown MatReuse type");

5833:   /* a-array */
5834:   /*---------*/
5835:   /*  post receives of a-array */
5836:   for (i=0; i<nrecvs; i++) {
5837:     nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5838:     MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
5839:   }

5841:   /* pack the outgoing message a-array */
5842:   if (nsends) k = sstarts[0];
5843:   for (i=0; i<nsends; i++) {
5844:     nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5845:     bufA  = bufa+sstartsj[i];
5846:     for (j=0; j<nrows; j++) {
5847:       row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5848:       for (ll=0; ll<sbs; ll++) {
5849:         MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5850:         for (l=0; l<ncols; l++) {
5851:           *bufA++ = vals[l];
5852:         }
5853:         MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5854:       }
5855:     }
5856:     MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
5857:   }
5858:   /* recvs and sends of a-array are completed */
5859:   i = nrecvs;
5860:   while (i--) {
5861:     MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5862:   }
5863:   if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5864:   PetscFree2(rwaits,swaits);

5866:   if (scall == MAT_INITIAL_MATRIX) {
5867:     /* put together the new matrix */
5868:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);

5870:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5871:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5872:     b_oth          = (Mat_SeqAIJ*)(*B_oth)->data;
5873:     b_oth->free_a  = PETSC_TRUE;
5874:     b_oth->free_ij = PETSC_TRUE;
5875:     b_oth->nonew   = 0;

5877:     PetscFree(bufj);
5878:     if (!startsj_s || !bufa_ptr) {
5879:       PetscFree2(sstartsj,rstartsj);
5880:       PetscFree(bufa_ptr);
5881:     } else {
5882:       *startsj_s = sstartsj;
5883:       *startsj_r = rstartsj;
5884:       *bufa_ptr  = bufa;
5885:     }
5886:   }

5888:   VecScatterRestoreRemote_Private(ctx,PETSC_TRUE,&nsends,&sstarts,&srow,&sprocs,&sbs);
5889:   VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE,&nrecvs,&rstarts,NULL,&rprocs,&rbs);
5890:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
5891:   return(0);
5892: }

5894: /*@C
5895:   MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.

5897:   Not Collective

5899:   Input Parameter:
5900: . A - The matrix in mpiaij format

5902:   Output Parameters:
5903: + lvec - The local vector holding off-process values from the argument to a matrix-vector product
5904: . colmap - A map from global column index to local index into lvec
5905: - multScatter - A scatter from the argument of a matrix-vector product to lvec

5907:   Level: developer
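
  Example usage (a minimal sketch; A is assumed to be an assembled MATMPIAIJ matrix and x a
  compatible global vector, scattered here the same way MatMult_MPIAIJ() does internally):
.vb
  Vec        lvec;
  VecScatter Mvctx;

  MatGetCommunicationStructs(A, &lvec, NULL, &Mvctx);
  VecScatterBegin(Mvctx, x, lvec, INSERT_VALUES, SCATTER_FORWARD);
  VecScatterEnd(Mvctx, x, lvec, INSERT_VALUES, SCATTER_FORWARD);
  /* lvec now holds the off-process entries of x needed by this process */
.ve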

5909: @*/
5910: #if defined(PETSC_USE_CTABLE)
5911: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
5912: #else
5913: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
5914: #endif
5915: {
5916:   Mat_MPIAIJ *a;

5923:   a = (Mat_MPIAIJ*) A->data;
5924:   if (lvec) *lvec = a->lvec;
5925:   if (colmap) *colmap = a->colmap;
5926:   if (multScatter) *multScatter = a->Mvctx;
5927:   return(0);
5928: }

5930: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
5931: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
5932: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat,MatType,MatReuse,Mat*);
5933: #if defined(PETSC_HAVE_MKL_SPARSE)
5934: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat,MatType,MatReuse,Mat*);
5935: #endif
5936: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIBAIJ(Mat,MatType,MatReuse,Mat*);
5937: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
5938: #if defined(PETSC_HAVE_ELEMENTAL)
5939: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
5940: #endif
5941: #if defined(PETSC_HAVE_SCALAPACK)
5942: PETSC_INTERN PetscErrorCode MatConvert_AIJ_ScaLAPACK(Mat,MatType,MatReuse,Mat*);
5943: #endif
5944: #if defined(PETSC_HAVE_HYPRE)
5945: PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
5946: #endif
5947: #if defined(PETSC_HAVE_CUDA)
5948: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
5949: #endif
5950: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
5951: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJKokkos(Mat,MatType,MatReuse,Mat*);
5952: #endif
5953: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat,MatType,MatReuse,Mat*);
5954: PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat,MatType,MatReuse,Mat*);
5955: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_IS_XAIJ(Mat);

5957: /*
5958:     Computes C = A*B as (B'*A')', since computing the MPIDense * MPIAIJ product directly is untenable

5960:                n                       p                          p
5961:         [             ]       [             ]         [                 ]
5962:       m [      A      ]  *  n [       B     ]   =   m [         C       ]
5963:         [             ]       [             ]         [                 ]

5965: */
5966: static PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
5967: {
5969:   Mat            At,Bt,Ct;

5972:   MatTranspose(A,MAT_INITIAL_MATRIX,&At);
5973:   MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
5974:   MatMatMult(Bt,At,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Ct);
5975:   MatDestroy(&At);
5976:   MatDestroy(&Bt);
5977:   MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
5978:   MatDestroy(&Ct);
5979:   return(0);
5980: }

5982: static PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat C)
5983: {
5985:   PetscBool      cisdense;

5988:   if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %d != B->rmap->n %d\n",A->cmap->n,B->rmap->n);
5989:   MatSetSizes(C,A->rmap->n,B->cmap->n,A->rmap->N,B->cmap->N);
5990:   MatSetBlockSizesFromMats(C,A,B);
5991:   PetscObjectTypeCompareAny((PetscObject)C,&cisdense,MATMPIDENSE,MATMPIDENSECUDA,"");
5992:   if (!cisdense) {
5993:     MatSetType(C,((PetscObject)A)->type_name);
5994:   }
5995:   MatSetUp(C);

5997:   C->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
5998:   return(0);
5999: }

6001: /* ----------------------------------------------------------------*/
6002: static PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ_AB(Mat C)
6003: {
6004:   Mat_Product *product = C->product;
6005:   Mat         A = product->A,B=product->B;

6008:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
6009:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);

6011:   C->ops->matmultsymbolic = MatMatMultSymbolic_MPIDense_MPIAIJ;
6012:   C->ops->productsymbolic = MatProductSymbolic_AB;
6013:   return(0);
6014: }

6016: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ(Mat C)
6017: {
6019:   Mat_Product    *product = C->product;

6022:   if (product->type == MATPRODUCT_AB) {
6023:     MatProductSetFromOptions_MPIDense_MPIAIJ_AB(C);
6024:   }
6025:   return(0);
6026: }
6027: /* ----------------------------------------------------------------*/

6029: /*MC
6030:    MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.

6032:    Options Database Keys:
6033: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()

6035:    Level: beginner

6037:    Notes:
6038:     MatSetValues() may be called for this matrix type with a NULL argument for the numerical values;
6039:     in this case the values associated with the rows and columns one passes in are set to zero
6040:     in the matrix.

6042:     MatSetOption(,MAT_STRUCTURE_ONLY,PETSC_TRUE) may be called for this matrix type. In this case no
6043:     space is allocated for the nonzero entries and any entries passed with MatSetValues() are ignored.
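
    Example usage (a minimal sketch; the global sizes and nonzero estimates below are placeholders):
.vb
    Mat      A;
    PetscInt M = 100, N = 100;   /* placeholder global sizes */

    MatCreate(PETSC_COMM_WORLD, &A);
    MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, M, N);
    MatSetType(A, MATMPIAIJ);                    /* or -mat_type mpiaij with MatSetFromOptions() */
    MatMPIAIJSetPreallocation(A, 5, NULL, 2, NULL);
    /* ... MatSetValues(), MatAssemblyBegin()/MatAssemblyEnd() ... */
.ve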

6045: .seealso: MatCreateAIJ()
6046: M*/

6048: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
6049: {
6050:   Mat_MPIAIJ     *b;
6052:   PetscMPIInt    size;

6055:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);

6057:   PetscNewLog(B,&b);
6058:   B->data       = (void*)b;
6059:   PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
6060:   B->assembled  = PETSC_FALSE;
6061:   B->insertmode = NOT_SET_VALUES;
6062:   b->size       = size;

6064:   MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);

6066:   /* build cache for off array entries formed */
6067:   MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);

6069:   b->donotstash  = PETSC_FALSE;
6070:   b->colmap      = NULL;
6071:   b->garray      = NULL;
6072:   b->roworiented = PETSC_TRUE;

6074:   /* stuff used for matrix vector multiply */
6075:   b->lvec  = NULL;
6076:   b->Mvctx = NULL;

6078:   /* stuff for MatGetRow() */
6079:   b->rowindices   = NULL;
6080:   b->rowvalues    = NULL;
6081:   b->getrowactive = PETSC_FALSE;

6083:   /* flexible pointer used in CUSPARSE classes */
6084:   b->spptr = NULL;

6086:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);
6087:   PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);
6088:   PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);
6089:   PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);
6090:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);
6091:   PetscObjectComposeFunction((PetscObject)B,"MatResetPreallocation_C",MatResetPreallocation_MPIAIJ);
6092:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);
6093:   PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);
6094:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);
6095:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijsell_C",MatConvert_MPIAIJ_MPIAIJSELL);
6096: #if defined(PETSC_HAVE_CUDA)
6097:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcusparse_C",MatConvert_MPIAIJ_MPIAIJCUSPARSE);
6098: #endif
6099: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
6100:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijkokkos_C",MatConvert_MPIAIJ_MPIAIJKokkos);
6101: #endif
6102: #if defined(PETSC_HAVE_MKL_SPARSE)
6103:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijmkl_C",MatConvert_MPIAIJ_MPIAIJMKL);
6104: #endif
6105:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);
6106:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpibaij_C",MatConvert_MPIAIJ_MPIBAIJ);
6107:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);
6108:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpidense_C",MatConvert_MPIAIJ_MPIDense);
6109: #if defined(PETSC_HAVE_ELEMENTAL)
6110:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);
6111: #endif
6112: #if defined(PETSC_HAVE_SCALAPACK)
6113:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_scalapack_C",MatConvert_AIJ_ScaLAPACK);
6114: #endif
6115:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_XAIJ_IS);
6116:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisell_C",MatConvert_MPIAIJ_MPISELL);
6117: #if defined(PETSC_HAVE_HYPRE)
6118:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);
6119:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_transpose_mpiaij_mpiaij_C",MatProductSetFromOptions_Transpose_AIJ_AIJ);
6120: #endif
6121:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_is_mpiaij_C",MatProductSetFromOptions_IS_XAIJ);
6122:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpiaij_mpiaij_C",MatProductSetFromOptions_MPIAIJ);
6123:   PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
6124:   return(0);
6125: }

6127: /*@C
6128:      MatCreateMPIAIJWithSplitArrays - creates an MPI AIJ matrix using arrays that contain the "diagonal"
6129:          and "off-diagonal" part of the matrix in CSR format.

6131:    Collective

6133:    Input Parameters:
6134: +  comm - MPI communicator
6135: .  m - number of local rows (Cannot be PETSC_DECIDE)
6136: .  n - This value should be the same as the local size used in creating the
6137:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
6138:        calculated if N is given). For square matrices n is almost always m.
6139: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
6140: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
6141: .   i - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
6142: .   j - column indices, which must be local, i.e., based off the start column of the diagonal portion
6143: .   a - matrix values
6144: .   oi - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix
6145: .   oj - column indices, which must be global, representing global columns in the MPIAIJ matrix
6146: -   oa - matrix values

6148:    Output Parameter:
6149: .   mat - the matrix

6151:    Level: advanced

6153:    Notes:
6154:        The i, j, a, oi, oj, and oa arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
6155:        must free the arrays once the matrix has been destroyed and not before.

6157:        The i and j indices are 0 based

6159:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix

6161:        This sets local rows and cannot be used to set off-processor values.

6163:        Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
6164:        legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
6165:        not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
6166:        the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
6167:        keep track of the underlying array. Use MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
6168:        communication if it is known that only local entries will be set.
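
   Example usage (a minimal sketch that assumes exactly two MPI ranks building the 2x2 matrix
   [1 2; 3 4]; in real code the passed arrays must remain valid until the matrix is destroyed):
.vb
   PetscInt    i[]  = {0,1}, j[]  = {0};   /* "diagonal" block: one entry, local column 0 */
   PetscInt    oi[] = {0,1}, oj[1];        /* "off-diagonal" block: one entry, global column */
   PetscScalar a[1], oa[1];
   Mat         A;
   PetscMPIInt rank;

   MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
   a[0]  = rank ? 4.0 : 1.0;
   oj[0] = rank ? 0   : 1;
   oa[0] = rank ? 3.0 : 2.0;
   MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD, 1, 1, PETSC_DETERMINE, PETSC_DETERMINE, i, j, a, oi, oj, oa, &A);
.ve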

6170: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
6171:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
6172: @*/
6173: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
6174: {
6176:   Mat_MPIAIJ     *maij;

6179:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
6180:   if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
6181:   if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
6182:   MatCreate(comm,mat);
6183:   MatSetSizes(*mat,m,n,M,N);
6184:   MatSetType(*mat,MATMPIAIJ);
6185:   maij = (Mat_MPIAIJ*) (*mat)->data;

6187:   (*mat)->preallocated = PETSC_TRUE;

6189:   PetscLayoutSetUp((*mat)->rmap);
6190:   PetscLayoutSetUp((*mat)->cmap);

6192:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
6193:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);

6195:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
6196:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
6197:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
6198:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
6199:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
6200:   return(0);
6201: }

6203: /*
6204:     Special version for direct calls from Fortran
6205: */
6206: #include <petsc/private/fortranimpl.h>

6208: /* Change these macros so they can be used in a void function */
6209: #undef CHKERRQ
6210: #define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
6211: #undef SETERRQ2
6212: #define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
6213: #undef SETERRQ3
6214: #define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
6215: #undef SETERRQ
6216: #define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)
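
/*
   A rough sketch of why the redefinitions above are needed: the Fortran-callable wrapper below
   returns void, while the normal CHKERRQ() expansion ends with a "return" of the error code,
   which would not compile in a void function. Schematically (not the exact macro bodies):

      CHKERRQ(ierr);        ~  if (ierr) { log error; return ierr; }       (needs a non-void function)
      CHKERRABORT(c,ierr);  ~  if (ierr) { log error; MPI_Abort(c,ierr); } (fine in a void function)
*/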

6218: #if defined(PETSC_HAVE_FORTRAN_CAPS)
6219: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
6220: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
6221: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
6222: #else
6223: #endif
6224: PETSC_EXTERN void matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
6225: {
6226:   Mat            mat  = *mmat;
6227:   PetscInt       m    = *mm, n = *mn;
6228:   InsertMode     addv = *maddv;
6229:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
6230:   PetscScalar    value;

6233:   MatCheckPreallocated(mat,1);
6234:   if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
6235:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
6236:   {
6237:     PetscInt  i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
6238:     PetscInt  cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
6239:     PetscBool roworiented = aij->roworiented;

6241:     /* Some Variables required in the macro */
6242:     Mat        A                    = aij->A;
6243:     Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
6244:     PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
6245:     MatScalar  *aa                  = a->a;
6246:     PetscBool  ignorezeroentries    = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
6247:     Mat        B                    = aij->B;
6248:     Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
6249:     PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
6250:     MatScalar  *ba                  = b->a;
6251:     /* The variable below is only used in the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
6252:      * cannot use "#if defined" inside a macro. */
6253:     PETSC_UNUSED PetscBool inserted = PETSC_FALSE;

6255:     PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
6256:     PetscInt  nonew = a->nonew;
6257:     MatScalar *ap1,*ap2;

6260:     for (i=0; i<m; i++) {
6261:       if (im[i] < 0) continue;
6262:       if (PetscUnlikelyDebug(im[i] >= mat->rmap->N)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
6263:       if (im[i] >= rstart && im[i] < rend) {
6264:         row      = im[i] - rstart;
6265:         lastcol1 = -1;
6266:         rp1      = aj + ai[row];
6267:         ap1      = aa + ai[row];
6268:         rmax1    = aimax[row];
6269:         nrow1    = ailen[row];
6270:         low1     = 0;
6271:         high1    = nrow1;
6272:         lastcol2 = -1;
6273:         rp2      = bj + bi[row];
6274:         ap2      = ba + bi[row];
6275:         rmax2    = bimax[row];
6276:         nrow2    = bilen[row];
6277:         low2     = 0;
6278:         high2    = nrow2;

6280:         for (j=0; j<n; j++) {
6281:           if (roworiented) value = v[i*n+j];
6282:           else value = v[i+j*m];
6283:           if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
6284:           if (in[j] >= cstart && in[j] < cend) {
6285:             col = in[j] - cstart;
6286:             MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
6287: #if defined(PETSC_HAVE_DEVICE)
6288:             if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) A->offloadmask = PETSC_OFFLOAD_CPU;
6289: #endif
6290:           } else if (in[j] < 0) continue;
6291:           else if (PetscUnlikelyDebug(in[j] >= mat->cmap->N)) {
6292:             /* extra brace on SETERRQ2() is required for --with-errorchecking=0 - due to the next 'else' clause */
6293:             SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
6294:           } else {
6295:             if (mat->was_assembled) {
6296:               if (!aij->colmap) {
6297:                 MatCreateColmap_MPIAIJ_Private(mat);
6298:               }
6299: #if defined(PETSC_USE_CTABLE)
6300:               PetscTableFind(aij->colmap,in[j]+1,&col);
6301:               col--;
6302: #else
6303:               col = aij->colmap[in[j]] - 1;
6304: #endif
6305:               if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
6306:                 MatDisAssemble_MPIAIJ(mat);
6307:                 col  =  in[j];
6308:                 /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
6309:                 B        = aij->B;
6310:                 b        = (Mat_SeqAIJ*)B->data;
6311:                 bimax    = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
6312:                 rp2      = bj + bi[row];
6313:                 ap2      = ba + bi[row];
6314:                 rmax2    = bimax[row];
6315:                 nrow2    = bilen[row];
6316:                 low2     = 0;
6317:                 high2    = nrow2;
6318:                 bm       = aij->B->rmap->n;
6319:                 ba       = b->a;
6320:                 inserted = PETSC_FALSE;
6321:               }
6322:             } else col = in[j];
6323:             MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
6324: #if defined(PETSC_HAVE_DEVICE)
6325:             if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) B->offloadmask = PETSC_OFFLOAD_CPU;
6326: #endif
6327:           }
6328:         }
6329:       } else if (!aij->donotstash) {
6330:         if (roworiented) {
6331:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6332:         } else {
6333:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6334:         }
6335:       }
6336:     }
6337:   }
6338:   PetscFunctionReturnVoid();
6339: }

6341: typedef struct {
6342:   Mat       *mp;    /* intermediate products */
6343:   PetscBool *mptmp; /* is the intermediate product temporary? */
6344:   PetscInt  cp;     /* number of intermediate products */

6346:   /* support for MatGetBrowsOfAoCols_MPIAIJ for P_oth */
6347:   PetscInt    *startsj_s,*startsj_r;
6348:   PetscScalar *bufa;
6349:   Mat         P_oth;

6351:   /* may take advantage of merging product->B */
6352:   Mat Bloc;

6354:   /* cuSPARSE does not support splitting the symbolic and numeric phases.
6355:      When api_user is true, we don't need to update the numerical values
6356:      of the temporary storage */
6357:   PetscBool reusesym;

6359:   /* support for COO values insertion */
6360:   PetscScalar  *coo_v,*coo_w;
6361:   PetscInt     **own;
6362:   PetscInt     **off;
6363:   PetscBool    hasoffproc; /* if true, non-local values insertion (i.e. AtB or PtAP) */
6364:   PetscSF      sf; /* used for non-local values insertion and memory malloc */
6365:   PetscMemType mtype;

6367:   /* customization */
6368:   PetscBool abmerge;
6369:   PetscBool P_oth_bind;
6370: } MatMatMPIAIJBACKEND;
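
/*
   A minimal sketch of how this backend is typically exercised through the MatProduct API
   (A and P here are placeholder matrices of a device subclass of MATMPIAIJ, e.g. MATMPIAIJCUSPARSE;
   the option shown matches the customization handled in MatProductSymbolic_MPIAIJBACKEND() below):

      Mat C;

      MatProductCreate(A, P, NULL, &C);
      MatProductSetType(C, MATPRODUCT_PtAP);
      MatProductSetFromOptions(C);    // e.g. -matproduct_ptap_backend_pothbind
      MatProductSymbolic(C);          // builds the intermediate products and the COO pattern
      MatProductNumeric(C);           // fills the values via MatSetValuesCOO()
*/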

6372: PetscErrorCode MatDestroy_MatMatMPIAIJBACKEND(void *data)
6373: {
6374:   MatMatMPIAIJBACKEND *mmdata = (MatMatMPIAIJBACKEND*)data;
6375:   PetscInt            i;
6376:   PetscErrorCode      ierr;

6379:   PetscFree2(mmdata->startsj_s,mmdata->startsj_r);
6380:   PetscFree(mmdata->bufa);
6381:   PetscSFFree(mmdata->sf,mmdata->mtype,mmdata->coo_v);
6382:   PetscSFFree(mmdata->sf,mmdata->mtype,mmdata->coo_w);
6383:   MatDestroy(&mmdata->P_oth);
6384:   MatDestroy(&mmdata->Bloc);
6385:   PetscSFDestroy(&mmdata->sf);
6386:   for (i = 0; i < mmdata->cp; i++) {
6387:     MatDestroy(&mmdata->mp[i]);
6388:   }
6389:   PetscFree(mmdata->mp);
6390:   PetscFree(mmdata->mptmp);
6391:   PetscFree(mmdata->own[0]);
6392:   PetscFree(mmdata->own);
6393:   PetscFree(mmdata->off[0]);
6394:   PetscFree(mmdata->off);
6395:   PetscFree(mmdata);
6396:   return(0);
6397: }

6399: static PetscErrorCode MatSeqAIJCopySubArray(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
6400: {
6401:   PetscErrorCode (*f)(Mat,PetscInt,const PetscInt[],PetscScalar[]);

6405:   PetscObjectQueryFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",&f);
6406:   if (f) {
6407:     (*f)(A,n,idx,v);
6408:   } else {
6409:     const PetscScalar *vv;

6411:     MatSeqAIJGetArrayRead(A,&vv);
6412:     if (n && idx) {
6413:       PetscScalar    *w = v;
6414:       const PetscInt *oi = idx;
6415:       PetscInt       j;

6417:       for (j = 0; j < n; j++) *w++ = vv[*oi++];
6418:     } else {
6419:       PetscArraycpy(v,vv,n);
6420:     }
6421:     MatSeqAIJRestoreArrayRead(A,&vv);
6422:   }
6423:   return(0);
6424: }

6426: static PetscErrorCode MatProductNumeric_MPIAIJBACKEND(Mat C)
6427: {
6428:   MatMatMPIAIJBACKEND *mmdata;
6429:   PetscInt            i,n_d,n_o;
6430:   PetscErrorCode      ierr;

6433:   MatCheckProduct(C,1);
6434:   if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
6435:   mmdata = (MatMatMPIAIJBACKEND*)C->product->data;
6436:   if (!mmdata->reusesym) { /* update temporary matrices */
6437:     if (mmdata->P_oth) {
6438:       MatGetBrowsOfAoCols_MPIAIJ(C->product->A,C->product->B,MAT_REUSE_MATRIX,&mmdata->startsj_s,&mmdata->startsj_r,&mmdata->bufa,&mmdata->P_oth);
6439:     }
6440:     if (mmdata->Bloc) {
6441:       MatMPIAIJGetLocalMatMerge(C->product->B,MAT_REUSE_MATRIX,NULL,&mmdata->Bloc);
6442:     }
6443:   }
6444:   mmdata->reusesym = PETSC_FALSE;

6446:   for (i = 0; i < mmdata->cp; i++) {
6447:     if (!mmdata->mp[i]->ops->productnumeric) SETERRQ1(PetscObjectComm((PetscObject)mmdata->mp[i]),PETSC_ERR_PLIB,"Missing numeric op for %s",MatProductTypes[mmdata->mp[i]->product->type]);
6448:     (*mmdata->mp[i]->ops->productnumeric)(mmdata->mp[i]);
6449:   }
6450:   for (i = 0, n_d = 0, n_o = 0; i < mmdata->cp; i++) {
6451:     PetscInt noff = mmdata->off[i+1] - mmdata->off[i];

6453:     if (mmdata->mptmp[i]) continue;
6454:     if (noff) {
6455:       PetscInt nown = mmdata->own[i+1] - mmdata->own[i];

6457:       MatSeqAIJCopySubArray(mmdata->mp[i],noff,mmdata->off[i],mmdata->coo_w + n_o);
6458:       MatSeqAIJCopySubArray(mmdata->mp[i],nown,mmdata->own[i],mmdata->coo_v + n_d);
6459:       n_o += noff;
6460:       n_d += nown;
6461:     } else {
6462:       Mat_SeqAIJ *mm = (Mat_SeqAIJ*)mmdata->mp[i]->data;

6464:       MatSeqAIJCopySubArray(mmdata->mp[i],mm->nz,NULL,mmdata->coo_v + n_d);
6465:       n_d += mm->nz;
6466:     }
6467:   }
6468:   if (mmdata->hasoffproc) { /* offprocess insertion */
6469:     PetscSFGatherBegin(mmdata->sf,MPIU_SCALAR,mmdata->coo_w,mmdata->coo_v+n_d);
6470:     PetscSFGatherEnd(mmdata->sf,MPIU_SCALAR,mmdata->coo_w,mmdata->coo_v+n_d);
6471:   }
6472:   MatSetValuesCOO(C,mmdata->coo_v,INSERT_VALUES);
6473:   return(0);
6474: }

6476: /* Support for Pt * A, A * P, or Pt * A * P */
6477: #define MAX_NUMBER_INTERMEDIATE 4
6478: PetscErrorCode MatProductSymbolic_MPIAIJBACKEND(Mat C)
6479: {
6480:   Mat_Product            *product = C->product;
6481:   Mat                    A,P,mp[MAX_NUMBER_INTERMEDIATE];
6482:   Mat_MPIAIJ             *a,*p;
6483:   MatMatMPIAIJBACKEND    *mmdata;
6484:   ISLocalToGlobalMapping P_oth_l2g = NULL;
6485:   IS                     glob = NULL;
6486:   const char             *prefix;
6487:   char                   pprefix[256];
6488:   const PetscInt         *globidx,*P_oth_idx;
6489:   const PetscInt         *cmapa[MAX_NUMBER_INTERMEDIATE],*rmapa[MAX_NUMBER_INTERMEDIATE];
6490:   PetscInt               cp = 0,m,n,M,N,ncoo,ncoo_d,ncoo_o,ncoo_oown,*coo_i,*coo_j,cmapt[MAX_NUMBER_INTERMEDIATE],rmapt[MAX_NUMBER_INTERMEDIATE],i,j;
6491:   MatProductType         ptype;
6492:   PetscBool              mptmp[MAX_NUMBER_INTERMEDIATE],hasoffproc = PETSC_FALSE,iscuda,iskokk;
6493:   PetscMPIInt            size;
6494:   PetscErrorCode         ierr;

6497:   MatCheckProduct(C,1);
6498:   if (product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
6499:   ptype = product->type;
6500:   if (product->A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
6501:   switch (ptype) {
6502:   case MATPRODUCT_AB:
6503:     A = product->A;
6504:     P = product->B;
6505:     m = A->rmap->n;
6506:     n = P->cmap->n;
6507:     M = A->rmap->N;
6508:     N = P->cmap->N;
6509:     break;
6510:   case MATPRODUCT_AtB:
6511:     P = product->A;
6512:     A = product->B;
6513:     m = P->cmap->n;
6514:     n = A->cmap->n;
6515:     M = P->cmap->N;
6516:     N = A->cmap->N;
6517:     hasoffproc = PETSC_TRUE;
6518:     break;
6519:   case MATPRODUCT_PtAP:
6520:     A = product->A;
6521:     P = product->B;
6522:     m = P->cmap->n;
6523:     n = P->cmap->n;
6524:     M = P->cmap->N;
6525:     N = P->cmap->N;
6526:     hasoffproc = PETSC_TRUE;
6527:     break;
6528:   default:
6529:     SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for product type %s",MatProductTypes[ptype]);
6530:   }
6531:   MPI_Comm_size(PetscObjectComm((PetscObject)C),&size);
6532:   if (size == 1) hasoffproc = PETSC_FALSE;

6534:   /* defaults */
6535:   for (i=0;i<MAX_NUMBER_INTERMEDIATE;i++) {
6536:     mp[i]    = NULL;
6537:     mptmp[i] = PETSC_FALSE;
6538:     rmapt[i] = -1;
6539:     cmapt[i] = -1;
6540:     rmapa[i] = NULL;
6541:     cmapa[i] = NULL;
6542:   }

6544:   /* customization */
6545:   PetscNew(&mmdata);
6546:   mmdata->reusesym = product->api_user;
6547:   if (ptype == MATPRODUCT_AB) {
6548:     if (product->api_user) {
6549:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMult","Mat");
6550:       PetscOptionsBool("-matmatmult_backend_mergeB","Merge product->B local matrices","MatMatMult",mmdata->abmerge,&mmdata->abmerge,NULL);
6551:       PetscOptionsBool("-matmatmult_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6552:       PetscOptionsEnd();
6553:     } else {
6554:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AB","Mat");
6555:       PetscOptionsBool("-matproduct_ab_backend_mergeB","Merge product->B local matrices","MatMatMult",mmdata->abmerge,&mmdata->abmerge,NULL);
6556:       PetscOptionsBool("-matproduct_ab_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6557:       PetscOptionsEnd();
6558:     }
6559:   } else if (ptype == MATPRODUCT_PtAP) {
6560:     if (product->api_user) {
6561:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatPtAP","Mat");
6562:       PetscOptionsBool("-matptap_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6563:       PetscOptionsEnd();
6564:     } else {
6565:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_PtAP","Mat");
6566:       PetscOptionsBool("-matproduct_ptap_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6567:       PetscOptionsEnd();
6568:     }
6569:   }
6570:   a = (Mat_MPIAIJ*)A->data;
6571:   p = (Mat_MPIAIJ*)P->data;
6572:   MatSetSizes(C,m,n,M,N);
6573:   PetscLayoutSetUp(C->rmap);
6574:   PetscLayoutSetUp(C->cmap);
6575:   MatSetType(C,((PetscObject)A)->type_name);
6576:   MatGetOptionsPrefix(C,&prefix);
6577:   switch (ptype) {
6578:   case MATPRODUCT_AB: /* A * P */
6579:     MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&mmdata->startsj_s,&mmdata->startsj_r,&mmdata->bufa,&mmdata->P_oth);

6581:     if (mmdata->abmerge) { /* A_diag * P_loc and A_off * P_oth */
6582:       /* P is product->B */
6583:       MatMPIAIJGetLocalMatMerge(P,MAT_INITIAL_MATRIX,&glob,&mmdata->Bloc);
6584:       MatProductCreate(a->A,mmdata->Bloc,NULL,&mp[cp]);
6585:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6586:       MatProductSetFill(mp[cp],product->fill);
6587:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6588:       MatSetOptionsPrefix(mp[cp],prefix);
6589:       MatAppendOptionsPrefix(mp[cp],pprefix);
6590:       mp[cp]->product->api_user = product->api_user;
6591:       MatProductSetFromOptions(mp[cp]);
6592:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6593:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6594:       ISGetIndices(glob,&globidx);
6595:       rmapt[cp] = 1;
6596:       cmapt[cp] = 2;
6597:       cmapa[cp] = globidx;
6598:       mptmp[cp] = PETSC_FALSE;
6599:       cp++;
6600:     } else { /* A_diag * P_diag and A_diag * P_off and A_off * P_oth */
6601:       MatProductCreate(a->A,p->A,NULL,&mp[cp]);
6602:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6603:       MatProductSetFill(mp[cp],product->fill);
6604:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6605:       MatSetOptionsPrefix(mp[cp],prefix);
6606:       MatAppendOptionsPrefix(mp[cp],pprefix);
6607:       mp[cp]->product->api_user = product->api_user;
6608:       MatProductSetFromOptions(mp[cp]);
6609:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6610:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6611:       rmapt[cp] = 1;
6612:       cmapt[cp] = 1;
6613:       mptmp[cp] = PETSC_FALSE;
6614:       cp++;
6615:       MatProductCreate(a->A,p->B,NULL,&mp[cp]);
6616:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6617:       MatProductSetFill(mp[cp],product->fill);
6618:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6619:       MatSetOptionsPrefix(mp[cp],prefix);
6620:       MatAppendOptionsPrefix(mp[cp],pprefix);
6621:       mp[cp]->product->api_user = product->api_user;
6622:       MatProductSetFromOptions(mp[cp]);
6623:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6624:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6625:       rmapt[cp] = 1;
6626:       cmapt[cp] = 2;
6627:       cmapa[cp] = p->garray;
6628:       mptmp[cp] = PETSC_FALSE;
6629:       cp++;
6630:     }
6631:     if (mmdata->P_oth) {
6632:       MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth,&P_oth_l2g);
6633:       ISLocalToGlobalMappingGetIndices(P_oth_l2g,&P_oth_idx);
6634:       MatSetType(mmdata->P_oth,((PetscObject)(a->B))->type_name);
6635:       MatBindToCPU(mmdata->P_oth,mmdata->P_oth_bind);
6636:       MatProductCreate(a->B,mmdata->P_oth,NULL,&mp[cp]);
6637:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6638:       MatProductSetFill(mp[cp],product->fill);
6639:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6640:       MatSetOptionsPrefix(mp[cp],prefix);
6641:       MatAppendOptionsPrefix(mp[cp],pprefix);
6642:       mp[cp]->product->api_user = product->api_user;
6643:       MatProductSetFromOptions(mp[cp]);
6644:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6645:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6646:       rmapt[cp] = 1;
6647:       cmapt[cp] = 2;
6648:       cmapa[cp] = P_oth_idx;
6649:       mptmp[cp] = PETSC_FALSE;
6650:       cp++;
6651:     }
6652:     break;
6653:   case MATPRODUCT_AtB: /* (P^t * A): P_diag^t * A_loc + P_off^t * A_loc */
6654:     /* A is product->B */
6655:     MatMPIAIJGetLocalMatMerge(A,MAT_INITIAL_MATRIX,&glob,&mmdata->Bloc);
6656:     if (A == P) {
6657:       MatProductCreate(mmdata->Bloc,mmdata->Bloc,NULL,&mp[cp]);
6658:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6659:       MatProductSetFill(mp[cp],product->fill);
6660:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6661:       MatSetOptionsPrefix(mp[cp],prefix);
6662:       MatAppendOptionsPrefix(mp[cp],pprefix);
6663:       mp[cp]->product->api_user = product->api_user;
6664:       MatProductSetFromOptions(mp[cp]);
6665:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6666:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6667:       ISGetIndices(glob,&globidx);
6668:       rmapt[cp] = 2;
6669:       rmapa[cp] = globidx;
6670:       cmapt[cp] = 2;
6671:       cmapa[cp] = globidx;
6672:       mptmp[cp] = PETSC_FALSE;
6673:       cp++;
6674:     } else {
6675:       MatProductCreate(p->A,mmdata->Bloc,NULL,&mp[cp]);
6676:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6677:       MatProductSetFill(mp[cp],product->fill);
6678:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6679:       MatSetOptionsPrefix(mp[cp],prefix);
6680:       MatAppendOptionsPrefix(mp[cp],pprefix);
6681:       mp[cp]->product->api_user = product->api_user;
6682:       MatProductSetFromOptions(mp[cp]);
6683:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6684:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6685:       ISGetIndices(glob,&globidx);
6686:       rmapt[cp] = 1;
6687:       cmapt[cp] = 2;
6688:       cmapa[cp] = globidx;
6689:       mptmp[cp] = PETSC_FALSE;
6690:       cp++;
6691:       MatProductCreate(p->B,mmdata->Bloc,NULL,&mp[cp]);
6692:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6693:       MatProductSetFill(mp[cp],product->fill);
6694:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6695:       MatSetOptionsPrefix(mp[cp],prefix);
6696:       MatAppendOptionsPrefix(mp[cp],pprefix);
6697:       mp[cp]->product->api_user = product->api_user;
6698:       MatProductSetFromOptions(mp[cp]);
6699:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6700:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6701:       rmapt[cp] = 2;
6702:       rmapa[cp] = p->garray;
6703:       cmapt[cp] = 2;
6704:       cmapa[cp] = globidx;
6705:       mptmp[cp] = PETSC_FALSE;
6706:       cp++;
6707:     }
6708:     break;
6709:   case MATPRODUCT_PtAP:
6710:     MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&mmdata->startsj_s,&mmdata->startsj_r,&mmdata->bufa,&mmdata->P_oth);
6711:     /* P is product->B */
6712:     MatMPIAIJGetLocalMatMerge(P,MAT_INITIAL_MATRIX,&glob,&mmdata->Bloc);
6713:     MatProductCreate(a->A,mmdata->Bloc,NULL,&mp[cp]);
6714:     MatProductSetType(mp[cp],MATPRODUCT_PtAP);
6715:     MatProductSetFill(mp[cp],product->fill);
6716:     PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6717:     MatSetOptionsPrefix(mp[cp],prefix);
6718:     MatAppendOptionsPrefix(mp[cp],pprefix);
6719:     mp[cp]->product->api_user = product->api_user;
6720:     MatProductSetFromOptions(mp[cp]);
6721:     if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6722:     (*mp[cp]->ops->productsymbolic)(mp[cp]);
6723:     ISGetIndices(glob,&globidx);
6724:     rmapt[cp] = 2;
6725:     rmapa[cp] = globidx;
6726:     cmapt[cp] = 2;
6727:     cmapa[cp] = globidx;
6728:     mptmp[cp] = PETSC_FALSE;
6729:     cp++;
6730:     if (mmdata->P_oth) {
6731:       MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth,&P_oth_l2g);
6732:       ISLocalToGlobalMappingGetIndices(P_oth_l2g,&P_oth_idx);
6733:       MatSetType(mmdata->P_oth,((PetscObject)(a->B))->type_name);
6734:       MatBindToCPU(mmdata->P_oth,mmdata->P_oth_bind);
6735:       MatProductCreate(a->B,mmdata->P_oth,NULL,&mp[cp]);
6736:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6737:       MatProductSetFill(mp[cp],product->fill);
6738:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6739:       MatSetOptionsPrefix(mp[cp],prefix);
6740:       MatAppendOptionsPrefix(mp[cp],pprefix);
6741:       mp[cp]->product->api_user = product->api_user;
6742:       MatProductSetFromOptions(mp[cp]);
6743:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6744:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6745:       mptmp[cp] = PETSC_TRUE;
6746:       cp++;
6747:       MatProductCreate(mmdata->Bloc,mp[1],NULL,&mp[cp]);
6748:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6749:       MatProductSetFill(mp[cp],product->fill);
6750:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6751:       MatSetOptionsPrefix(mp[cp],prefix);
6752:       MatAppendOptionsPrefix(mp[cp],pprefix);
6753:       mp[cp]->product->api_user = product->api_user;
6754:       MatProductSetFromOptions(mp[cp]);
6755:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6756:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6757:       rmapt[cp] = 2;
6758:       rmapa[cp] = globidx;
6759:       cmapt[cp] = 2;
6760:       cmapa[cp] = P_oth_idx;
6761:       mptmp[cp] = PETSC_FALSE;
6762:       cp++;
6763:     }
6764:     break;
6765:   default:
6766:     SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for product type %s",MatProductTypes[ptype]);
6767:   }
6768:   /* sanity check */
6769:   if (size > 1) for (i = 0; i < cp; i++) if (rmapt[i] == 2 && !hasoffproc) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Unexpected offproc map type for product %D",i);

6771:   PetscMalloc1(cp,&mmdata->mp);
6772:   for (i = 0; i < cp; i++) mmdata->mp[i] = mp[i];
6773:   PetscMalloc1(cp,&mmdata->mptmp);
6774:   for (i = 0; i < cp; i++) mmdata->mptmp[i] = mptmp[i];
6775:   mmdata->cp = cp;
6776:   C->product->data       = mmdata;
6777:   C->product->destroy    = MatDestroy_MatMatMPIAIJBACKEND;
6778:   C->ops->productnumeric = MatProductNumeric_MPIAIJBACKEND;

6780:   /* memory type */
6781:   mmdata->mtype = PETSC_MEMTYPE_HOST;
6782:   PetscObjectTypeCompareAny((PetscObject)C,&iscuda,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,"");
6783:   PetscObjectTypeCompareAny((PetscObject)C,&iskokk,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,"");
6784:   if (iscuda) mmdata->mtype = PETSC_MEMTYPE_CUDA;
6785:   // enable the line below when MatSeqAIJCopySubArray_SeqAIJKokkos is implemented
6786:   //else if (iskokk) mmdata->mtype = PETSC_MEMTYPE_DEVICE;

6788:   /* prepare coo coordinates for values insertion */
6789:   for (cp = 0, ncoo_d = 0, ncoo_o = 0, ncoo_oown = 0; cp < mmdata->cp; cp++) {
6790:     Mat_SeqAIJ *mm = (Mat_SeqAIJ*)mp[cp]->data;
6791:     if (mptmp[cp]) continue;
6792:     if (rmapt[cp] == 2 && hasoffproc) {
6793:       const PetscInt *rmap = rmapa[cp];
6794:       const PetscInt mr = mp[cp]->rmap->n;
6795:       const PetscInt rs = C->rmap->rstart;
6796:       const PetscInt re = C->rmap->rend;
6797:       const PetscInt *ii  = mm->i;
6798:       for (i = 0; i < mr; i++) {
6799:         const PetscInt gr = rmap[i];
6800:         const PetscInt nz = ii[i+1] - ii[i];
6801:         if (gr < rs || gr >= re) ncoo_o += nz;
6802:         else ncoo_oown += nz;
6803:       }
6804:     } else ncoo_d += mm->nz;
6805:   }
6806:   PetscCalloc1(mmdata->cp+1,&mmdata->off);
6807:   PetscCalloc1(mmdata->cp+1,&mmdata->own);
6808:   if (hasoffproc) { /* handle offproc values insertion */
6809:     PetscSF  msf;
6810:     PetscInt ncoo2,*coo_i2,*coo_j2;

6812:     PetscMalloc1(ncoo_o,&mmdata->off[0]);
6813:     PetscMalloc1(ncoo_oown,&mmdata->own[0]);
6814:     PetscMalloc2(ncoo_o,&coo_i,ncoo_o,&coo_j);
6815:     for (cp = 0, ncoo_o = 0; cp < mmdata->cp; cp++) {
6816:       Mat_SeqAIJ *mm = (Mat_SeqAIJ*)mp[cp]->data;
6817:       PetscInt   *idxoff = mmdata->off[cp];
6818:       PetscInt   *idxown = mmdata->own[cp];
6819:       if (!mptmp[cp] && rmapt[cp] == 2) {
6820:         const PetscInt *rmap = rmapa[cp];
6821:         const PetscInt *cmap = cmapa[cp];
6822:         const PetscInt *ii  = mm->i;
6823:         PetscInt       *coi = coo_i + ncoo_o;
6824:         PetscInt       *coj = coo_j + ncoo_o;
6825:         const PetscInt mr = mp[cp]->rmap->n;
6826:         const PetscInt rs = C->rmap->rstart;
6827:         const PetscInt re = C->rmap->rend;
6828:         const PetscInt cs = C->cmap->rstart;
6829:         for (i = 0; i < mr; i++) {
6830:           const PetscInt *jj = mm->j + ii[i];
6831:           const PetscInt gr  = rmap[i];
6832:           const PetscInt nz  = ii[i+1] - ii[i];
6833:           if (gr < rs || gr >= re) {
6834:             for (j = ii[i]; j < ii[i+1]; j++) {
6835:               *coi++ = gr;
6836:               *idxoff++ = j;
6837:             }
6838:             if (!cmapt[cp]) { /* already global */
6839:               for (j = 0; j < nz; j++) *coj++ = jj[j];
6840:             } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
6841:               for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
6842:             } else { /* offdiag */
6843:               for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
6844:             }
6845:             ncoo_o += nz;
6846:           } else {
6847:             for (j = ii[i]; j < ii[i+1]; j++) *idxown++ = j;
6848:           }
6849:         }
6850:       }
6851:       mmdata->off[cp + 1] = idxoff;
6852:       mmdata->own[cp + 1] = idxown;
6853:     }

6855:     PetscSFCreate(PetscObjectComm((PetscObject)C),&mmdata->sf);
6856:     PetscSFSetGraphLayout(mmdata->sf,C->rmap,ncoo_o,NULL,PETSC_OWN_POINTER,coo_i);
6857:     PetscSFGetMultiSF(mmdata->sf,&msf);
6858:     PetscSFGetGraph(msf,&ncoo2,NULL,NULL,NULL);
6859:     ncoo = ncoo_d + ncoo_oown + ncoo2;
6860:     PetscMalloc2(ncoo,&coo_i2,ncoo,&coo_j2);
6861:     PetscSFGatherBegin(mmdata->sf,MPIU_INT,coo_i,coo_i2 + ncoo_d + ncoo_oown);
6862:     PetscSFGatherEnd(mmdata->sf,MPIU_INT,coo_i,coo_i2 + ncoo_d + ncoo_oown);
6863:     PetscSFGatherBegin(mmdata->sf,MPIU_INT,coo_j,coo_j2 + ncoo_d + ncoo_oown);
6864:     PetscSFGatherEnd(mmdata->sf,MPIU_INT,coo_j,coo_j2 + ncoo_d + ncoo_oown);
6865:     PetscFree2(coo_i,coo_j);
6866:     PetscSFMalloc(mmdata->sf,mmdata->mtype,ncoo_o*sizeof(PetscScalar),(void**)&mmdata->coo_w);
6867:     coo_i = coo_i2;
6868:     coo_j = coo_j2;
6869:   } else { /* no offproc values insertion */
6870:     ncoo = ncoo_d;
6871:     PetscMalloc2(ncoo,&coo_i,ncoo,&coo_j);

6873:     PetscSFCreate(PetscObjectComm((PetscObject)C),&mmdata->sf);
6874:     PetscSFSetGraph(mmdata->sf,0,0,NULL,PETSC_OWN_POINTER,NULL,PETSC_OWN_POINTER);
6875:     PetscSFSetUp(mmdata->sf);
6876:   }
6877:   mmdata->hasoffproc = hasoffproc;

6879:   /* on-process indices */
6880:   for (cp = 0, ncoo_d = 0; cp < mmdata->cp; cp++) {
6881:     Mat_SeqAIJ     *mm = (Mat_SeqAIJ*)mp[cp]->data;
6882:     PetscInt       *coi = coo_i + ncoo_d;
6883:     PetscInt       *coj = coo_j + ncoo_d;
6884:     const PetscInt *jj  = mm->j;
6885:     const PetscInt *ii  = mm->i;
6886:     const PetscInt *cmap = cmapa[cp];
6887:     const PetscInt *rmap = rmapa[cp];
6888:     const PetscInt mr = mp[cp]->rmap->n;
6889:     const PetscInt rs = C->rmap->rstart;
6890:     const PetscInt re = C->rmap->rend;
6891:     const PetscInt cs = C->cmap->rstart;

6893:     if (mptmp[cp]) continue;
6894:     if (rmapt[cp] == 1) {
6895:       for (i = 0; i < mr; i++) {
6896:         const PetscInt gr = i + rs;
6897:         for (j = ii[i]; j < ii[i+1]; j++) coi[j] = gr;
6898:       }
6899:       /* COO column indices */
6900:       if (!cmapt[cp]) {
6901:         PetscArraycpy(coj,jj,mm->nz);
6902:       } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
6903:         for (j = 0; j < mm->nz; j++) coj[j] = jj[j] + cs;
6904:       } else { /* local to global for the off-diagonal (ghost) columns */
6905:         for (j = 0; j < mm->nz; j++) coj[j] = cmap[jj[j]];
6906:       }
6907:       ncoo_d += mm->nz;
6908:     } else if (rmapt[cp] == 2) {
6909:       for (i = 0; i < mr; i++) {
6910:         const PetscInt *jj = mm->j + ii[i];
6911:         const PetscInt gr  = rmap[i];
6912:         const PetscInt nz  = ii[i+1] - ii[i];
6913:         if (gr >= rs && gr < re) {
6914:           for (j = ii[i]; j < ii[i+1]; j++) *coi++ = gr;
6915:           if (!cmapt[cp]) { /* already global */
6916:             for (j = 0; j < nz; j++) *coj++ = jj[j];
6917:           } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
6918:             for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
6919:           } else { /* local to global for the off-diagonal (ghost) columns */
6920:             for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
6921:           }
6922:           ncoo_d += nz;
6923:         }
6924:       }
6925:     }
6926:   }
6927:   if (glob) {
6928:     ISRestoreIndices(glob,&globidx);
6929:   }
6930:   ISDestroy(&glob);
6931:   if (P_oth_l2g) {
6932:     ISLocalToGlobalMappingRestoreIndices(P_oth_l2g,&P_oth_idx);
6933:   }
6934:   ISLocalToGlobalMappingDestroy(&P_oth_l2g);
6935:   PetscSFMalloc(mmdata->sf,mmdata->mtype,ncoo*sizeof(PetscScalar),(void**)&mmdata->coo_v);

6937:   /* preallocate with COO data */
6938:   MatSetPreallocationCOO(C,ncoo,coo_i,coo_j);
6939:   PetscFree2(coo_i,coo_j);
6940:   return(0);
6941: }
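
/* Editor's sketch (not part of the original source): the routine above only builds the COO
   pattern; the values are supplied later by the numeric phase.  Assuming the
   MatSetPreallocationCOO()/MatSetValuesCOO() interface used in this file, the same two-step
   assembly looks as follows from user code; ExampleCOOAssembly, n, rows, cols and vals are
   hypothetical names. */
static PetscErrorCode ExampleCOOAssembly(Mat A, PetscInt n, PetscInt rows[], PetscInt cols[], const PetscScalar vals[])
{
  /* declare the nonzero pattern once as (row,col) pairs; this sets up any needed communication */
  MatSetPreallocationCOO(A, n, rows, cols);
  /* then fill (and later refill) all numerical values in a single call */
  MatSetValuesCOO(A, vals, INSERT_VALUES);
  return(0);
}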

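      /* Select the backend product implementation for C = A*B, A^T*B, or P^T*A*P.  With device
         support enabled, the backend path is chosen only when A and B have the same type and
         neither operand is bound to the CPU, and each product type registers a *_backend_cpu
         option that forces the host path instead.  Anything not handled here falls back to
         MatProductSetFromOptions_MPIAIJ(). */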
6943: PetscErrorCode MatProductSetFromOptions_MPIAIJBACKEND(Mat mat)
6944: {
6945:   Mat_Product    *product = mat->product;
6947: #if defined(PETSC_HAVE_DEVICE)
6948:   PetscBool      match = PETSC_FALSE;
6949:   PetscBool      usecpu = PETSC_FALSE;
6950: #else
6951:   PetscBool      match = PETSC_TRUE;
6952: #endif

6955:   MatCheckProduct(mat,1);
6956: #if defined(PETSC_HAVE_DEVICE)
6957:   if (!product->A->boundtocpu && !product->B->boundtocpu) {
6958:     PetscObjectTypeCompare((PetscObject)product->B,((PetscObject)product->A)->type_name,&match);
6959:   }
6960:   if (match) { /* we can always fall back to the CPU if requested */
6961:     switch (product->type) {
6962:     case MATPRODUCT_AB:
6963:       if (product->api_user) {
6964:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMult","Mat");
6965:         PetscOptionsBool("-matmatmult_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);
6966:         PetscOptionsEnd();
6967:       } else {
6968:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AB","Mat");
6969:         PetscOptionsBool("-matproduct_ab_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);
6970:         PetscOptionsEnd();
6971:       }
6972:       break;
6973:     case MATPRODUCT_AtB:
6974:       if (product->api_user) {
6975:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatTransposeMatMult","Mat");
6976:         PetscOptionsBool("-mattransposematmult_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);
6977:         PetscOptionsEnd();
6978:       } else {
6979:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AtB","Mat");
6980:         PetscOptionsBool("-matproduct_atb_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);
6981:         PetscOptionsEnd();
6982:       }
6983:       break;
6984:     case MATPRODUCT_PtAP:
6985:       if (product->api_user) {
6986:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatPtAP","Mat");
6987:         PetscOptionsBool("-matptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);
6988:         PetscOptionsEnd();
6989:       } else {
6990:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_PtAP","Mat");
6991:         PetscOptionsBool("-matproduct_ptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);
6992:         PetscOptionsEnd();
6993:       }
6994:       break;
6995:     default:
6996:       break;
6997:     }
6998:     match = (PetscBool)!usecpu;
6999:   }
7000: #endif
7001:   if (match) {
7002:     switch (product->type) {
7003:     case MATPRODUCT_AB:
7004:     case MATPRODUCT_AtB:
7005:     case MATPRODUCT_PtAP:
7006:       mat->ops->productsymbolic = MatProductSymbolic_MPIAIJBACKEND;
7007:       break;
7008:     default:
7009:       break;
7010:     }
7011:   }
7012:   /* fall back to the standard MPIAIJ product operations */
7013:   if (!mat->ops->productsymbolic) {
7014:     MatProductSetFromOptions_MPIAIJ(mat);
7015:   }
7016:   return(0);
7017: }
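
/* Editor's sketch (not part of the original source): exercising the options registered above
   through the MatProduct API.  A and P are hypothetical, already-assembled matrices of an AIJ
   backend type; running with -matproduct_ptap_backend_cpu (or, when calling MatPtAP() directly,
   -matptap_backend_cpu) selects the host fallback in the routine above. */
static PetscErrorCode ExampleProductPtAP(Mat A, Mat P, Mat *C)
{
  MatProductCreate(A, P, NULL, C);
  MatProductSetType(*C, MATPRODUCT_PtAP);
  MatProductSetFromOptions(*C);   /* dispatches to the routine above for the backend AIJ types */
  MatProductSymbolic(*C);
  MatProductNumeric(*C);
  return(0);
}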