Actual source code: mpiaij.c

  1: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  2: #include <petsc/private/vecimpl.h>
  3: #include <petsc/private/sfimpl.h>
  4: #include <petsc/private/isimpl.h>
  5: #include <petscblaslapack.h>
  6: #include <petscsf.h>
  7: #include <petsc/private/hashmapi.h>

  9: /*MC
 10:    MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.

 12:    This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
 13:    and MATMPIAIJ otherwise.  As a result, for single process communicators,
 14:   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 15:   for communicators controlling multiple processes.  It is recommended that you call both of
 16:   the above preallocation routines for simplicity.

 18:    Options Database Keys:
 19: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()

 21:   Developer Notes:
 22:     Subclasses include MATAIJCUSPARSE, MATAIJPERM, MATAIJSELL, MATAIJMKL, and MATAIJCRL. The AIJ type also automatically switches over to
 23:    use inodes when enough exist.

 25:   Level: beginner

 27: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
 28: M*/
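
/*
   Editorial sketch (not part of the PETSc source): the pattern recommended above of calling
   both preallocation routines, so the same code works on one process or many.  The per-row
   nonzero estimates (5 diagonal, 2 off-diagonal) and the size n are placeholders for illustration.

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);
     MatSetType(A,MATAIJ);
     MatSeqAIJSetPreallocation(A,5,NULL);          used on a single-process communicator
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);   used on a multi-process communicator
     then MatSetValues()/MatAssemblyBegin()/MatAssemblyEnd() as usual
*/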

 30: /*MC
 31:    MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.

 33:    This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
 34:    and MATMPIAIJCRL otherwise.  As a result, for single process communicators,
 35:    MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 36:   for communicators controlling multiple processes.  It is recommended that you call both of
 37:   the above preallocation routines for simplicity.

 39:    Options Database Keys:
 40: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()

 42:   Level: beginner

 44: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
 45: M*/
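
/*
   Editorial sketch (not part of the PETSc source): selecting this format at run time with the
   option documented above.  With MatSetFromOptions() in place, running the program with
   -mat_type aijcrl converts the matrix to MATSEQAIJCRL or MATMPIAIJCRL as appropriate.
   The preallocation values are placeholders.

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);
     MatSetFromOptions(A);
     MatSeqAIJSetPreallocation(A,5,NULL);
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
*/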

 47: static PetscErrorCode MatBindToCPU_MPIAIJ(Mat A,PetscBool flg)
 48: {
 49:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

 53: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_VIENNACL)
 54:   A->boundtocpu = flg;
 55: #endif
 56:   if (a->A) {
 57:     MatBindToCPU(a->A,flg);
 58:   }
 59:   if (a->B) {
 60:     MatBindToCPU(a->B,flg);
 61:   }
 62:   return(0);
 63: }

 65: PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
 66: {
 68:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)M->data;

 71:   if (mat->A) {
 72:     MatSetBlockSizes(mat->A,rbs,cbs);
 73:     MatSetBlockSizes(mat->B,rbs,1);
 74:   }
 75:   return(0);
 76: }

 78: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
 79: {
 80:   PetscErrorCode  ierr;
 81:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ*)M->data;
 82:   Mat_SeqAIJ      *a   = (Mat_SeqAIJ*)mat->A->data;
 83:   Mat_SeqAIJ      *b   = (Mat_SeqAIJ*)mat->B->data;
 84:   const PetscInt  *ia,*ib;
 85:   const MatScalar *aa,*bb,*aav,*bav;
 86:   PetscInt        na,nb,i,j,*rows,cnt=0,n0rows;
 87:   PetscInt        m = M->rmap->n,rstart = M->rmap->rstart;

 90:   *keptrows = NULL;

 92:   ia   = a->i;
 93:   ib   = b->i;
 94:   MatSeqAIJGetArrayRead(mat->A,&aav);
 95:   MatSeqAIJGetArrayRead(mat->B,&bav);
 96:   for (i=0; i<m; i++) {
 97:     na = ia[i+1] - ia[i];
 98:     nb = ib[i+1] - ib[i];
 99:     if (!na && !nb) {
100:       cnt++;
101:       goto ok1;
102:     }
103:     aa = aav + ia[i];
104:     for (j=0; j<na; j++) {
105:       if (aa[j] != 0.0) goto ok1;
106:     }
107:     bb = bav + ib[i];
108:     for (j=0; j <nb; j++) {
109:       if (bb[j] != 0.0) goto ok1;
110:     }
111:     cnt++;
112: ok1:;
113:   }
114:   MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
115:   if (!n0rows) {
116:     MatSeqAIJRestoreArrayRead(mat->A,&aav);
117:     MatSeqAIJRestoreArrayRead(mat->B,&bav);
118:     return(0);
119:   }
120:   PetscMalloc1(M->rmap->n-cnt,&rows);
121:   cnt  = 0;
122:   for (i=0; i<m; i++) {
123:     na = ia[i+1] - ia[i];
124:     nb = ib[i+1] - ib[i];
125:     if (!na && !nb) continue;
126:     aa = aav + ia[i];
127:     for (j=0; j<na;j++) {
128:       if (aa[j] != 0.0) {
129:         rows[cnt++] = rstart + i;
130:         goto ok2;
131:       }
132:     }
133:     bb = bav + ib[i];
134:     for (j=0; j<nb; j++) {
135:       if (bb[j] != 0.0) {
136:         rows[cnt++] = rstart + i;
137:         goto ok2;
138:       }
139:     }
140: ok2:;
141:   }
142:   ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
143:   MatSeqAIJRestoreArrayRead(mat->A,&aav);
144:   MatSeqAIJRestoreArrayRead(mat->B,&bav);
145:   return(0);
146: }

148: PetscErrorCode  MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
149: {
150:   PetscErrorCode    ierr;
151:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*) Y->data;
152:   PetscBool         cong;

155:   MatHasCongruentLayouts(Y,&cong);
156:   if (Y->assembled && cong) {
157:     MatDiagonalSet(aij->A,D,is);
158:   } else {
159:     MatDiagonalSet_Default(Y,D,is);
160:   }
161:   return(0);
162: }

164: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
165: {
166:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)M->data;
168:   PetscInt       i,rstart,nrows,*rows;

171:   *zrows = NULL;
172:   MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
173:   MatGetOwnershipRange(M,&rstart,NULL);
174:   for (i=0; i<nrows; i++) rows[i] += rstart;
175:   ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
176:   return(0);
177: }

179: PetscErrorCode MatGetColumnReductions_MPIAIJ(Mat A,PetscInt type,PetscReal *reductions)
180: {
181:   PetscErrorCode    ierr;
182:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)A->data;
183:   PetscInt          i,m,n,*garray = aij->garray;
184:   Mat_SeqAIJ        *a_aij = (Mat_SeqAIJ*) aij->A->data;
185:   Mat_SeqAIJ        *b_aij = (Mat_SeqAIJ*) aij->B->data;
186:   PetscReal         *work;
187:   const PetscScalar *dummy;

190:   MatGetSize(A,&m,&n);
191:   PetscCalloc1(n,&work);
192:   MatSeqAIJGetArrayRead(aij->A,&dummy);
193:   MatSeqAIJRestoreArrayRead(aij->A,&dummy);
194:   MatSeqAIJGetArrayRead(aij->B,&dummy);
195:   MatSeqAIJRestoreArrayRead(aij->B,&dummy);
196:   if (type == NORM_2) {
197:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
198:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
199:     }
200:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
201:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
202:     }
203:   } else if (type == NORM_1) {
204:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
205:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
206:     }
207:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
208:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
209:     }
210:   } else if (type == NORM_INFINITY) {
211:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
212:       work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
213:     }
214:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
215:       work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
216:     }
217:   } else if (type == REDUCTION_SUM_REALPART || type == REDUCTION_MEAN_REALPART) {
218:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
219:       work[A->cmap->rstart + a_aij->j[i]] += PetscRealPart(a_aij->a[i]);
220:     }
221:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
222:       work[garray[b_aij->j[i]]] += PetscRealPart(b_aij->a[i]);
223:     }
224:   } else if (type == REDUCTION_SUM_IMAGINARYPART || type == REDUCTION_MEAN_IMAGINARYPART) {
225:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
226:       work[A->cmap->rstart + a_aij->j[i]] += PetscImaginaryPart(a_aij->a[i]);
227:     }
228:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
229:       work[garray[b_aij->j[i]]] += PetscImaginaryPart(b_aij->a[i]);
230:     }
231:   } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown reduction type");
232:   if (type == NORM_INFINITY) {
233:     MPIU_Allreduce(work,reductions,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
234:   } else {
235:     MPIU_Allreduce(work,reductions,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
236:   }
237:   PetscFree(work);
238:   if (type == NORM_2) {
239:     for (i=0; i<n; i++) reductions[i] = PetscSqrtReal(reductions[i]);
240:   } else if (type == REDUCTION_MEAN_REALPART || type == REDUCTION_MEAN_IMAGINARYPART) {
241:     for (i=0; i<n; i++) reductions[i] /= m;
242:   }
243:   return(0);
244: }

246: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
247: {
248:   Mat_MPIAIJ      *a  = (Mat_MPIAIJ*)A->data;
249:   IS              sis,gis;
250:   PetscErrorCode  ierr;
251:   const PetscInt  *isis,*igis;
252:   PetscInt        n,*iis,nsis,ngis,rstart,i;

255:   MatFindOffBlockDiagonalEntries(a->A,&sis);
256:   MatFindNonzeroRows(a->B,&gis);
257:   ISGetSize(gis,&ngis);
258:   ISGetSize(sis,&nsis);
259:   ISGetIndices(sis,&isis);
260:   ISGetIndices(gis,&igis);

262:   PetscMalloc1(ngis+nsis,&iis);
263:   PetscArraycpy(iis,igis,ngis);
264:   PetscArraycpy(iis+ngis,isis,nsis);
265:   n    = ngis + nsis;
266:   PetscSortRemoveDupsInt(&n,iis);
267:   MatGetOwnershipRange(A,&rstart,NULL);
268:   for (i=0; i<n; i++) iis[i] += rstart;
269:   ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);

271:   ISRestoreIndices(sis,&isis);
272:   ISRestoreIndices(gis,&igis);
273:   ISDestroy(&sis);
274:   ISDestroy(&gis);
275:   return(0);
276: }

278: /*
279:   Local utility routine that creates a mapping from the global column
280: number to the local number in the off-diagonal part of the local
281: storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable, at
282: a slightly higher hash table cost; without it, it is not scalable (each process
283: has an order-N integer array) but is fast to access.
284: */
285: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
286: {
287:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
289:   PetscInt       n = aij->B->cmap->n,i;

292:   if (n && !aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
293: #if defined(PETSC_USE_CTABLE)
294:   PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
295:   for (i=0; i<n; i++) {
296:     PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
297:   }
298: #else
299:   PetscCalloc1(mat->cmap->N+1,&aij->colmap);
300:   PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
301:   for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
302: #endif
303:   return(0);
304: }
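
/*
   Editorial sketch (not part of the PETSc source): how the colmap built above is consulted,
   mirroring its use in MatSetValues_MPIAIJ() and MatGetValues_MPIAIJ() below.  A global
   column index gcol is translated to a local index into the off-diagonal block; a result
   of -1 after the decrement means the column is not present in the local off-diagonal part.

     PetscInt lcol;
   #if defined(PETSC_USE_CTABLE)
     PetscTableFind(aij->colmap,gcol+1,&lcol);
     lcol--;
   #else
     lcol = aij->colmap[gcol] - 1;
   #endif
*/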

306: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol)     \
307: { \
308:     if (col <= lastcol1)  low1 = 0;     \
309:     else                 high1 = nrow1; \
310:     lastcol1 = col;\
311:     while (high1-low1 > 5) { \
312:       t = (low1+high1)/2; \
313:       if (rp1[t] > col) high1 = t; \
314:       else              low1  = t; \
315:     } \
316:       for (_i=low1; _i<high1; _i++) { \
317:         if (rp1[_i] > col) break; \
318:         if (rp1[_i] == col) { \
319:           if (addv == ADD_VALUES) { \
320:             ap1[_i] += value;   \
 321:             /* Not sure whether LogFlops will slow down the code or not */ \
322:             (void)PetscLogFlops(1.0);   \
323:            } \
324:           else                    ap1[_i] = value; \
325:           inserted = PETSC_TRUE; \
326:           goto a_noinsert; \
327:         } \
328:       }  \
329:       if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
330:       if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;}                \
331:       if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
332:       MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
333:       N = nrow1++ - 1; a->nz++; high1++; \
334:       /* shift up all the later entries in this row */ \
335:       PetscArraymove(rp1+_i+1,rp1+_i,N-_i+1);\
336:       PetscArraymove(ap1+_i+1,ap1+_i,N-_i+1);\
337:       rp1[_i] = col;  \
338:       ap1[_i] = value;  \
339:       A->nonzerostate++;\
340:       a_noinsert: ; \
341:       ailen[row] = nrow1; \
342: }

344: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
345:   { \
346:     if (col <= lastcol2) low2 = 0;                        \
347:     else high2 = nrow2;                                   \
348:     lastcol2 = col;                                       \
349:     while (high2-low2 > 5) {                              \
350:       t = (low2+high2)/2;                                 \
351:       if (rp2[t] > col) high2 = t;                        \
352:       else             low2  = t;                         \
353:     }                                                     \
354:     for (_i=low2; _i<high2; _i++) {                       \
355:       if (rp2[_i] > col) break;                           \
356:       if (rp2[_i] == col) {                               \
357:         if (addv == ADD_VALUES) {                         \
358:           ap2[_i] += value;                               \
359:           (void)PetscLogFlops(1.0);                       \
360:         }                                                 \
361:         else                    ap2[_i] = value;          \
362:         inserted = PETSC_TRUE;                            \
363:         goto b_noinsert;                                  \
364:       }                                                   \
365:     }                                                     \
366:     if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
367:     if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;}                        \
368:     if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
369:     MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
370:     N = nrow2++ - 1; b->nz++; high2++;                    \
371:     /* shift up all the later entries in this row */      \
372:     PetscArraymove(rp2+_i+1,rp2+_i,N-_i+1);\
373:     PetscArraymove(ap2+_i+1,ap2+_i,N-_i+1);\
374:     rp2[_i] = col;                                        \
375:     ap2[_i] = value;                                      \
376:     B->nonzerostate++;                                    \
377:     b_noinsert: ;                                         \
378:     bilen[row] = nrow2;                                   \
379:   }

381: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
382: {
383:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
384:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
386:   PetscInt       l,*garray = mat->garray,diag;

389:   /* code only works for square matrices A */

391:   /* find size of row to the left of the diagonal part */
392:   MatGetOwnershipRange(A,&diag,NULL);
393:   row  = row - diag;
394:   for (l=0; l<b->i[row+1]-b->i[row]; l++) {
395:     if (garray[b->j[b->i[row]+l]] > diag) break;
396:   }
397:   PetscArraycpy(b->a+b->i[row],v,l);

399:   /* diagonal part */
400:   PetscArraycpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row]));

402:   /* right of diagonal part */
403:   PetscArraycpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],b->i[row+1]-b->i[row]-l);
404: #if defined(PETSC_HAVE_DEVICE)
405:   if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && (l || (a->i[row+1]-a->i[row]) || (b->i[row+1]-b->i[row]-l))) A->offloadmask = PETSC_OFFLOAD_CPU;
406: #endif
407:   return(0);
408: }

410: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
411: {
412:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
413:   PetscScalar    value = 0.0;
415:   PetscInt       i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
416:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
417:   PetscBool      roworiented = aij->roworiented;

419:   /* Some Variables required in the macro */
420:   Mat        A                    = aij->A;
421:   Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
422:   PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
423:   PetscBool  ignorezeroentries    = a->ignorezeroentries;
424:   Mat        B                    = aij->B;
425:   Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
426:   PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
427:   MatScalar  *aa,*ba;
428:   /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
429:    * cannot use "#if defined" inside a macro. */
430:   PETSC_UNUSED PetscBool inserted = PETSC_FALSE;

432:   PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
433:   PetscInt  nonew;
434:   MatScalar *ap1,*ap2;

437: #if defined(PETSC_HAVE_DEVICE)
438:   if (A->offloadmask == PETSC_OFFLOAD_GPU) {
439:     const PetscScalar *dummy;
440:     MatSeqAIJGetArrayRead(A,&dummy);
441:     MatSeqAIJRestoreArrayRead(A,&dummy);
442:   }
443:   if (B->offloadmask == PETSC_OFFLOAD_GPU) {
444:     const PetscScalar *dummy;
445:     MatSeqAIJGetArrayRead(B,&dummy);
446:     MatSeqAIJRestoreArrayRead(B,&dummy);
447:   }
448: #endif
449:   aa = a->a;
450:   ba = b->a;
451:   for (i=0; i<m; i++) {
452:     if (im[i] < 0) continue;
453:     if (PetscUnlikely(im[i] >= mat->rmap->N)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
454:     if (im[i] >= rstart && im[i] < rend) {
455:       row      = im[i] - rstart;
456:       lastcol1 = -1;
457:       rp1      = aj + ai[row];
458:       ap1      = aa + ai[row];
459:       rmax1    = aimax[row];
460:       nrow1    = ailen[row];
461:       low1     = 0;
462:       high1    = nrow1;
463:       lastcol2 = -1;
464:       rp2      = bj + bi[row];
465:       ap2      = ba + bi[row];
466:       rmax2    = bimax[row];
467:       nrow2    = bilen[row];
468:       low2     = 0;
469:       high2    = nrow2;

471:       for (j=0; j<n; j++) {
472:         if (v)  value = roworiented ? v[i*n+j] : v[i+j*m];
473:         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
474:         if (in[j] >= cstart && in[j] < cend) {
475:           col   = in[j] - cstart;
476:           nonew = a->nonew;
477:           MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
478: #if defined(PETSC_HAVE_DEVICE)
479:           if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) A->offloadmask = PETSC_OFFLOAD_CPU;
480: #endif
481:         } else if (in[j] < 0) continue;
482:         else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
483:         else {
484:           if (mat->was_assembled) {
485:             if (!aij->colmap) {
486:               MatCreateColmap_MPIAIJ_Private(mat);
487:             }
488: #if defined(PETSC_USE_CTABLE)
489:             PetscTableFind(aij->colmap,in[j]+1,&col);
490:             col--;
491: #else
492:             col = aij->colmap[in[j]] - 1;
493: #endif
494:             if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
495:               MatDisAssemble_MPIAIJ(mat);
496:               col  =  in[j];
497:               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
498:               B        = aij->B;
499:               b        = (Mat_SeqAIJ*)B->data;
500:               bimax    = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
501:               rp2      = bj + bi[row];
502:               ap2      = ba + bi[row];
503:               rmax2    = bimax[row];
504:               nrow2    = bilen[row];
505:               low2     = 0;
506:               high2    = nrow2;
507:               bm       = aij->B->rmap->n;
508:               ba       = b->a;
509:               inserted = PETSC_FALSE;
510:             } else if (col < 0 && !(ignorezeroentries && value == 0.0)) {
511:               if (1 == ((Mat_SeqAIJ*)(aij->B->data))->nonew) {
512:                 PetscInfo3(mat,"Skipping of insertion of new nonzero location in off-diagonal portion of matrix %g(%D,%D)\n",(double)PetscRealPart(value),im[i],in[j]);
513:               } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
514:             }
515:           } else col = in[j];
516:           nonew = b->nonew;
517:           MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
518: #if defined(PETSC_HAVE_DEVICE)
519:           if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) B->offloadmask = PETSC_OFFLOAD_CPU;
520: #endif
521:         }
522:       }
523:     } else {
524:       if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
525:       if (!aij->donotstash) {
526:         mat->assembled = PETSC_FALSE;
527:         if (roworiented) {
528:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
529:         } else {
530:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
531:         }
532:       }
533:     }
534:   }
535:   return(0);
536: }

538: /*
539:     This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
540:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
 541:     No off-processor parts of the matrix are allowed here and mat->was_assembled has to be PETSC_FALSE.
542: */
543: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[])
544: {
545:   Mat_MPIAIJ     *aij        = (Mat_MPIAIJ*)mat->data;
546:   Mat            A           = aij->A; /* diagonal part of the matrix */
547:   Mat            B           = aij->B; /* offdiagonal part of the matrix */
548:   Mat_SeqAIJ     *a          = (Mat_SeqAIJ*)A->data;
549:   Mat_SeqAIJ     *b          = (Mat_SeqAIJ*)B->data;
550:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,col;
551:   PetscInt       *ailen      = a->ilen,*aj = a->j;
552:   PetscInt       *bilen      = b->ilen,*bj = b->j;
553:   PetscInt       am          = aij->A->rmap->n,j;
554:   PetscInt       diag_so_far = 0,dnz;
555:   PetscInt       offd_so_far = 0,onz;

558:   /* Iterate over all rows of the matrix */
559:   for (j=0; j<am; j++) {
560:     dnz = onz = 0;
561:     /*  Iterate over all non-zero columns of the current row */
562:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
563:       /* If column is in the diagonal */
564:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
565:         aj[diag_so_far++] = mat_j[col] - cstart;
566:         dnz++;
567:       } else { /* off-diagonal entries */
568:         bj[offd_so_far++] = mat_j[col];
569:         onz++;
570:       }
571:     }
572:     ailen[j] = dnz;
573:     bilen[j] = onz;
574:   }
575:   return(0);
576: }

578: /*
579:     This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
580:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
 581:     No off-processor parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ.
 582:     Also, mat->was_assembled has to be false, otherwise the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
 583:     would not be valid and the more complex MatSetValues_MPIAIJ has to be used.
584: */
585: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[],const PetscScalar mat_a[])
586: {
587:   Mat_MPIAIJ     *aij   = (Mat_MPIAIJ*)mat->data;
588:   Mat            A      = aij->A; /* diagonal part of the matrix */
589:   Mat            B      = aij->B; /* offdiagonal part of the matrix */
590:   Mat_SeqAIJ     *aijd  =(Mat_SeqAIJ*)(aij->A)->data,*aijo=(Mat_SeqAIJ*)(aij->B)->data;
591:   Mat_SeqAIJ     *a     = (Mat_SeqAIJ*)A->data;
592:   Mat_SeqAIJ     *b     = (Mat_SeqAIJ*)B->data;
593:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend;
594:   PetscInt       *ailen = a->ilen,*aj = a->j;
595:   PetscInt       *bilen = b->ilen,*bj = b->j;
596:   PetscInt       am     = aij->A->rmap->n,j;
597:   PetscInt       *full_diag_i=aijd->i,*full_offd_i=aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
598:   PetscInt       col,dnz_row,onz_row,rowstart_diag,rowstart_offd;
599:   PetscScalar    *aa = a->a,*ba = b->a;

602:   /* Iterate over all rows of the matrix */
603:   for (j=0; j<am; j++) {
604:     dnz_row = onz_row = 0;
605:     rowstart_offd = full_offd_i[j];
606:     rowstart_diag = full_diag_i[j];
607:     /*  Iterate over all non-zero columns of the current row */
608:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
609:       /* If column is in the diagonal */
610:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
611:         aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
612:         aa[rowstart_diag+dnz_row] = mat_a[col];
613:         dnz_row++;
614:       } else { /* off-diagonal entries */
615:         bj[rowstart_offd+onz_row] = mat_j[col];
616:         ba[rowstart_offd+onz_row] = mat_a[col];
617:         onz_row++;
618:       }
619:     }
620:     ailen[j] = dnz_row;
621:     bilen[j] = onz_row;
622:   }
623:   return(0);
624: }

626: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
627: {
628:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
630:   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
631:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;

634:   for (i=0; i<m; i++) {
635:     if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
636:     if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
637:     if (idxm[i] >= rstart && idxm[i] < rend) {
638:       row = idxm[i] - rstart;
639:       for (j=0; j<n; j++) {
640:         if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
641:         if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
642:         if (idxn[j] >= cstart && idxn[j] < cend) {
643:           col  = idxn[j] - cstart;
644:           MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
645:         } else {
646:           if (!aij->colmap) {
647:             MatCreateColmap_MPIAIJ_Private(mat);
648:           }
649: #if defined(PETSC_USE_CTABLE)
650:           PetscTableFind(aij->colmap,idxn[j]+1,&col);
651:           col--;
652: #else
653:           col = aij->colmap[idxn[j]] - 1;
654: #endif
655:           if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
656:           else {
657:             MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
658:           }
659:         }
660:       }
661:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
662:   }
663:   return(0);
664: }

666: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
667: {
668:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
670:   PetscInt       nstash,reallocs;

673:   if (aij->donotstash || mat->nooffprocentries) return(0);

675:   MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
676:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
677:   PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
678:   return(0);
679: }

681: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
682: {
683:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
685:   PetscMPIInt    n;
686:   PetscInt       i,j,rstart,ncols,flg;
687:   PetscInt       *row,*col;
688:   PetscBool      other_disassembled;
689:   PetscScalar    *val;

691:   /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */

694:   if (!aij->donotstash && !mat->nooffprocentries) {
695:     while (1) {
696:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
697:       if (!flg) break;

699:       for (i=0; i<n;) {
700:         /* Now identify the consecutive vals belonging to the same row */
701:         for (j=i,rstart=row[j]; j<n; j++) {
702:           if (row[j] != rstart) break;
703:         }
704:         if (j < n) ncols = j-i;
705:         else       ncols = n-i;
706:         /* Now assemble all these values with a single function call */
707:         MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);
708:         i    = j;
709:       }
710:     }
711:     MatStashScatterEnd_Private(&mat->stash);
712:   }
713: #if defined(PETSC_HAVE_DEVICE)
714:   if (mat->offloadmask == PETSC_OFFLOAD_CPU) aij->A->offloadmask = PETSC_OFFLOAD_CPU;
715:   /* We call MatBindToCPU() on aij->A and aij->B here, because if MatBindToCPU_MPIAIJ() is called before assembly, it cannot bind these. */
716:   if (mat->boundtocpu) {
717:     MatBindToCPU(aij->A,PETSC_TRUE);
718:     MatBindToCPU(aij->B,PETSC_TRUE);
719:   }
720: #endif
721:   MatAssemblyBegin(aij->A,mode);
722:   MatAssemblyEnd(aij->A,mode);

 724:   /* determine if any processor has disassembled; if so, we must
 725:      also disassemble ourselves, in order that we may reassemble. */
 726:   /*
 727:      if the nonzero structure of submatrix B cannot change then we know that
 728:      no processor disassembled, and thus we can skip this step
729:   */
730:   if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
731:     MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
732:     if (mat->was_assembled && !other_disassembled) {
733: #if defined(PETSC_HAVE_DEVICE)
734:       aij->B->offloadmask = PETSC_OFFLOAD_BOTH; /* do not copy on the GPU when assembling inside MatDisAssemble_MPIAIJ */
735: #endif
736:       MatDisAssemble_MPIAIJ(mat);
737:     }
738:   }
739:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
740:     MatSetUpMultiply_MPIAIJ(mat);
741:   }
742:   MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
743: #if defined(PETSC_HAVE_DEVICE)
744:   if (mat->offloadmask == PETSC_OFFLOAD_CPU && aij->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) aij->B->offloadmask = PETSC_OFFLOAD_CPU;
745: #endif
746:   MatAssemblyBegin(aij->B,mode);
747:   MatAssemblyEnd(aij->B,mode);

749:   PetscFree2(aij->rowvalues,aij->rowindices);

751:   aij->rowvalues = NULL;

753:   VecDestroy(&aij->diag);

 755:   /* if no new nonzero locations are allowed in the matrix then only set the matrix state the first time through */
756:   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
757:     PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
758:     MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
759:   }
760: #if defined(PETSC_HAVE_DEVICE)
761:   mat->offloadmask = PETSC_OFFLOAD_BOTH;
762: #endif
763:   return(0);
764: }

766: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
767: {
768:   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;

772:   MatZeroEntries(l->A);
773:   MatZeroEntries(l->B);
774:   return(0);
775: }

777: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
778: {
779:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ *) A->data;
780:   PetscObjectState sA, sB;
781:   PetscInt        *lrows;
782:   PetscInt         r, len;
783:   PetscBool        cong, lch, gch;
784:   PetscErrorCode   ierr;

787:   /* get locally owned rows */
788:   MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);
789:   MatHasCongruentLayouts(A,&cong);
790:   /* fix right hand side if needed */
791:   if (x && b) {
792:     const PetscScalar *xx;
793:     PetscScalar       *bb;

795:     if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
796:     VecGetArrayRead(x, &xx);
797:     VecGetArray(b, &bb);
798:     for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
799:     VecRestoreArrayRead(x, &xx);
800:     VecRestoreArray(b, &bb);
801:   }

803:   sA = mat->A->nonzerostate;
804:   sB = mat->B->nonzerostate;

806:   if (diag != 0.0 && cong) {
807:     MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
808:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
809:   } else if (diag != 0.0) { /* non-square or non congruent layouts -> if keepnonzeropattern is false, we allow for new insertion */
810:     Mat_SeqAIJ *aijA = (Mat_SeqAIJ*)mat->A->data;
811:     Mat_SeqAIJ *aijB = (Mat_SeqAIJ*)mat->B->data;
812:     PetscInt   nnwA, nnwB;
813:     PetscBool  nnzA, nnzB;

815:     nnwA = aijA->nonew;
816:     nnwB = aijB->nonew;
817:     nnzA = aijA->keepnonzeropattern;
818:     nnzB = aijB->keepnonzeropattern;
819:     if (!nnzA) {
820:       PetscInfo(mat->A,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on diagonal block.\n");
821:       aijA->nonew = 0;
822:     }
823:     if (!nnzB) {
824:       PetscInfo(mat->B,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on off-diagonal block.\n");
825:       aijB->nonew = 0;
826:     }
827:     /* Must zero here before the next loop */
828:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
829:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
830:     for (r = 0; r < len; ++r) {
831:       const PetscInt row = lrows[r] + A->rmap->rstart;
832:       if (row >= A->cmap->N) continue;
833:       MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
834:     }
835:     aijA->nonew = nnwA;
836:     aijB->nonew = nnwB;
837:   } else {
838:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
839:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
840:   }
841:   PetscFree(lrows);
842:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
843:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

845:   /* reduce nonzerostate */
846:   lch = (PetscBool)(sA != mat->A->nonzerostate || sB != mat->B->nonzerostate);
847:   MPIU_Allreduce(&lch,&gch,1,MPIU_BOOL,MPI_LOR,PetscObjectComm((PetscObject)A));
848:   if (gch) A->nonzerostate++;
849:   return(0);
850: }

852: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
853: {
854:   Mat_MPIAIJ        *l = (Mat_MPIAIJ*)A->data;
855:   PetscErrorCode    ierr;
856:   PetscMPIInt       n = A->rmap->n;
857:   PetscInt          i,j,r,m,len = 0;
858:   PetscInt          *lrows,*owners = A->rmap->range;
859:   PetscMPIInt       p = 0;
860:   PetscSFNode       *rrows;
861:   PetscSF           sf;
862:   const PetscScalar *xx;
863:   PetscScalar       *bb,*mask;
864:   Vec               xmask,lmask;
865:   Mat_SeqAIJ        *aij = (Mat_SeqAIJ*)l->B->data;
866:   const PetscInt    *aj, *ii,*ridx;
867:   PetscScalar       *aa;

870:   /* Create SF where leaves are input rows and roots are owned rows */
871:   PetscMalloc1(n, &lrows);
872:   for (r = 0; r < n; ++r) lrows[r] = -1;
873:   PetscMalloc1(N, &rrows);
874:   for (r = 0; r < N; ++r) {
875:     const PetscInt idx   = rows[r];
876:     if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
877:     if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
878:       PetscLayoutFindOwner(A->rmap,idx,&p);
879:     }
880:     rrows[r].rank  = p;
881:     rrows[r].index = rows[r] - owners[p];
882:   }
883:   PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
884:   PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
885:   /* Collect flags for rows to be zeroed */
886:   PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
887:   PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
888:   PetscSFDestroy(&sf);
889:   /* Compress and put in row numbers */
890:   for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
891:   /* zero diagonal part of matrix */
892:   MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
893:   /* handle off diagonal part of matrix */
894:   MatCreateVecs(A,&xmask,NULL);
895:   VecDuplicate(l->lvec,&lmask);
896:   VecGetArray(xmask,&bb);
897:   for (i=0; i<len; i++) bb[lrows[i]] = 1;
898:   VecRestoreArray(xmask,&bb);
899:   VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
900:   VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
901:   VecDestroy(&xmask);
 902:   if (x && b) { /* this code is buggy when the row and column layouts don't match */
903:     PetscBool cong;

905:     MatHasCongruentLayouts(A,&cong);
906:     if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
907:     VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
908:     VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
909:     VecGetArrayRead(l->lvec,&xx);
910:     VecGetArray(b,&bb);
911:   }
912:   VecGetArray(lmask,&mask);
913:   /* remove zeroed rows of off diagonal matrix */
914:   ii = aij->i;
915:   for (i=0; i<len; i++) {
916:     PetscArrayzero(aij->a + ii[lrows[i]],ii[lrows[i]+1] - ii[lrows[i]]);
917:   }
 918:   /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
919:   if (aij->compressedrow.use) {
920:     m    = aij->compressedrow.nrows;
921:     ii   = aij->compressedrow.i;
922:     ridx = aij->compressedrow.rindex;
923:     for (i=0; i<m; i++) {
924:       n  = ii[i+1] - ii[i];
925:       aj = aij->j + ii[i];
926:       aa = aij->a + ii[i];

928:       for (j=0; j<n; j++) {
929:         if (PetscAbsScalar(mask[*aj])) {
930:           if (b) bb[*ridx] -= *aa*xx[*aj];
931:           *aa = 0.0;
932:         }
933:         aa++;
934:         aj++;
935:       }
936:       ridx++;
937:     }
938:   } else { /* do not use compressed row format */
939:     m = l->B->rmap->n;
940:     for (i=0; i<m; i++) {
941:       n  = ii[i+1] - ii[i];
942:       aj = aij->j + ii[i];
943:       aa = aij->a + ii[i];
944:       for (j=0; j<n; j++) {
945:         if (PetscAbsScalar(mask[*aj])) {
946:           if (b) bb[i] -= *aa*xx[*aj];
947:           *aa = 0.0;
948:         }
949:         aa++;
950:         aj++;
951:       }
952:     }
953:   }
954:   if (x && b) {
955:     VecRestoreArray(b,&bb);
956:     VecRestoreArrayRead(l->lvec,&xx);
957:   }
958:   VecRestoreArray(lmask,&mask);
959:   VecDestroy(&lmask);
960:   PetscFree(lrows);

962:   /* only change matrix nonzero state if pattern was allowed to be changed */
963:   if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
964:     PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
965:     MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
966:   }
967:   return(0);
968: }

970: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
971: {
972:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
974:   PetscInt       nt;
975:   VecScatter     Mvctx = a->Mvctx;

978:   VecGetLocalSize(xx,&nt);
979:   if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
980:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
981:   (*a->A->ops->mult)(a->A,xx,yy);
982:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
983:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
984:   return(0);
985: }

987: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
988: {
989:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

993:   MatMultDiagonalBlock(a->A,bb,xx);
994:   return(0);
995: }

997: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
998: {
999:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1001:   VecScatter     Mvctx = a->Mvctx;

1004:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1005:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
1006:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1007:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1008:   return(0);
1009: }

1011: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
1012: {
1013:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1017:   /* do nondiagonal part */
1018:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1019:   /* do local part */
1020:   (*a->A->ops->multtranspose)(a->A,xx,yy);
1021:   /* add partial results together */
1022:   VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1023:   VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1024:   return(0);
1025: }

1027: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool  *f)
1028: {
1029:   MPI_Comm       comm;
1030:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
1031:   Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
1032:   IS             Me,Notme;
1034:   PetscInt       M,N,first,last,*notme,i;
1035:   PetscBool      lf;
1036:   PetscMPIInt    size;

1039:   /* Easy test: symmetric diagonal block */
1040:   Bij  = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
1041:   MatIsTranspose(Adia,Bdia,tol,&lf);
1042:   MPIU_Allreduce(&lf,f,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)Amat));
1043:   if (!*f) return(0);
1044:   PetscObjectGetComm((PetscObject)Amat,&comm);
1045:   MPI_Comm_size(comm,&size);
1046:   if (size == 1) return(0);

1048:   /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1049:   MatGetSize(Amat,&M,&N);
1050:   MatGetOwnershipRange(Amat,&first,&last);
1051:   PetscMalloc1(N-last+first,&notme);
1052:   for (i=0; i<first; i++) notme[i] = i;
1053:   for (i=last; i<M; i++) notme[i-last+first] = i;
1054:   ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1055:   ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1056:   MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1057:   Aoff = Aoffs[0];
1058:   MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1059:   Boff = Boffs[0];
1060:   MatIsTranspose(Aoff,Boff,tol,f);
1061:   MatDestroyMatrices(1,&Aoffs);
1062:   MatDestroyMatrices(1,&Boffs);
1063:   ISDestroy(&Me);
1064:   ISDestroy(&Notme);
1065:   PetscFree(notme);
1066:   return(0);
1067: }

1069: PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A,PetscReal tol,PetscBool  *f)
1070: {

1074:   MatIsTranspose_MPIAIJ(A,A,tol,f);
1075:   return(0);
1076: }

1078: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1079: {
1080:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1084:   /* do nondiagonal part */
1085:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1086:   /* do local part */
1087:   (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1088:   /* add partial results together */
1089:   VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1090:   VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1091:   return(0);
1092: }

1094: /*
1095:   This only works correctly for square matrices where the subblock A->A is the
1096:    diagonal block
1097: */
1098: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1099: {
1101:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1104:   if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1105:   if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
1106:   MatGetDiagonal(a->A,v);
1107:   return(0);
1108: }

1110: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1111: {
1112:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1116:   MatScale(a->A,aa);
1117:   MatScale(a->B,aa);
1118:   return(0);
1119: }

1121: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1122: {
1123:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

1127: #if defined(PETSC_USE_LOG)
1128:   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
1129: #endif
1130:   MatStashDestroy_Private(&mat->stash);
1131:   VecDestroy(&aij->diag);
1132:   MatDestroy(&aij->A);
1133:   MatDestroy(&aij->B);
1134: #if defined(PETSC_USE_CTABLE)
1135:   PetscTableDestroy(&aij->colmap);
1136: #else
1137:   PetscFree(aij->colmap);
1138: #endif
1139:   PetscFree(aij->garray);
1140:   VecDestroy(&aij->lvec);
1141:   VecScatterDestroy(&aij->Mvctx);
1142:   PetscFree2(aij->rowvalues,aij->rowindices);
1143:   PetscFree(aij->ld);
1144:   PetscFree(mat->data);

1146:   /* may be created by MatCreateMPIAIJSumSeqAIJSymbolic */
1147:   PetscObjectCompose((PetscObject)mat,"MatMergeSeqsToMPI",NULL);

1149:   PetscObjectChangeTypeName((PetscObject)mat,NULL);
1150:   PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1151:   PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1152:   PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1153:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1154:   PetscObjectComposeFunction((PetscObject)mat,"MatResetPreallocation_C",NULL);
1155:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1156:   PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1157:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpibaij_C",NULL);
1158:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1159: #if defined(PETSC_HAVE_CUDA)
1160:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijcusparse_C",NULL);
1161: #endif
1162: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1163:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijkokkos_C",NULL);
1164: #endif
1165:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpidense_C",NULL);
1166: #if defined(PETSC_HAVE_ELEMENTAL)
1167:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1168: #endif
1169: #if defined(PETSC_HAVE_SCALAPACK)
1170:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_scalapack_C",NULL);
1171: #endif
1172: #if defined(PETSC_HAVE_HYPRE)
1173:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);
1174:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_transpose_mpiaij_mpiaij_C",NULL);
1175: #endif
1176:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1177:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_is_mpiaij_C",NULL);
1178:   PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpiaij_mpiaij_C",NULL);
1179:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetUseScalableIncreaseOverlap_C",NULL);
1180:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijperm_C",NULL);
1181:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijsell_C",NULL);
1182: #if defined(PETSC_HAVE_MKL_SPARSE)
1183:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijmkl_C",NULL);
1184: #endif
1185:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpiaijcrl_C",NULL);
1186:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1187:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisell_C",NULL);
1188:   return(0);
1189: }

1191: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1192: {
1193:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1194:   Mat_SeqAIJ        *A   = (Mat_SeqAIJ*)aij->A->data;
1195:   Mat_SeqAIJ        *B   = (Mat_SeqAIJ*)aij->B->data;
1196:   const PetscInt    *garray = aij->garray;
1197:   const PetscScalar *aa,*ba;
1198:   PetscInt          header[4],M,N,m,rs,cs,nz,cnt,i,ja,jb;
1199:   PetscInt          *rowlens;
1200:   PetscInt          *colidxs;
1201:   PetscScalar       *matvals;
1202:   PetscErrorCode    ierr;

1205:   PetscViewerSetUp(viewer);

1207:   M  = mat->rmap->N;
1208:   N  = mat->cmap->N;
1209:   m  = mat->rmap->n;
1210:   rs = mat->rmap->rstart;
1211:   cs = mat->cmap->rstart;
1212:   nz = A->nz + B->nz;

1214:   /* write matrix header */
1215:   header[0] = MAT_FILE_CLASSID;
1216:   header[1] = M; header[2] = N; header[3] = nz;
1217:   MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1218:   PetscViewerBinaryWrite(viewer,header,4,PETSC_INT);

1220:   /* fill in and store row lengths  */
1221:   PetscMalloc1(m,&rowlens);
1222:   for (i=0; i<m; i++) rowlens[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
1223:   PetscViewerBinaryWriteAll(viewer,rowlens,m,rs,M,PETSC_INT);
1224:   PetscFree(rowlens);

1226:   /* fill in and store column indices */
1227:   PetscMalloc1(nz,&colidxs);
1228:   for (cnt=0, i=0; i<m; i++) {
1229:     for (jb=B->i[i]; jb<B->i[i+1]; jb++) {
1230:       if (garray[B->j[jb]] > cs) break;
1231:       colidxs[cnt++] = garray[B->j[jb]];
1232:     }
1233:     for (ja=A->i[i]; ja<A->i[i+1]; ja++)
1234:       colidxs[cnt++] = A->j[ja] + cs;
1235:     for (; jb<B->i[i+1]; jb++)
1236:       colidxs[cnt++] = garray[B->j[jb]];
1237:   }
1238:   if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1239:   PetscViewerBinaryWriteAll(viewer,colidxs,nz,PETSC_DETERMINE,PETSC_DETERMINE,PETSC_INT);
1240:   PetscFree(colidxs);

1242:   /* fill in and store nonzero values */
1243:   MatSeqAIJGetArrayRead(aij->A,&aa);
1244:   MatSeqAIJGetArrayRead(aij->B,&ba);
1245:   PetscMalloc1(nz,&matvals);
1246:   for (cnt=0, i=0; i<m; i++) {
1247:     for (jb=B->i[i]; jb<B->i[i+1]; jb++) {
1248:       if (garray[B->j[jb]] > cs) break;
1249:       matvals[cnt++] = ba[jb];
1250:     }
1251:     for (ja=A->i[i]; ja<A->i[i+1]; ja++)
1252:       matvals[cnt++] = aa[ja];
1253:     for (; jb<B->i[i+1]; jb++)
1254:       matvals[cnt++] = ba[jb];
1255:   }
1256:   MatSeqAIJRestoreArrayRead(aij->A,&aa);
1257:   MatSeqAIJRestoreArrayRead(aij->B,&ba);
1258:   if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1259:   PetscViewerBinaryWriteAll(viewer,matvals,nz,PETSC_DETERMINE,PETSC_DETERMINE,PETSC_SCALAR);
1260:   PetscFree(matvals);

1262:   /* write block size option to the viewer's .info file */
1263:   MatView_Binary_BlockSizes(mat,viewer);
1264:   return(0);
1265: }
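
/*
   Editorial note (not part of the PETSc source): the binary layout written above, gathered
   onto the viewer, is

     header[4]    = {MAT_FILE_CLASSID, M, N, global nz}
     rowlens[M]     number of nonzeros in each global row
     colidxs[nz]    global column indices; within a row, off-diagonal columns left of the
                    diagonal block come first, then the diagonal block, then the rest
     matvals[nz]    the corresponding values in the same order

   which is the standard PETSc binary matrix format that MatLoad() reads back.
*/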

1267: #include <petscdraw.h>
1268: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1269: {
1270:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1271:   PetscErrorCode    ierr;
1272:   PetscMPIInt       rank = aij->rank,size = aij->size;
1273:   PetscBool         isdraw,iascii,isbinary;
1274:   PetscViewer       sviewer;
1275:   PetscViewerFormat format;

1278:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1279:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1280:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1281:   if (iascii) {
1282:     PetscViewerGetFormat(viewer,&format);
1283:     if (format == PETSC_VIEWER_LOAD_BALANCE) {
1284:       PetscInt i,nmax = 0,nmin = PETSC_MAX_INT,navg = 0,*nz,nzlocal = ((Mat_SeqAIJ*) (aij->A->data))->nz + ((Mat_SeqAIJ*) (aij->B->data))->nz;
1285:       PetscMalloc1(size,&nz);
1286:       MPI_Allgather(&nzlocal,1,MPIU_INT,nz,1,MPIU_INT,PetscObjectComm((PetscObject)mat));
1287:       for (i=0; i<(PetscInt)size; i++) {
1288:         nmax = PetscMax(nmax,nz[i]);
1289:         nmin = PetscMin(nmin,nz[i]);
1290:         navg += nz[i];
1291:       }
1292:       PetscFree(nz);
1293:       navg = navg/size;
1294:       PetscViewerASCIIPrintf(viewer,"Load Balance - Nonzeros: Min %D  avg %D  max %D\n",nmin,navg,nmax);
1295:       return(0);
1296:     }
1297:     PetscViewerGetFormat(viewer,&format);
1298:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1299:       MatInfo   info;
1300:       PetscInt *inodes=NULL;

1302:       MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1303:       MatGetInfo(mat,MAT_LOCAL,&info);
1304:       MatInodeGetInodeSizes(aij->A,NULL,&inodes,NULL);
1305:       PetscViewerASCIIPushSynchronized(viewer);
1306:       if (!inodes) {
1307:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, not using I-node routines\n",
1308:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1309:       } else {
1310:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, using I-node routines\n",
1311:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1312:       }
1313:       MatGetInfo(aij->A,MAT_LOCAL,&info);
1314:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1315:       MatGetInfo(aij->B,MAT_LOCAL,&info);
1316:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1317:       PetscViewerFlush(viewer);
1318:       PetscViewerASCIIPopSynchronized(viewer);
1319:       PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1320:       VecScatterView(aij->Mvctx,viewer);
1321:       return(0);
1322:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1323:       PetscInt inodecount,inodelimit,*inodes;
1324:       MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1325:       if (inodes) {
1326:         PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1327:       } else {
1328:         PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1329:       }
1330:       return(0);
1331:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1332:       return(0);
1333:     }
1334:   } else if (isbinary) {
1335:     if (size == 1) {
1336:       PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1337:       MatView(aij->A,viewer);
1338:     } else {
1339:       MatView_MPIAIJ_Binary(mat,viewer);
1340:     }
1341:     return(0);
1342:   } else if (iascii && size == 1) {
1343:     PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1344:     MatView(aij->A,viewer);
1345:     return(0);
1346:   } else if (isdraw) {
1347:     PetscDraw draw;
1348:     PetscBool isnull;
1349:     PetscViewerDrawGetDraw(viewer,0,&draw);
1350:     PetscDrawIsNull(draw,&isnull);
1351:     if (isnull) return(0);
1352:   }

1354:   { /* assemble the entire matrix onto first processor */
1355:     Mat A = NULL, Av;
1356:     IS  isrow,iscol;

1358:     ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->rmap->N : 0,0,1,&isrow);
1359:     ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->cmap->N : 0,0,1,&iscol);
1360:     MatCreateSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&A);
1361:     MatMPIAIJGetSeqAIJ(A,&Av,NULL,NULL);
1362: /*  The commented code uses MatCreateSubMatrices instead */
1363: /*
1364:     Mat *AA, A = NULL, Av;
1365:     IS  isrow,iscol;

1367:     ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->rmap->N : 0,0,1,&isrow);
1368:     ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->cmap->N : 0,0,1,&iscol);
1369:     MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA);
1370:     if (rank == 0) {
1371:        PetscObjectReference((PetscObject)AA[0]);
1372:        A    = AA[0];
1373:        Av   = AA[0];
1374:     }
1375:     MatDestroySubMatrices(1,&AA);
1376: */
1377:     ISDestroy(&iscol);
1378:     ISDestroy(&isrow);
1379:     /*
1380:        Every process has to participate in the viewing since the graphics waits are
1381:        synchronized across all processes that share the PetscDraw object
1382:     */
1383:     PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1384:     if (rank == 0) {
1385:       if (((PetscObject)mat)->name) {
1386:         PetscObjectSetName((PetscObject)Av,((PetscObject)mat)->name);
1387:       }
1388:       MatView_SeqAIJ(Av,sviewer);
1389:     }
1390:     PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1391:     PetscViewerFlush(viewer);
1392:     MatDestroy(&A);
1393:   }
1394:   return(0);
1395: }

1397: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1398: {
1400:   PetscBool      iascii,isdraw,issocket,isbinary;

1403:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1404:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1405:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1406:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1407:   if (iascii || isdraw || isbinary || issocket) {
1408:     MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1409:   }
1410:   return(0);
1411: }

1413: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1414: {
1415:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1417:   Vec            bb1 = NULL;
1418:   PetscBool      hasop;

1421:   if (flag == SOR_APPLY_UPPER) {
1422:     (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1423:     return(0);
1424:   }

1426:   if (its > 1 || !(flag & SOR_ZERO_INITIAL_GUESS) || flag & SOR_EISENSTAT) {
1427:     VecDuplicate(bb,&bb1);
1428:   }

1430:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1431:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1432:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1433:       its--;
1434:     }

1436:     while (its--) {
1437:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1438:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1440:       /* update rhs: bb1 = bb - B*x */
1441:       VecScale(mat->lvec,-1.0);
1442:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1444:       /* local sweep */
1445:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1446:     }
1447:   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1448:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1449:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1450:       its--;
1451:     }
1452:     while (its--) {
1453:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1454:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1456:       /* update rhs: bb1 = bb - B*x */
1457:       VecScale(mat->lvec,-1.0);
1458:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1460:       /* local sweep */
1461:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1462:     }
1463:   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1464:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1465:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1466:       its--;
1467:     }
1468:     while (its--) {
1469:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1470:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1472:       /* update rhs: bb1 = bb - B*x */
1473:       VecScale(mat->lvec,-1.0);
1474:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1476:       /* local sweep */
1477:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1478:     }
1479:   } else if (flag & SOR_EISENSTAT) {
1480:     Vec xx1;

1482:     VecDuplicate(bb,&xx1);
1483:     (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);

1485:     VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1486:     VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1487:     if (!mat->diag) {
1488:       MatCreateVecs(matin,&mat->diag,NULL);
1489:       MatGetDiagonal(matin,mat->diag);
1490:     }
1491:     MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1492:     if (hasop) {
1493:       MatMultDiagonalBlock(matin,xx,bb1);
1494:     } else {
1495:       VecPointwiseMult(bb1,mat->diag,xx);
1496:     }
1497:     VecAYPX(bb1,(omega-2.0)/omega,bb);

1499:     MatMultAdd(mat->B,mat->lvec,bb1,bb1);

1501:     /* local sweep */
1502:     (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1503:     VecAXPY(xx,1.0,xx1);
1504:     VecDestroy(&xx1);
1505:   } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");

1507:   VecDestroy(&bb1);

1509:   matin->factorerrortype = mat->A->factorerrortype;
1510:   return(0);
1511: }
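
/*
   Illustrative usage sketch (assuming A, b, x are a conforming assembled MATMPIAIJ matrix
   and vectors): one processor-local symmetric sweep with a zero initial guess, which is
   the flag combination handled by the first branch above.

     MatSOR(A,b,1.0,(MatSORType)(SOR_LOCAL_SYMMETRIC_SWEEP | SOR_ZERO_INITIAL_GUESS),0.0,1,1,x);
*/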

1513: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1514: {
1515:   Mat            aA,aB,Aperm;
1516:   const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1517:   PetscScalar    *aa,*ba;
1518:   PetscInt       i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1519:   PetscSF        rowsf,sf;
1520:   IS             parcolp = NULL;
1521:   PetscBool      done;

1525:   MatGetLocalSize(A,&m,&n);
1526:   ISGetIndices(rowp,&rwant);
1527:   ISGetIndices(colp,&cwant);
1528:   PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);

1530:   /* Invert row permutation to find out where my rows should go */
1531:   PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1532:   PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1533:   PetscSFSetFromOptions(rowsf);
1534:   for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1535:   PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPI_REPLACE);
1536:   PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPI_REPLACE);

1538:   /* Invert column permutation to find out where my columns should go */
1539:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1540:   PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1541:   PetscSFSetFromOptions(sf);
1542:   for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1543:   PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPI_REPLACE);
1544:   PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPI_REPLACE);
1545:   PetscSFDestroy(&sf);

1547:   ISRestoreIndices(rowp,&rwant);
1548:   ISRestoreIndices(colp,&cwant);
1549:   MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);

1551:   /* Find out where my gcols should go */
1552:   MatGetSize(aB,NULL,&ng);
1553:   PetscMalloc1(ng,&gcdest);
1554:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1555:   PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1556:   PetscSFSetFromOptions(sf);
1557:   PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest,MPI_REPLACE);
1558:   PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest,MPI_REPLACE);
1559:   PetscSFDestroy(&sf);

1561:   PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1562:   MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1563:   MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1564:   for (i=0; i<m; i++) {
1565:     PetscInt    row = rdest[i];
1566:     PetscMPIInt rowner;
1567:     PetscLayoutFindOwner(A->rmap,row,&rowner);
1568:     for (j=ai[i]; j<ai[i+1]; j++) {
1569:       PetscInt    col = cdest[aj[j]];
1570:       PetscMPIInt cowner;
1571:       PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1572:       if (rowner == cowner) dnnz[i]++;
1573:       else onnz[i]++;
1574:     }
1575:     for (j=bi[i]; j<bi[i+1]; j++) {
1576:       PetscInt    col = gcdest[bj[j]];
1577:       PetscMPIInt cowner;
1578:       PetscLayoutFindOwner(A->cmap,col,&cowner);
1579:       if (rowner == cowner) dnnz[i]++;
1580:       else onnz[i]++;
1581:     }
1582:   }
1583:   PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz,MPI_REPLACE);
1584:   PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz,MPI_REPLACE);
1585:   PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz,MPI_REPLACE);
1586:   PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz,MPI_REPLACE);
1587:   PetscSFDestroy(&rowsf);

1589:   MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1590:   MatSeqAIJGetArray(aA,&aa);
1591:   MatSeqAIJGetArray(aB,&ba);
1592:   for (i=0; i<m; i++) {
1593:     PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1594:     PetscInt j0,rowlen;
1595:     rowlen = ai[i+1] - ai[i];
1596:     for (j0=j=0; j<rowlen; j0=j) { /* rowlen could be larger than the number of local rows m (the length of the repurposed work arrays), so insert in batches */
1597:       for (; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1598:       MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1599:     }
1600:     rowlen = bi[i+1] - bi[i];
1601:     for (j0=j=0; j<rowlen; j0=j) {
1602:       for (; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1603:       MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1604:     }
1605:   }
1606:   MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1607:   MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1608:   MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1609:   MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1610:   MatSeqAIJRestoreArray(aA,&aa);
1611:   MatSeqAIJRestoreArray(aB,&ba);
1612:   PetscFree4(dnnz,onnz,tdnnz,tonnz);
1613:   PetscFree3(work,rdest,cdest);
1614:   PetscFree(gcdest);
1615:   if (parcolp) {ISDestroy(&colp);}
1616:   *B = Aperm;
1617:   return(0);
1618: }
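
/*
   Illustrative usage sketch (assuming A is an assembled MATMPIAIJ matrix and rowperm,
   colperm are index sets giving, for each locally owned row and column, its new global
   position): build the permuted matrix and discard it when done.

     Mat Aperm;
     MatPermute(A,rowperm,colperm,&Aperm);
     ...
     MatDestroy(&Aperm);
*/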

1620: PetscErrorCode  MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1621: {
1622:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1626:   MatGetSize(aij->B,NULL,nghosts);
1627:   if (ghosts) *ghosts = aij->garray;
1628:   return(0);
1629: }
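
/*
   Illustrative usage sketch (assuming A is an assembled MATMPIAIJ matrix): obtain the
   global indices of the off-process "ghost" columns held in garray; the array belongs
   to the matrix and must not be freed by the caller.

     PetscInt       nghosts;
     const PetscInt *ghosts;
     MatGetGhosts(A,&nghosts,&ghosts);
*/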

1631: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1632: {
1633:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1634:   Mat            A    = mat->A,B = mat->B;
1636:   PetscLogDouble isend[5],irecv[5];

1639:   info->block_size = 1.0;
1640:   MatGetInfo(A,MAT_LOCAL,info);

1642:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1643:   isend[3] = info->memory;  isend[4] = info->mallocs;

1645:   MatGetInfo(B,MAT_LOCAL,info);

1647:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1648:   isend[3] += info->memory;  isend[4] += info->mallocs;
1649:   if (flag == MAT_LOCAL) {
1650:     info->nz_used      = isend[0];
1651:     info->nz_allocated = isend[1];
1652:     info->nz_unneeded  = isend[2];
1653:     info->memory       = isend[3];
1654:     info->mallocs      = isend[4];
1655:   } else if (flag == MAT_GLOBAL_MAX) {
1656:     MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_MAX,PetscObjectComm((PetscObject)matin));

1658:     info->nz_used      = irecv[0];
1659:     info->nz_allocated = irecv[1];
1660:     info->nz_unneeded  = irecv[2];
1661:     info->memory       = irecv[3];
1662:     info->mallocs      = irecv[4];
1663:   } else if (flag == MAT_GLOBAL_SUM) {
1664:     MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_SUM,PetscObjectComm((PetscObject)matin));

1666:     info->nz_used      = irecv[0];
1667:     info->nz_allocated = irecv[1];
1668:     info->nz_unneeded  = irecv[2];
1669:     info->memory       = irecv[3];
1670:     info->mallocs      = irecv[4];
1671:   }
1672:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1673:   info->fill_ratio_needed = 0;
1674:   info->factor_mallocs    = 0;
1675:   return(0);
1676: }
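
/*
   Illustrative usage sketch (assuming A is an assembled MATMPIAIJ matrix): sum the local
   contributions over all ranks, i.e. the MAT_GLOBAL_SUM branch above.

     MatInfo info;
     MatGetInfo(A,MAT_GLOBAL_SUM,&info);
     PetscPrintf(PETSC_COMM_WORLD,"nz used %g, nz allocated %g\n",info.nz_used,info.nz_allocated);
*/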

1678: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1679: {
1680:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1684:   switch (op) {
1685:   case MAT_NEW_NONZERO_LOCATIONS:
1686:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1687:   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1688:   case MAT_KEEP_NONZERO_PATTERN:
1689:   case MAT_NEW_NONZERO_LOCATION_ERR:
1690:   case MAT_USE_INODES:
1691:   case MAT_IGNORE_ZERO_ENTRIES:
1692:   case MAT_FORM_EXPLICIT_TRANSPOSE:
1693:     MatCheckPreallocated(A,1);
1694:     MatSetOption(a->A,op,flg);
1695:     MatSetOption(a->B,op,flg);
1696:     break;
1697:   case MAT_ROW_ORIENTED:
1698:     MatCheckPreallocated(A,1);
1699:     a->roworiented = flg;

1701:     MatSetOption(a->A,op,flg);
1702:     MatSetOption(a->B,op,flg);
1703:     break;
1704:   case MAT_FORCE_DIAGONAL_ENTRIES:
1705:   case MAT_SORTED_FULL:
1706:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1707:     break;
1708:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1709:     a->donotstash = flg;
1710:     break;
1711:   /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1712:   case MAT_SPD:
1713:   case MAT_SYMMETRIC:
1714:   case MAT_STRUCTURALLY_SYMMETRIC:
1715:   case MAT_HERMITIAN:
1716:   case MAT_SYMMETRY_ETERNAL:
1717:     break;
1718:   case MAT_SUBMAT_SINGLEIS:
1719:     A->submat_singleis = flg;
1720:     break;
1721:   case MAT_STRUCTURE_ONLY:
1722:     /* The option is handled directly by MatSetOption() */
1723:     break;
1724:   default:
1725:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1726:   }
1727:   return(0);
1728: }
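
/*
   Illustrative usage sketch (assuming A is a preallocated MATMPIAIJ matrix): drop any
   entries destined for other processes instead of stashing and communicating them at
   assembly time, i.e. the MAT_IGNORE_OFF_PROC_ENTRIES case above.

     MatSetOption(A,MAT_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE);
*/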

1730: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1731: {
1732:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1733:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1735:   PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1736:   PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1737:   PetscInt       *cmap,*idx_p;

1740:   if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1741:   mat->getrowactive = PETSC_TRUE;

1743:   if (!mat->rowvalues && (idx || v)) {
1744:     /*
1745:         allocate enough space to hold information from the longest row.
1746:     */
1747:     Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1748:     PetscInt   max = 1,tmp;
1749:     for (i=0; i<matin->rmap->n; i++) {
1750:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1751:       if (max < tmp) max = tmp;
1752:     }
1753:     PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1754:   }

1756:   if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1757:   lrow = row - rstart;

1759:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1760:   if (!v)   {pvA = NULL; pvB = NULL;}
1761:   if (!idx) {pcA = NULL; if (!v) pcB = NULL;}
1762:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1763:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1764:   nztot = nzA + nzB;

1766:   cmap = mat->garray;
1767:   if (v  || idx) {
1768:     if (nztot) {
1769:       /* Sort by increasing column numbers, assuming A and B already sorted */
1770:       PetscInt imark = -1;
1771:       if (v) {
1772:         *v = v_p = mat->rowvalues;
1773:         for (i=0; i<nzB; i++) {
1774:           if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1775:           else break;
1776:         }
1777:         imark = i;
1778:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1779:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1780:       }
1781:       if (idx) {
1782:         *idx = idx_p = mat->rowindices;
1783:         if (imark > -1) {
1784:           for (i=0; i<imark; i++) {
1785:             idx_p[i] = cmap[cworkB[i]];
1786:           }
1787:         } else {
1788:           for (i=0; i<nzB; i++) {
1789:             if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1790:             else break;
1791:           }
1792:           imark = i;
1793:         }
1794:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
1795:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
1796:       }
1797:     } else {
1798:       if (idx) *idx = NULL;
1799:       if (v)   *v   = NULL;
1800:     }
1801:   }
1802:   *nz  = nztot;
1803:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1804:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1805:   return(0);
1806: }
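
/*
   Illustrative usage sketch (assuming A is an assembled MATMPIAIJ matrix): only locally
   owned rows may be requested, and each MatGetRow() must be matched by MatRestoreRow()
   before the next row is accessed.

     PetscInt          row,rstart,rend,ncols;
     const PetscInt    *cols;
     const PetscScalar *vals;
     MatGetOwnershipRange(A,&rstart,&rend);
     for (row=rstart; row<rend; row++) {
       MatGetRow(A,row,&ncols,&cols,&vals);
       ... use cols[] and vals[] ...
       MatRestoreRow(A,row,&ncols,&cols,&vals);
     }
*/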

1808: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1809: {
1810:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1813:   if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1814:   aij->getrowactive = PETSC_FALSE;
1815:   return(0);
1816: }

1818: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1819: {
1820:   Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)mat->data;
1821:   Mat_SeqAIJ     *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1823:   PetscInt       i,j,cstart = mat->cmap->rstart;
1824:   PetscReal      sum = 0.0;
1825:   MatScalar      *v;

1828:   if (aij->size == 1) {
1829:      MatNorm(aij->A,type,norm);
1830:   } else {
1831:     if (type == NORM_FROBENIUS) {
1832:       v = amat->a;
1833:       for (i=0; i<amat->nz; i++) {
1834:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1835:       }
1836:       v = bmat->a;
1837:       for (i=0; i<bmat->nz; i++) {
1838:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1839:       }
1840:       MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1841:       *norm = PetscSqrtReal(*norm);
1842:       PetscLogFlops(2.0*amat->nz+2.0*bmat->nz);
1843:     } else if (type == NORM_1) { /* max column norm */
1844:       PetscReal *tmp,*tmp2;
1845:       PetscInt  *jj,*garray = aij->garray;
1846:       PetscCalloc1(mat->cmap->N+1,&tmp);
1847:       PetscMalloc1(mat->cmap->N+1,&tmp2);
1848:       *norm = 0.0;
1849:       v     = amat->a; jj = amat->j;
1850:       for (j=0; j<amat->nz; j++) {
1851:         tmp[cstart + *jj++] += PetscAbsScalar(*v);  v++;
1852:       }
1853:       v = bmat->a; jj = bmat->j;
1854:       for (j=0; j<bmat->nz; j++) {
1855:         tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1856:       }
1857:       MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1858:       for (j=0; j<mat->cmap->N; j++) {
1859:         if (tmp2[j] > *norm) *norm = tmp2[j];
1860:       }
1861:       PetscFree(tmp);
1862:       PetscFree(tmp2);
1863:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1864:     } else if (type == NORM_INFINITY) { /* max row norm */
1865:       PetscReal ntemp = 0.0;
1866:       for (j=0; j<aij->A->rmap->n; j++) {
1867:         v   = amat->a + amat->i[j];
1868:         sum = 0.0;
1869:         for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1870:           sum += PetscAbsScalar(*v); v++;
1871:         }
1872:         v = bmat->a + bmat->i[j];
1873:         for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1874:           sum += PetscAbsScalar(*v); v++;
1875:         }
1876:         if (sum > ntemp) ntemp = sum;
1877:       }
1878:       MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
1879:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1880:     } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
1881:   }
1882:   return(0);
1883: }
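
/*
   Illustrative usage sketch (assuming A is an assembled MATMPIAIJ matrix): the Frobenius,
   one and infinity norms are supported above; NORM_2 is not.

     PetscReal nrm;
     MatNorm(A,NORM_FROBENIUS,&nrm);
*/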

1885: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1886: {
1887:   Mat_MPIAIJ      *a    =(Mat_MPIAIJ*)A->data,*b;
1888:   Mat_SeqAIJ      *Aloc =(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data,*sub_B_diag;
1889:   PetscInt        M     = A->rmap->N,N=A->cmap->N,ma,na,mb,nb,row,*cols,*cols_tmp,*B_diag_ilen,i,ncol,A_diag_ncol;
1890:   const PetscInt  *ai,*aj,*bi,*bj,*B_diag_i;
1891:   PetscErrorCode  ierr;
1892:   Mat             B,A_diag,*B_diag;
1893:   const MatScalar *pbv,*bv;

1896:   ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
1897:   ai = Aloc->i; aj = Aloc->j;
1898:   bi = Bloc->i; bj = Bloc->j;
1899:   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1900:     PetscInt             *d_nnz,*g_nnz,*o_nnz;
1901:     PetscSFNode          *oloc;
1902:     PETSC_UNUSED PetscSF sf;

1904:     PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
1905:     /* compute d_nnz for preallocation */
1906:     PetscArrayzero(d_nnz,na);
1907:     for (i=0; i<ai[ma]; i++) {
1908:       d_nnz[aj[i]]++;
1909:     }
1910:     /* compute local off-diagonal contributions */
1911:     PetscArrayzero(g_nnz,nb);
1912:     for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
1913:     /* map those to global */
1914:     PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1915:     PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
1916:     PetscSFSetFromOptions(sf);
1917:     PetscArrayzero(o_nnz,na);
1918:     PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
1919:     PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
1920:     PetscSFDestroy(&sf);

1922:     MatCreate(PetscObjectComm((PetscObject)A),&B);
1923:     MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
1924:     MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
1925:     MatSetType(B,((PetscObject)A)->type_name);
1926:     MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
1927:     PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
1928:   } else {
1929:     B    = *matout;
1930:     MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
1931:   }

1933:   b           = (Mat_MPIAIJ*)B->data;
1934:   A_diag      = a->A;
1935:   B_diag      = &b->A;
1936:   sub_B_diag  = (Mat_SeqAIJ*)(*B_diag)->data;
1937:   A_diag_ncol = A_diag->cmap->N;
1938:   B_diag_ilen = sub_B_diag->ilen;
1939:   B_diag_i    = sub_B_diag->i;

1941:   /* Set ilen for diagonal of B */
1942:   for (i=0; i<A_diag_ncol; i++) {
1943:     B_diag_ilen[i] = B_diag_i[i+1] - B_diag_i[i];
1944:   }

1946:   /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
1947:      very quickly (i.e., without using MatSetValues()) because all writes are local. */
1948:   MatTranspose(A_diag,MAT_REUSE_MATRIX,B_diag);

1950:   /* copy over the B part */
1951:   PetscMalloc1(bi[mb],&cols);
1952:   MatSeqAIJGetArrayRead(a->B,&bv);
1953:   pbv  = bv;
1954:   row  = A->rmap->rstart;
1955:   for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
1956:   cols_tmp = cols;
1957:   for (i=0; i<mb; i++) {
1958:     ncol = bi[i+1]-bi[i];
1959:     MatSetValues(B,ncol,cols_tmp,1,&row,pbv,INSERT_VALUES);
1960:     row++;
1961:     pbv += ncol; cols_tmp += ncol;
1962:   }
1963:   PetscFree(cols);
1964:   MatSeqAIJRestoreArrayRead(a->B,&bv);

1966:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1967:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1968:   if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
1969:     *matout = B;
1970:   } else {
1971:     MatHeaderMerge(A,&B);
1972:   }
1973:   return(0);
1974: }
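
/*
   Illustrative usage sketch (assuming A is an assembled MATMPIAIJ matrix): create the
   transpose once, then reuse its nonzero structure after A's numerical values change.

     Mat At;
     MatTranspose(A,MAT_INITIAL_MATRIX,&At);
     ... change the values of A, keeping the same nonzero pattern ...
     MatTranspose(A,MAT_REUSE_MATRIX,&At);
     MatDestroy(&At);
*/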

1976: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
1977: {
1978:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1979:   Mat            a    = aij->A,b = aij->B;
1981:   PetscInt       s1,s2,s3;

1984:   MatGetLocalSize(mat,&s2,&s3);
1985:   if (rr) {
1986:     VecGetLocalSize(rr,&s1);
1987:     if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1988:     /* Overlap communication with computation. */
1989:     VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1990:   }
1991:   if (ll) {
1992:     VecGetLocalSize(ll,&s1);
1993:     if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1994:     (*b->ops->diagonalscale)(b,ll,NULL);
1995:   }
1996:   /* scale the diagonal block */
1997:   (*a->ops->diagonalscale)(a,ll,rr);

1999:   if (rr) {
2000:     /* Do a scatter end and then right scale the off-diagonal block */
2001:     VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2002:     (*b->ops->diagonalscale)(b,NULL,aij->lvec);
2003:   }
2004:   return(0);
2005: }
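
/*
   Illustrative usage sketch (assuming A is an assembled MATMPIAIJ matrix and l, r are
   vectors conforming to its row and column layouts): computes A = diag(l) A diag(r),
   overlapping the scatter of r with the scaling of the diagonal block as above.
   Either vector may be NULL to scale on one side only.

     MatDiagonalScale(A,l,r);
*/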

2007: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2008: {
2009:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2013:   MatSetUnfactored(a->A);
2014:   return(0);
2015: }

2017: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool  *flag)
2018: {
2019:   Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2020:   Mat            a,b,c,d;
2021:   PetscBool      flg;

2025:   a = matA->A; b = matA->B;
2026:   c = matB->A; d = matB->B;

2028:   MatEqual(a,c,&flg);
2029:   if (flg) {
2030:     MatEqual(b,d,&flg);
2031:   }
2032:   MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2033:   return(0);
2034: }

2036: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2037: {
2039:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2040:   Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;

2043:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2044:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2045:     /* Because of the column compression in the off-process part of the matrix a->B,
2046:        the number of columns in a->B and b->B may differ, so we cannot call MatCopy()
2047:        directly on the two parts. If need be, a copy more efficient than MatCopy_Basic()
2048:        could be provided by first uncompressing the a->B matrices and then copying the
2049:        submatrices */
2050:     MatCopy_Basic(A,B,str);
2051:   } else {
2052:     MatCopy(a->A,b->A,str);
2053:     MatCopy(a->B,b->B,str);
2054:   }
2055:   PetscObjectStateIncrease((PetscObject)B);
2056:   return(0);
2057: }

2059: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2060: {

2064:   MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,NULL,PETSC_DEFAULT,NULL);
2065:   return(0);
2066: }

2068: /*
2069:    Computes the number of nonzeros per row needed for preallocation when X and Y
2070:    have different nonzero structure.
2071: */
2072: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2073: {
2074:   PetscInt       i,j,k,nzx,nzy;

2077:   /* Set the number of nonzeros in the new matrix */
2078:   for (i=0; i<m; i++) {
2079:     const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2080:     nzx = xi[i+1] - xi[i];
2081:     nzy = yi[i+1] - yi[i];
2082:     nnz[i] = 0;
2083:     for (j=0,k=0; j<nzx; j++) {                   /* Point in X */
2084:       for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2085:       if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++;             /* Skip duplicate */
2086:       nnz[i]++;
2087:     }
2088:     for (; k<nzy; k++) nnz[i]++;
2089:   }
2090:   return(0);
2091: }

2093: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2094: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2095: {
2097:   PetscInt       m = Y->rmap->N;
2098:   Mat_SeqAIJ     *x = (Mat_SeqAIJ*)X->data;
2099:   Mat_SeqAIJ     *y = (Mat_SeqAIJ*)Y->data;

2102:   MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2103:   return(0);
2104: }

2106: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2107: {
2109:   Mat_MPIAIJ     *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;

2112:   if (str == SAME_NONZERO_PATTERN) {
2113:     MatAXPY(yy->A,a,xx->A,str);
2114:     MatAXPY(yy->B,a,xx->B,str);
2115:   } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */
2116:     MatAXPY_Basic(Y,a,X,str);
2117:   } else {
2118:     Mat      B;
2119:     PetscInt *nnz_d,*nnz_o;

2121:     PetscMalloc1(yy->A->rmap->N,&nnz_d);
2122:     PetscMalloc1(yy->B->rmap->N,&nnz_o);
2123:     MatCreate(PetscObjectComm((PetscObject)Y),&B);
2124:     PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2125:     MatSetLayouts(B,Y->rmap,Y->cmap);
2126:     MatSetType(B,((PetscObject)Y)->type_name);
2127:     MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2128:     MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2129:     MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2130:     MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2131:     MatHeaderReplace(Y,&B);
2132:     PetscFree(nnz_d);
2133:     PetscFree(nnz_o);
2134:   }
2135:   return(0);
2136: }
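
/*
   Illustrative usage sketch (assuming X and Y are assembled MATMPIAIJ matrices with the
   same layouts): Y = Y + 2*X. DIFFERENT_NONZERO_PATTERN takes the preallocate-and-merge
   branch above; SAME_NONZERO_PATTERN is much cheaper when it applies.

     MatAXPY(Y,2.0,X,DIFFERENT_NONZERO_PATTERN);
*/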

2138: PETSC_INTERN PetscErrorCode MatConjugate_SeqAIJ(Mat);

2140: PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
2141: {
2142: #if defined(PETSC_USE_COMPLEX)
2144:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2147:   MatConjugate_SeqAIJ(aij->A);
2148:   MatConjugate_SeqAIJ(aij->B);
2149: #else
2151: #endif
2152:   return(0);
2153: }

2155: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2156: {
2157:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2161:   MatRealPart(a->A);
2162:   MatRealPart(a->B);
2163:   return(0);
2164: }

2166: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2167: {
2168:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2172:   MatImaginaryPart(a->A);
2173:   MatImaginaryPart(a->B);
2174:   return(0);
2175: }

2177: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A,Vec v,PetscInt idx[])
2178: {
2179:   Mat_MPIAIJ        *a = (Mat_MPIAIJ*)A->data;
2180:   PetscErrorCode    ierr;
2181:   PetscInt          i,*idxb = NULL,m = A->rmap->n;
2182:   PetscScalar       *va,*vv;
2183:   Vec               vB,vA;
2184:   const PetscScalar *vb;

2187:   VecCreateSeq(PETSC_COMM_SELF,m,&vA);
2188:   MatGetRowMaxAbs(a->A,vA,idx);

2190:   VecGetArrayWrite(vA,&va);
2191:   if (idx) {
2192:     for (i=0; i<m; i++) {
2193:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2194:     }
2195:   }

2197:   VecCreateSeq(PETSC_COMM_SELF,m,&vB);
2198:   PetscMalloc1(m,&idxb);
2199:   MatGetRowMaxAbs(a->B,vB,idxb);

2201:   VecGetArrayWrite(v,&vv);
2202:   VecGetArrayRead(vB,&vb);
2203:   for (i=0; i<m; i++) {
2204:     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2205:       vv[i] = vb[i];
2206:       if (idx) idx[i] = a->garray[idxb[i]];
2207:     } else {
2208:       vv[i] = va[i];
2209:       if (idx && PetscAbsScalar(va[i]) == PetscAbsScalar(vb[i]) && idxb[i] != -1 && idx[i] > a->garray[idxb[i]])
2210:         idx[i] = a->garray[idxb[i]];
2211:     }
2212:   }
2213:   VecRestoreArrayWrite(v,&vv);
2214:   VecRestoreArrayWrite(vA,&va);
2215:   VecRestoreArrayRead(vB,&vb);
2216:   PetscFree(idxb);
2217:   VecDestroy(&vA);
2218:   VecDestroy(&vB);
2219:   return(0);
2220: }
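
/*
   Illustrative usage sketch (assuming A is an assembled MATMPIAIJ matrix): v receives the
   largest entry in absolute value of each locally owned row and idx[] its global column;
   idx may be NULL if the locations are not needed.

     Vec      v;
     PetscInt *idx,m;
     MatGetLocalSize(A,&m,NULL);
     MatCreateVecs(A,NULL,&v);
     PetscMalloc1(m,&idx);
     MatGetRowMaxAbs(A,v,idx);
     ...
     PetscFree(idx);
     VecDestroy(&v);
*/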

2222: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2223: {
2224:   Mat_MPIAIJ        *mat   = (Mat_MPIAIJ*) A->data;
2225:   PetscInt          m = A->rmap->n,n = A->cmap->n;
2226:   PetscInt          cstart = A->cmap->rstart,cend = A->cmap->rend;
2227:   PetscInt          *cmap  = mat->garray;
2228:   PetscInt          *diagIdx, *offdiagIdx;
2229:   Vec               diagV, offdiagV;
2230:   PetscScalar       *a, *diagA, *offdiagA;
2231:   const PetscScalar *ba,*bav;
2232:   PetscInt          r,j,col,ncols,*bi,*bj;
2233:   PetscErrorCode    ierr;
2234:   Mat               B = mat->B;
2235:   Mat_SeqAIJ        *b = (Mat_SeqAIJ*)B->data;

2238:   /* When one process holds the entire A and the other processes have no entries */
2239:   if (A->cmap->N == n) {
2240:     VecGetArrayWrite(v,&diagA);
2241:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,m,diagA,&diagV);
2242:     MatGetRowMinAbs(mat->A,diagV,idx);
2243:     VecDestroy(&diagV);
2244:     VecRestoreArrayWrite(v,&diagA);
2245:     return(0);
2246:   } else if (n == 0) {
2247:     if (m) {
2248:       VecGetArrayWrite(v,&a);
2249:       for (r = 0; r < m; r++) {a[r] = 0.0; if (idx) idx[r] = -1;}
2250:       VecRestoreArrayWrite(v,&a);
2251:     }
2252:     return(0);
2253:   }

2255:   PetscMalloc2(m,&diagIdx,m,&offdiagIdx);
2256:   VecCreateSeq(PETSC_COMM_SELF, m, &diagV);
2257:   VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV);
2258:   MatGetRowMinAbs(mat->A, diagV, diagIdx);

2260:   /* Get offdiagIdx[] for implicit 0.0 */
2261:   MatSeqAIJGetArrayRead(B,&bav);
2262:   ba   = bav;
2263:   bi   = b->i;
2264:   bj   = b->j;
2265:   VecGetArrayWrite(offdiagV, &offdiagA);
2266:   for (r = 0; r < m; r++) {
2267:     ncols = bi[r+1] - bi[r];
2268:     if (ncols == A->cmap->N - n) { /* Brow is dense */
2269:       offdiagA[r] = *ba; offdiagIdx[r] = cmap[0];
2270:     } else { /* Brow is sparse, so we already KNOW the minimum in absolute value is 0.0 */
2271:       offdiagA[r] = 0.0;

2273:       /* Find first hole in the cmap */
2274:       for (j=0; j<ncols; j++) {
2275:         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2276:         if (col > j && j < cstart) {
2277:           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2278:           break;
2279:         } else if (col > j + n && j >= cstart) {
2280:           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2281:           break;
2282:         }
2283:       }
2284:       if (j == ncols && ncols < A->cmap->N - n) {
2285:         /* a hole is outside compressed Bcols */
2286:         if (ncols == 0) {
2287:           if (cstart) {
2288:             offdiagIdx[r] = 0;
2289:           } else offdiagIdx[r] = cend;
2290:         } else { /* ncols > 0 */
2291:           offdiagIdx[r] = cmap[ncols-1] + 1;
2292:           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2293:         }
2294:       }
2295:     }

2297:     for (j=0; j<ncols; j++) {
2298:       if (PetscAbsScalar(offdiagA[r]) > PetscAbsScalar(*ba)) {offdiagA[r] = *ba; offdiagIdx[r] = cmap[*bj];}
2299:       ba++; bj++;
2300:     }
2301:   }

2303:   VecGetArrayWrite(v, &a);
2304:   VecGetArrayRead(diagV, (const PetscScalar**)&diagA);
2305:   for (r = 0; r < m; ++r) {
2306:     if (PetscAbsScalar(diagA[r]) < PetscAbsScalar(offdiagA[r])) {
2307:       a[r]   = diagA[r];
2308:       if (idx) idx[r] = cstart + diagIdx[r];
2309:     } else if (PetscAbsScalar(diagA[r]) == PetscAbsScalar(offdiagA[r])) {
2310:       a[r] = diagA[r];
2311:       if (idx) {
2312:         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2313:           idx[r] = cstart + diagIdx[r];
2314:         } else idx[r] = offdiagIdx[r];
2315:       }
2316:     } else {
2317:       a[r]   = offdiagA[r];
2318:       if (idx) idx[r] = offdiagIdx[r];
2319:     }
2320:   }
2321:   MatSeqAIJRestoreArrayRead(B,&bav);
2322:   VecRestoreArrayWrite(v, &a);
2323:   VecRestoreArrayRead(diagV, (const PetscScalar**)&diagA);
2324:   VecRestoreArrayWrite(offdiagV, &offdiagA);
2325:   VecDestroy(&diagV);
2326:   VecDestroy(&offdiagV);
2327:   PetscFree2(diagIdx, offdiagIdx);
2328:   return(0);
2329: }

2331: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A,Vec v,PetscInt idx[])
2332: {
2333:   Mat_MPIAIJ        *mat = (Mat_MPIAIJ*) A->data;
2334:   PetscInt          m = A->rmap->n,n = A->cmap->n;
2335:   PetscInt          cstart = A->cmap->rstart,cend = A->cmap->rend;
2336:   PetscInt          *cmap  = mat->garray;
2337:   PetscInt          *diagIdx, *offdiagIdx;
2338:   Vec               diagV, offdiagV;
2339:   PetscScalar       *a, *diagA, *offdiagA;
2340:   const PetscScalar *ba,*bav;
2341:   PetscInt          r,j,col,ncols,*bi,*bj;
2342:   PetscErrorCode    ierr;
2343:   Mat               B = mat->B;
2344:   Mat_SeqAIJ        *b = (Mat_SeqAIJ*)B->data;

2347:   /* When one process holds the entire A and the other processes have no entries */
2348:   if (A->cmap->N == n) {
2349:     VecGetArrayWrite(v,&diagA);
2350:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,m,diagA,&diagV);
2351:     MatGetRowMin(mat->A,diagV,idx);
2352:     VecDestroy(&diagV);
2353:     VecRestoreArrayWrite(v,&diagA);
2354:     return(0);
2355:   } else if (n == 0) {
2356:     if (m) {
2357:       VecGetArrayWrite(v,&a);
2358:       for (r = 0; r < m; r++) {a[r] = PETSC_MAX_REAL; if (idx) idx[r] = -1;}
2359:       VecRestoreArrayWrite(v,&a);
2360:     }
2361:     return(0);
2362:   }

2364:   PetscCalloc2(m,&diagIdx,m,&offdiagIdx);
2365:   VecCreateSeq(PETSC_COMM_SELF, m, &diagV);
2366:   VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV);
2367:   MatGetRowMin(mat->A, diagV, diagIdx);

2369:   /* Get offdiagIdx[] for implicit 0.0 */
2370:   MatSeqAIJGetArrayRead(B,&bav);
2371:   ba   = bav;
2372:   bi   = b->i;
2373:   bj   = b->j;
2374:   VecGetArrayWrite(offdiagV, &offdiagA);
2375:   for (r = 0; r < m; r++) {
2376:     ncols = bi[r+1] - bi[r];
2377:     if (ncols == A->cmap->N - n) { /* Brow is dense */
2378:       offdiagA[r] = *ba; offdiagIdx[r] = cmap[0];
2379:     } else { /* Brow is sparse, so we already KNOW the minimum is 0.0 or lower */
2380:       offdiagA[r] = 0.0;

2382:       /* Find first hole in the cmap */
2383:       for (j=0; j<ncols; j++) {
2384:         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2385:         if (col > j && j < cstart) {
2386:           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2387:           break;
2388:         } else if (col > j + n && j >= cstart) {
2389:           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2390:           break;
2391:         }
2392:       }
2393:       if (j == ncols && ncols < A->cmap->N - n) {
2394:         /* a hole is outside compressed Bcols */
2395:         if (ncols == 0) {
2396:           if (cstart) {
2397:             offdiagIdx[r] = 0;
2398:           } else offdiagIdx[r] = cend;
2399:         } else { /* ncols > 0 */
2400:           offdiagIdx[r] = cmap[ncols-1] + 1;
2401:           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2402:         }
2403:       }
2404:     }

2406:     for (j=0; j<ncols; j++) {
2407:       if (PetscRealPart(offdiagA[r]) > PetscRealPart(*ba)) {offdiagA[r] = *ba; offdiagIdx[r] = cmap[*bj];}
2408:       ba++; bj++;
2409:     }
2410:   }

2412:   VecGetArrayWrite(v, &a);
2413:   VecGetArrayRead(diagV, (const PetscScalar**)&diagA);
2414:   for (r = 0; r < m; ++r) {
2415:     if (PetscRealPart(diagA[r]) < PetscRealPart(offdiagA[r])) {
2416:       a[r]   = diagA[r];
2417:       if (idx) idx[r] = cstart + diagIdx[r];
2418:     } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) {
2419:       a[r] = diagA[r];
2420:       if (idx) {
2421:         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2422:           idx[r] = cstart + diagIdx[r];
2423:         } else idx[r] = offdiagIdx[r];
2424:       }
2425:     } else {
2426:       a[r]   = offdiagA[r];
2427:       if (idx) idx[r] = offdiagIdx[r];
2428:     }
2429:   }
2430:   MatSeqAIJRestoreArrayRead(B,&bav);
2431:   VecRestoreArrayWrite(v, &a);
2432:   VecRestoreArrayRead(diagV, (const PetscScalar**)&diagA);
2433:   VecRestoreArrayWrite(offdiagV, &offdiagA);
2434:   VecDestroy(&diagV);
2435:   VecDestroy(&offdiagV);
2436:   PetscFree2(diagIdx, offdiagIdx);
2437:   return(0);
2438: }

2440: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A,Vec v,PetscInt idx[])
2441: {
2442:   Mat_MPIAIJ        *mat = (Mat_MPIAIJ*)A->data;
2443:   PetscInt          m = A->rmap->n,n = A->cmap->n;
2444:   PetscInt          cstart = A->cmap->rstart,cend = A->cmap->rend;
2445:   PetscInt          *cmap  = mat->garray;
2446:   PetscInt          *diagIdx, *offdiagIdx;
2447:   Vec               diagV, offdiagV;
2448:   PetscScalar       *a, *diagA, *offdiagA;
2449:   const PetscScalar *ba,*bav;
2450:   PetscInt          r,j,col,ncols,*bi,*bj;
2451:   PetscErrorCode    ierr;
2452:   Mat               B = mat->B;
2453:   Mat_SeqAIJ        *b = (Mat_SeqAIJ*)B->data;

2456:   /* When one process holds the entire A and the other processes have no entries */
2457:   if (A->cmap->N == n) {
2458:     VecGetArrayWrite(v,&diagA);
2459:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,m,diagA,&diagV);
2460:     MatGetRowMax(mat->A,diagV,idx);
2461:     VecDestroy(&diagV);
2462:     VecRestoreArrayWrite(v,&diagA);
2463:     return(0);
2464:   } else if (n == 0) {
2465:     if (m) {
2466:       VecGetArrayWrite(v,&a);
2467:       for (r = 0; r < m; r++) {a[r] = PETSC_MIN_REAL; if (idx) idx[r] = -1;}
2468:       VecRestoreArrayWrite(v,&a);
2469:     }
2470:     return(0);
2471:   }

2473:   PetscMalloc2(m,&diagIdx,m,&offdiagIdx);
2474:   VecCreateSeq(PETSC_COMM_SELF, m, &diagV);
2475:   VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV);
2476:   MatGetRowMax(mat->A, diagV, diagIdx);

2478:   /* Get offdiagIdx[] for implicit 0.0 */
2479:   MatSeqAIJGetArrayRead(B,&bav);
2480:   ba   = bav;
2481:   bi   = b->i;
2482:   bj   = b->j;
2483:   VecGetArrayWrite(offdiagV, &offdiagA);
2484:   for (r = 0; r < m; r++) {
2485:     ncols = bi[r+1] - bi[r];
2486:     if (ncols == A->cmap->N - n) { /* Brow is dense */
2487:       offdiagA[r] = *ba; offdiagIdx[r] = cmap[0];
2488:     } else { /* Brow is sparse, so we already KNOW the maximum is 0.0 or higher */
2489:       offdiagA[r] = 0.0;

2491:       /* Find first hole in the cmap */
2492:       for (j=0; j<ncols; j++) {
2493:         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2494:         if (col > j && j < cstart) {
2495:           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2496:           break;
2497:         } else if (col > j + n && j >= cstart) {
2498:           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2499:           break;
2500:         }
2501:       }
2502:       if (j == ncols && ncols < A->cmap->N - n) {
2503:         /* a hole is outside compressed Bcols */
2504:         if (ncols == 0) {
2505:           if (cstart) {
2506:             offdiagIdx[r] = 0;
2507:           } else offdiagIdx[r] = cend;
2508:         } else { /* ncols > 0 */
2509:           offdiagIdx[r] = cmap[ncols-1] + 1;
2510:           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2511:         }
2512:       }
2513:     }

2515:     for (j=0; j<ncols; j++) {
2516:       if (PetscRealPart(offdiagA[r]) < PetscRealPart(*ba)) {offdiagA[r] = *ba; offdiagIdx[r] = cmap[*bj];}
2517:       ba++; bj++;
2518:     }
2519:   }

2521:   VecGetArrayWrite(v,    &a);
2522:   VecGetArrayRead(diagV,(const PetscScalar**)&diagA);
2523:   for (r = 0; r < m; ++r) {
2524:     if (PetscRealPart(diagA[r]) > PetscRealPart(offdiagA[r])) {
2525:       a[r] = diagA[r];
2526:       if (idx) idx[r] = cstart + diagIdx[r];
2527:     } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) {
2528:       a[r] = diagA[r];
2529:       if (idx) {
2530:         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2531:           idx[r] = cstart + diagIdx[r];
2532:         } else idx[r] = offdiagIdx[r];
2533:       }
2534:     } else {
2535:       a[r] = offdiagA[r];
2536:       if (idx) idx[r] = offdiagIdx[r];
2537:     }
2538:   }
2539:   MatSeqAIJRestoreArrayRead(B,&bav);
2540:   VecRestoreArrayWrite(v,       &a);
2541:   VecRestoreArrayRead(diagV,   (const PetscScalar**)&diagA);
2542:   VecRestoreArrayWrite(offdiagV,&offdiagA);
2543:   VecDestroy(&diagV);
2544:   VecDestroy(&offdiagV);
2545:   PetscFree2(diagIdx, offdiagIdx);
2546:   return(0);
2547: }

2549: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2550: {
2552:   Mat            *dummy;

2555:   MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2556:   *newmat = *dummy;
2557:   PetscFree(dummy);
2558:   return(0);
2559: }

2561: PetscErrorCode  MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2562: {
2563:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*) A->data;

2567:   MatInvertBlockDiagonal(a->A,values);
2568:   A->factorerrortype = a->A->factorerrortype;
2569:   return(0);
2570: }

2572: static PetscErrorCode  MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2573: {
2575:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)x->data;

2578:   if (!x->assembled && !x->preallocated) SETERRQ(PetscObjectComm((PetscObject)x), PETSC_ERR_ARG_WRONGSTATE, "MatSetRandom on an unassembled and unpreallocated MATMPIAIJ is not allowed");
2579:   MatSetRandom(aij->A,rctx);
2580:   if (x->assembled) {
2581:     MatSetRandom(aij->B,rctx);
2582:   } else {
2583:     MatSetRandomSkipColumnRange_SeqAIJ_Private(aij->B,x->cmap->rstart,x->cmap->rend,rctx);
2584:   }
2585:   MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2586:   MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2587:   return(0);
2588: }

2590: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2591: {
2593:   if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2594:   else A->ops->increaseoverlap    = MatIncreaseOverlap_MPIAIJ;
2595:   return(0);
2596: }

2598: /*@
2599:    MatMPIAIJSetUseScalableIncreaseOverlap - Set whether the matrix uses a scalable algorithm to compute the overlap

2601:    Collective on Mat

2603:    Input Parameters:
2604: +    A - the matrix
2605: -    sc - PETSC_TRUE indicates use the scalable algorithm (default is not to use the scalable algorithm)

2607:  Level: advanced

2609: @*/
2610: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2611: {
2612:   PetscErrorCode       ierr;

2615:   PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));
2616:   return(0);
2617: }
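
/*
   Illustrative usage sketch (assuming A is a MATMPIAIJ matrix): select the scalable
   overlap algorithm before, e.g., an additive Schwarz setup; the same effect is
   available through the -mat_increase_overlap_scalable option handled below.

     MatMPIAIJSetUseScalableIncreaseOverlap(A,PETSC_TRUE);
*/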

2619: PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2620: {
2621:   PetscErrorCode       ierr;
2622:   PetscBool            sc = PETSC_FALSE,flg;

2625:   PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");
2626:   if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2627:   PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);
2628:   if (flg) {
2629:     MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);
2630:   }
2631:   PetscOptionsTail();
2632:   return(0);
2633: }

2635: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2636: {
2638:   Mat_MPIAIJ     *maij = (Mat_MPIAIJ*)Y->data;
2639:   Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)maij->A->data;

2642:   if (!Y->preallocated) {
2643:     MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2644:   } else if (!aij->nz) {
2645:     PetscInt nonew = aij->nonew;
2646:     MatSeqAIJSetPreallocation(maij->A,1,NULL);
2647:     aij->nonew = nonew;
2648:   }
2649:   MatShift_Basic(Y,a);
2650:   return(0);
2651: }
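
/*
   Illustrative usage sketch (assuming A is a square assembled MATMPIAIJ matrix):
   A = A + 3*I; if no diagonal entries have been preallocated, the code above first
   makes room for one entry per row.

     MatShift(A,3.0);
*/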

2653: PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool  *missing,PetscInt *d)
2654: {
2655:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2659:   if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2660:   MatMissingDiagonal(a->A,missing,d);
2661:   if (d) {
2662:     PetscInt rstart;
2663:     MatGetOwnershipRange(A,&rstart,NULL);
2664:     *d += rstart;

2666:   }
2667:   return(0);
2668: }

2670: PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *diag)
2671: {
2672:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2676:   MatInvertVariableBlockDiagonal(a->A,nblocks,bsizes,diag);
2677:   return(0);
2678: }

2680: /* -------------------------------------------------------------------*/
2681: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2682:                                        MatGetRow_MPIAIJ,
2683:                                        MatRestoreRow_MPIAIJ,
2684:                                        MatMult_MPIAIJ,
2685:                                 /* 4*/ MatMultAdd_MPIAIJ,
2686:                                        MatMultTranspose_MPIAIJ,
2687:                                        MatMultTransposeAdd_MPIAIJ,
2688:                                        NULL,
2689:                                        NULL,
2690:                                        NULL,
2691:                                 /*10*/ NULL,
2692:                                        NULL,
2693:                                        NULL,
2694:                                        MatSOR_MPIAIJ,
2695:                                        MatTranspose_MPIAIJ,
2696:                                 /*15*/ MatGetInfo_MPIAIJ,
2697:                                        MatEqual_MPIAIJ,
2698:                                        MatGetDiagonal_MPIAIJ,
2699:                                        MatDiagonalScale_MPIAIJ,
2700:                                        MatNorm_MPIAIJ,
2701:                                 /*20*/ MatAssemblyBegin_MPIAIJ,
2702:                                        MatAssemblyEnd_MPIAIJ,
2703:                                        MatSetOption_MPIAIJ,
2704:                                        MatZeroEntries_MPIAIJ,
2705:                                 /*24*/ MatZeroRows_MPIAIJ,
2706:                                        NULL,
2707:                                        NULL,
2708:                                        NULL,
2709:                                        NULL,
2710:                                 /*29*/ MatSetUp_MPIAIJ,
2711:                                        NULL,
2712:                                        NULL,
2713:                                        MatGetDiagonalBlock_MPIAIJ,
2714:                                        NULL,
2715:                                 /*34*/ MatDuplicate_MPIAIJ,
2716:                                        NULL,
2717:                                        NULL,
2718:                                        NULL,
2719:                                        NULL,
2720:                                 /*39*/ MatAXPY_MPIAIJ,
2721:                                        MatCreateSubMatrices_MPIAIJ,
2722:                                        MatIncreaseOverlap_MPIAIJ,
2723:                                        MatGetValues_MPIAIJ,
2724:                                        MatCopy_MPIAIJ,
2725:                                 /*44*/ MatGetRowMax_MPIAIJ,
2726:                                        MatScale_MPIAIJ,
2727:                                        MatShift_MPIAIJ,
2728:                                        MatDiagonalSet_MPIAIJ,
2729:                                        MatZeroRowsColumns_MPIAIJ,
2730:                                 /*49*/ MatSetRandom_MPIAIJ,
2731:                                        NULL,
2732:                                        NULL,
2733:                                        NULL,
2734:                                        NULL,
2735:                                 /*54*/ MatFDColoringCreate_MPIXAIJ,
2736:                                        NULL,
2737:                                        MatSetUnfactored_MPIAIJ,
2738:                                        MatPermute_MPIAIJ,
2739:                                        NULL,
2740:                                 /*59*/ MatCreateSubMatrix_MPIAIJ,
2741:                                        MatDestroy_MPIAIJ,
2742:                                        MatView_MPIAIJ,
2743:                                        NULL,
2744:                                        NULL,
2745:                                 /*64*/ NULL,
2746:                                        MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2747:                                        NULL,
2748:                                        NULL,
2749:                                        NULL,
2750:                                 /*69*/ MatGetRowMaxAbs_MPIAIJ,
2751:                                        MatGetRowMinAbs_MPIAIJ,
2752:                                        NULL,
2753:                                        NULL,
2754:                                        NULL,
2755:                                        NULL,
2756:                                 /*75*/ MatFDColoringApply_AIJ,
2757:                                        MatSetFromOptions_MPIAIJ,
2758:                                        NULL,
2759:                                        NULL,
2760:                                        MatFindZeroDiagonals_MPIAIJ,
2761:                                 /*80*/ NULL,
2762:                                        NULL,
2763:                                        NULL,
2764:                                 /*83*/ MatLoad_MPIAIJ,
2765:                                        MatIsSymmetric_MPIAIJ,
2766:                                        NULL,
2767:                                        NULL,
2768:                                        NULL,
2769:                                        NULL,
2770:                                 /*89*/ NULL,
2771:                                        NULL,
2772:                                        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2773:                                        NULL,
2774:                                        NULL,
2775:                                 /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2776:                                        NULL,
2777:                                        NULL,
2778:                                        NULL,
2779:                                        MatBindToCPU_MPIAIJ,
2780:                                 /*99*/ MatProductSetFromOptions_MPIAIJ,
2781:                                        NULL,
2782:                                        NULL,
2783:                                        MatConjugate_MPIAIJ,
2784:                                        NULL,
2785:                                 /*104*/MatSetValuesRow_MPIAIJ,
2786:                                        MatRealPart_MPIAIJ,
2787:                                        MatImaginaryPart_MPIAIJ,
2788:                                        NULL,
2789:                                        NULL,
2790:                                 /*109*/NULL,
2791:                                        NULL,
2792:                                        MatGetRowMin_MPIAIJ,
2793:                                        NULL,
2794:                                        MatMissingDiagonal_MPIAIJ,
2795:                                 /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2796:                                        NULL,
2797:                                        MatGetGhosts_MPIAIJ,
2798:                                        NULL,
2799:                                        NULL,
2800:                                 /*119*/MatMultDiagonalBlock_MPIAIJ,
2801:                                        NULL,
2802:                                        NULL,
2803:                                        NULL,
2804:                                        MatGetMultiProcBlock_MPIAIJ,
2805:                                 /*124*/MatFindNonzeroRows_MPIAIJ,
2806:                                        MatGetColumnReductions_MPIAIJ,
2807:                                        MatInvertBlockDiagonal_MPIAIJ,
2808:                                        MatInvertVariableBlockDiagonal_MPIAIJ,
2809:                                        MatCreateSubMatricesMPI_MPIAIJ,
2810:                                 /*129*/NULL,
2811:                                        NULL,
2812:                                        NULL,
2813:                                        MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2814:                                        NULL,
2815:                                 /*134*/NULL,
2816:                                        NULL,
2817:                                        NULL,
2818:                                        NULL,
2819:                                        NULL,
2820:                                 /*139*/MatSetBlockSizes_MPIAIJ,
2821:                                        NULL,
2822:                                        NULL,
2823:                                        MatFDColoringSetUp_MPIXAIJ,
2824:                                        MatFindOffBlockDiagonalEntries_MPIAIJ,
2825:                                        MatCreateMPIMatConcatenateSeqMat_MPIAIJ,
2826:                                 /*145*/NULL,
2827:                                        NULL,
2828:                                        NULL
2829: };

2831: /* ----------------------------------------------------------------------------------------*/

2833: PetscErrorCode  MatStoreValues_MPIAIJ(Mat mat)
2834: {
2835:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2839:   MatStoreValues(aij->A);
2840:   MatStoreValues(aij->B);
2841:   return(0);
2842: }

2844: PetscErrorCode  MatRetrieveValues_MPIAIJ(Mat mat)
2845: {
2846:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2850:   MatRetrieveValues(aij->A);
2851:   MatRetrieveValues(aij->B);
2852:   return(0);
2853: }
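
/*
   Illustrative usage sketch (assuming A is an assembled MATMPIAIJ matrix): stash the
   numerical values, modify them while keeping the same nonzero pattern, then restore.
   MatStoreValues() requires MAT_NEW_NONZERO_LOCATIONS to be PETSC_FALSE.

     MatSetOption(A,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);
     MatStoreValues(A);
     ... e.g. zero rows or otherwise change values in place ...
     MatRetrieveValues(A);
*/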

2855: PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2856: {
2857:   Mat_MPIAIJ     *b;
2859:   PetscMPIInt    size;

2862:   PetscLayoutSetUp(B->rmap);
2863:   PetscLayoutSetUp(B->cmap);
2864:   b = (Mat_MPIAIJ*)B->data;

2866: #if defined(PETSC_USE_CTABLE)
2867:   PetscTableDestroy(&b->colmap);
2868: #else
2869:   PetscFree(b->colmap);
2870: #endif
2871:   PetscFree(b->garray);
2872:   VecDestroy(&b->lvec);
2873:   VecScatterDestroy(&b->Mvctx);

2875:   /* Because the B will have been resized we simply destroy it and create a new one each time */
2876:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
2877:   MatDestroy(&b->B);
2878:   MatCreate(PETSC_COMM_SELF,&b->B);
2879:   MatSetSizes(b->B,B->rmap->n,size > 1 ? B->cmap->N : 0,B->rmap->n,size > 1 ? B->cmap->N : 0);
2880:   MatSetBlockSizesFromMats(b->B,B,B);
2881:   MatSetType(b->B,MATSEQAIJ);
2882:   PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);

2884:   if (!B->preallocated) {
2885:     MatCreate(PETSC_COMM_SELF,&b->A);
2886:     MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2887:     MatSetBlockSizesFromMats(b->A,B,B);
2888:     MatSetType(b->A,MATSEQAIJ);
2889:     PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2890:   }

2892:   MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2893:   MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2894:   B->preallocated  = PETSC_TRUE;
2895:   B->was_assembled = PETSC_FALSE;
2896:   B->assembled     = PETSC_FALSE;
2897:   return(0);
2898: }

2900: PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2901: {
2902:   Mat_MPIAIJ     *b;

2907:   PetscLayoutSetUp(B->rmap);
2908:   PetscLayoutSetUp(B->cmap);
2909:   b = (Mat_MPIAIJ*)B->data;

2911: #if defined(PETSC_USE_CTABLE)
2912:   PetscTableDestroy(&b->colmap);
2913: #else
2914:   PetscFree(b->colmap);
2915: #endif
2916:   PetscFree(b->garray);
2917:   VecDestroy(&b->lvec);
2918:   VecScatterDestroy(&b->Mvctx);

2920:   MatResetPreallocation(b->A);
2921:   MatResetPreallocation(b->B);
2922:   B->preallocated  = PETSC_TRUE;
2923:   B->was_assembled = PETSC_FALSE;
2924:   B->assembled = PETSC_FALSE;
2925:   return(0);
2926: }
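
/*
   Sketch of how the routine above is reached through the public interface, assuming B is an MPIAIJ
   matrix that was preallocated earlier and is to be refilled from scratch with the same preallocation
   (error checking omitted):

     MatResetPreallocation(B);                 restore the original preallocation information
     ...  MatSetValues() with the new entries  ...
     MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
*/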

2928: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2929: {
2930:   Mat            mat;
2931:   Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;

2935:   *newmat = NULL;
2936:   MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2937:   MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2938:   MatSetBlockSizesFromMats(mat,matin,matin);
2939:   MatSetType(mat,((PetscObject)matin)->type_name);
2940:   a       = (Mat_MPIAIJ*)mat->data;

2942:   mat->factortype   = matin->factortype;
2943:   mat->assembled    = matin->assembled;
2944:   mat->insertmode   = NOT_SET_VALUES;
2945:   mat->preallocated = matin->preallocated;

2947:   a->size         = oldmat->size;
2948:   a->rank         = oldmat->rank;
2949:   a->donotstash   = oldmat->donotstash;
2950:   a->roworiented  = oldmat->roworiented;
2951:   a->rowindices   = NULL;
2952:   a->rowvalues    = NULL;
2953:   a->getrowactive = PETSC_FALSE;

2955:   PetscLayoutReference(matin->rmap,&mat->rmap);
2956:   PetscLayoutReference(matin->cmap,&mat->cmap);

2958:   if (oldmat->colmap) {
2959: #if defined(PETSC_USE_CTABLE)
2960:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2961: #else
2962:     PetscMalloc1(mat->cmap->N,&a->colmap);
2963:     PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2964:     PetscArraycpy(a->colmap,oldmat->colmap,mat->cmap->N);
2965: #endif
2966:   } else a->colmap = NULL;
2967:   if (oldmat->garray) {
2968:     PetscInt len;
2969:     len  = oldmat->B->cmap->n;
2970:     PetscMalloc1(len+1,&a->garray);
2971:     PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2972:     if (len) { PetscArraycpy(a->garray,oldmat->garray,len); }
2973:   } else a->garray = NULL;

2975:   /* It may happen that MatDuplicate() is called with a non-assembled matrix;
2976:      in fact, MatDuplicate() only requires the matrix to be preallocated.
2977:      This can happen, for example, inside DMCreateMatrix_Shell() */
2978:   if (oldmat->lvec) {
2979:     VecDuplicate(oldmat->lvec,&a->lvec);
2980:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2981:   }
2982:   if (oldmat->Mvctx) {
2983:     VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2984:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);
2985:   }
2986:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2987:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2988:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2989:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2990:   PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2991:   *newmat = mat;
2992:   return(0);
2993: }
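
/*
   The routine above is reached through the public MatDuplicate(); a minimal sketch for an MPIAIJ
   matrix A (at least preallocated, see the note above; error checking omitted):

     Mat B;
     MatDuplicate(A,MAT_COPY_VALUES,&B);   or MAT_DO_NOT_COPY_VALUES / MAT_SHARE_NONZERO_PATTERN
     ...
     MatDestroy(&B);
*/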

2995: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2996: {
2997:   PetscBool      isbinary, ishdf5;

3003:   /* force binary viewer to load .info file if it has not yet done so */
3004:   PetscViewerSetUp(viewer);
3005:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
3006:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERHDF5,  &ishdf5);
3007:   if (isbinary) {
3008:     MatLoad_MPIAIJ_Binary(newMat,viewer);
3009:   } else if (ishdf5) {
3010: #if defined(PETSC_HAVE_HDF5)
3011:     MatLoad_AIJ_HDF5(newMat,viewer);
3012: #else
3013:     SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
3014: #endif
3015:   } else {
3016:     SETERRQ2(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"Viewer type %s not yet supported for reading %s matrices",((PetscObject)viewer)->type_name,((PetscObject)newMat)->type_name);
3017:   }
3018:   return(0);
3019: }
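
/*
   A minimal sketch of loading an MPIAIJ matrix through the routine above from a PETSc binary file
   (hypothetical file name "matrix.dat"; error checking omitted):

     Mat         A;
     PetscViewer viewer;
     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetType(A,MATMPIAIJ);
     MatLoad(A,viewer);
     PetscViewerDestroy(&viewer);
*/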

3021: PetscErrorCode MatLoad_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
3022: {
3023:   PetscInt       header[4],M,N,m,nz,rows,cols,sum,i;
3024:   PetscInt       *rowidxs,*colidxs;
3025:   PetscScalar    *matvals;

3029:   PetscViewerSetUp(viewer);

3031:   /* read in matrix header */
3032:   PetscViewerBinaryRead(viewer,header,4,NULL,PETSC_INT);
3033:   if (header[0] != MAT_FILE_CLASSID) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Not a matrix object in file");
3034:   M  = header[1]; N = header[2]; nz = header[3];
3035:   if (M < 0) SETERRQ1(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Matrix row size (%D) in file is negative",M);
3036:   if (N < 0) SETERRQ1(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Matrix column size (%D) in file is negative",N);
3037:   if (nz < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MPIAIJ");

3039:   /* set block sizes from the viewer's .info file */
3040:   MatLoad_Binary_BlockSizes(mat,viewer);
3041:   /* set global sizes if not set already */
3042:   if (mat->rmap->N < 0) mat->rmap->N = M;
3043:   if (mat->cmap->N < 0) mat->cmap->N = N;
3044:   PetscLayoutSetUp(mat->rmap);
3045:   PetscLayoutSetUp(mat->cmap);

3047:   /* check if the matrix sizes are correct */
3048:   MatGetSize(mat,&rows,&cols);
3049:   if (M != rows || N != cols) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Matrix in file of different sizes (%D, %D) than the input matrix (%D, %D)",M,N,rows,cols);

3051:   /* read in row lengths and build row indices */
3052:   MatGetLocalSize(mat,&m,NULL);
3053:   PetscMalloc1(m+1,&rowidxs);
3054:   PetscViewerBinaryReadAll(viewer,rowidxs+1,m,PETSC_DECIDE,M,PETSC_INT);
3055:   rowidxs[0] = 0; for (i=0; i<m; i++) rowidxs[i+1] += rowidxs[i];
3056:   MPIU_Allreduce(&rowidxs[m],&sum,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)viewer));
3057:   if (sum != nz) SETERRQ2(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Inconsistent matrix data in file: nonzeros = %D, sum-row-lengths = %D\n",nz,sum);
3058:   /* read in column indices and matrix values */
3059:   PetscMalloc2(rowidxs[m],&colidxs,rowidxs[m],&matvals);
3060:   PetscViewerBinaryReadAll(viewer,colidxs,rowidxs[m],PETSC_DETERMINE,PETSC_DETERMINE,PETSC_INT);
3061:   PetscViewerBinaryReadAll(viewer,matvals,rowidxs[m],PETSC_DETERMINE,PETSC_DETERMINE,PETSC_SCALAR);
3062:   /* store matrix indices and values */
3063:   MatMPIAIJSetPreallocationCSR(mat,rowidxs,colidxs,matvals);
3064:   PetscFree(rowidxs);
3065:   PetscFree2(colidxs,matvals);
3066:   return(0);
3067: }
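
/*
   The binary stream read above is laid out as follows (integers are PETSC_INT, values are PETSC_SCALAR):

     header[0]  MAT_FILE_CLASSID
     header[1]  M   global number of rows
     header[2]  N   global number of columns
     header[3]  nz  global number of nonzeros (negative for special on-disk formats, which cannot be loaded here)
     M   row lengths
     nz  column indices
     nz  matrix values

   Each process reads its m locally owned row lengths and the corresponding slices of the column-index
   and value arrays through PetscViewerBinaryReadAll().
*/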

3069: /* Not scalable because of ISAllGather() unless getting all columns. */
3070: PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
3071: {
3073:   IS             iscol_local;
3074:   PetscBool      isstride;
3075:   PetscMPIInt    lisstride=0,gisstride;

3078:   /* check if we are grabbing all columns*/
3079:   PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);

3081:   if (isstride) {
3082:     PetscInt  start,len,mstart,mlen;
3083:     ISStrideGetInfo(iscol,&start,NULL);
3084:     ISGetLocalSize(iscol,&len);
3085:     MatGetOwnershipRangeColumn(mat,&mstart,&mlen);
3086:     if (mstart == start && mlen-mstart == len) lisstride = 1;
3087:   }

3089:   MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
3090:   if (gisstride) {
3091:     PetscInt N;
3092:     MatGetSize(mat,NULL,&N);
3093:     ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol_local);
3094:     ISSetIdentity(iscol_local);
3095:     PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");
3096:   } else {
3097:     PetscInt cbs;
3098:     ISGetBlockSize(iscol,&cbs);
3099:     ISAllGather(iscol,&iscol_local);
3100:     ISSetBlockSize(iscol_local,cbs);
3101:   }

3103:   *isseq = iscol_local;
3104:   return(0);
3105: }

3107: /*
3108:  Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
3109:  (see MatCreateSubMatrix_MPIAIJ_nonscalable)

3111:  Input Parameters:
3112:    mat - matrix
3113:    isrow - parallel row index set; its local indices are a subset of local rows of mat,
3114:            i.e., mat->rstart <= isrow[i] < mat->rend
3115:    iscol - parallel column index set; its local indices are a subset of local columns of mat,
3116:            i.e., mat->cstart <= iscol[i] < mat->cend
3117:  Output Parameter:
3118:    isrow_d,iscol_d - sequential row and column index sets for retrieving mat->A
3119:    iscol_o - sequential column index set for retrieving mat->B
3120:    garray - column map; garray[i] indicates global location of iscol_o[i] in iscol
3121:  */
3122: PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat,IS isrow,IS iscol,IS *isrow_d,IS *iscol_d,IS *iscol_o,const PetscInt *garray[])
3123: {
3125:   Vec            x,cmap;
3126:   const PetscInt *is_idx;
3127:   PetscScalar    *xarray,*cmaparray;
3128:   PetscInt       ncols,isstart,*idx,m,rstart,*cmap1,count;
3129:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3130:   Mat            B=a->B;
3131:   Vec            lvec=a->lvec,lcmap;
3132:   PetscInt       i,cstart,cend,Bn=B->cmap->N;
3133:   MPI_Comm       comm;
3134:   VecScatter     Mvctx=a->Mvctx;

3137:   PetscObjectGetComm((PetscObject)mat,&comm);
3138:   ISGetLocalSize(iscol,&ncols);

3140:   /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3141:   MatCreateVecs(mat,&x,NULL);
3142:   VecSet(x,-1.0);
3143:   VecDuplicate(x,&cmap);
3144:   VecSet(cmap,-1.0);

3146:   /* Get start indices */
3147:   MPI_Scan(&ncols,&isstart,1,MPIU_INT,MPI_SUM,comm);
3148:   isstart -= ncols;
3149:   MatGetOwnershipRangeColumn(mat,&cstart,&cend);

3151:   ISGetIndices(iscol,&is_idx);
3152:   VecGetArray(x,&xarray);
3153:   VecGetArray(cmap,&cmaparray);
3154:   PetscMalloc1(ncols,&idx);
3155:   for (i=0; i<ncols; i++) {
3156:     xarray[is_idx[i]-cstart]    = (PetscScalar)is_idx[i];
3157:     cmaparray[is_idx[i]-cstart] = i + isstart;      /* global index of iscol[i] */
3158:     idx[i]                      = is_idx[i]-cstart; /* local index of iscol[i]  */
3159:   }
3160:   VecRestoreArray(x,&xarray);
3161:   VecRestoreArray(cmap,&cmaparray);
3162:   ISRestoreIndices(iscol,&is_idx);

3164:   /* Get iscol_d */
3165:   ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,iscol_d);
3166:   ISGetBlockSize(iscol,&i);
3167:   ISSetBlockSize(*iscol_d,i);

3169:   /* Get isrow_d */
3170:   ISGetLocalSize(isrow,&m);
3171:   rstart = mat->rmap->rstart;
3172:   PetscMalloc1(m,&idx);
3173:   ISGetIndices(isrow,&is_idx);
3174:   for (i=0; i<m; i++) idx[i] = is_idx[i]-rstart;
3175:   ISRestoreIndices(isrow,&is_idx);

3177:   ISCreateGeneral(PETSC_COMM_SELF,m,idx,PETSC_OWN_POINTER,isrow_d);
3178:   ISGetBlockSize(isrow,&i);
3179:   ISSetBlockSize(*isrow_d,i);

3181:   /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3182:   VecScatterBegin(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3183:   VecScatterEnd(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);

3185:   VecDuplicate(lvec,&lcmap);

3187:   VecScatterBegin(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3188:   VecScatterEnd(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);

3190:   /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3191:   /* off-process column indices */
3192:   count = 0;
3193:   PetscMalloc1(Bn,&idx);
3194:   PetscMalloc1(Bn,&cmap1);

3196:   VecGetArray(lvec,&xarray);
3197:   VecGetArray(lcmap,&cmaparray);
3198:   for (i=0; i<Bn; i++) {
3199:     if (PetscRealPart(xarray[i]) > -1.0) {
3200:       idx[count]     = i;                   /* local column index in off-diagonal part B */
3201:       cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]);  /* column index in submat */
3202:       count++;
3203:     }
3204:   }
3205:   VecRestoreArray(lvec,&xarray);
3206:   VecRestoreArray(lcmap,&cmaparray);

3208:   ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,iscol_o);
3209:   /* cannot ensure iscol_o has same blocksize as iscol! */

3211:   PetscFree(idx);
3212:   *garray = cmap1;

3214:   VecDestroy(&x);
3215:   VecDestroy(&cmap);
3216:   VecDestroy(&lcmap);
3217:   return(0);
3218: }
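
/*
   A small, hypothetical illustration of the outputs above. Take two ranks, a matrix with columns 0,1
   owned by rank 0 and columns 2,3 owned by rank 1, and assume rank 0's off-diagonal block B couples to
   the remote columns 2 and 3 (a->garray = {2,3}). If iscol selects column {1} on rank 0 and columns
   {2,3} on rank 1, then on rank 0:

     iscol_d = {1}      local index of the selected owned column
     iscol_o = {0,1}    local columns of B whose global columns (2 and 3) are selected by some rank
     garray  = {1,2}    positions of those columns within the concatenated iscol = {1,2,3}
*/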

3220: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3221: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *submat)
3222: {
3224:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)mat->data,*asub;
3225:   Mat            M = NULL;
3226:   MPI_Comm       comm;
3227:   IS             iscol_d,isrow_d,iscol_o;
3228:   Mat            Asub = NULL,Bsub = NULL;
3229:   PetscInt       n;

3232:   PetscObjectGetComm((PetscObject)mat,&comm);

3234:   if (call == MAT_REUSE_MATRIX) {
3235:     /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3236:     PetscObjectQuery((PetscObject)*submat,"isrow_d",(PetscObject*)&isrow_d);
3237:     if (!isrow_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow_d passed in was not used before, cannot reuse");

3239:     PetscObjectQuery((PetscObject)*submat,"iscol_d",(PetscObject*)&iscol_d);
3240:     if (!iscol_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_d passed in was not used before, cannot reuse");

3242:     PetscObjectQuery((PetscObject)*submat,"iscol_o",(PetscObject*)&iscol_o);
3243:     if (!iscol_o) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_o passed in was not used before, cannot reuse");

3245:     /* Update diagonal and off-diagonal portions of submat */
3246:     asub = (Mat_MPIAIJ*)(*submat)->data;
3247:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->A);
3248:     ISGetLocalSize(iscol_o,&n);
3249:     if (n) {
3250:       MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->B);
3251:     }
3252:     MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3253:     MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);

3255:   } else { /* call == MAT_INITIAL_MATRIX */
3256:     const PetscInt *garray;
3257:     PetscInt        BsubN;

3259:     /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3260:     ISGetSeqIS_SameColDist_Private(mat,isrow,iscol,&isrow_d,&iscol_d,&iscol_o,&garray);

3262:     /* Create local submatrices Asub and Bsub */
3263:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Asub);
3264:     MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Bsub);

3266:     /* Create submatrix M */
3267:     MatCreateMPIAIJWithSeqAIJ(comm,Asub,Bsub,garray,&M);

3269:     /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3270:     asub = (Mat_MPIAIJ*)M->data;

3272:     ISGetLocalSize(iscol_o,&BsubN);
3273:     n = asub->B->cmap->N;
3274:     if (BsubN > n) {
3275:       /* This case can be tested using ~petsc/src/tao/bound/tutorials/runplate2_3 */
3276:       const PetscInt *idx;
3277:       PetscInt       i,j,*idx_new,*subgarray = asub->garray;
3278:       PetscInfo2(M,"submatrix Bn %D != BsubN %D, update iscol_o\n",n,BsubN);

3280:       PetscMalloc1(n,&idx_new);
3281:       j = 0;
3282:       ISGetIndices(iscol_o,&idx);
3283:       for (i=0; i<n; i++) {
3284:         if (j >= BsubN) break;
3285:         while (subgarray[i] > garray[j]) j++;

3287:         if (subgarray[i] == garray[j]) {
3288:           idx_new[i] = idx[j++];
3289:         } else SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"subgarray[%D]=%D cannot be less than garray[%D]=%D",i,subgarray[i],j,garray[j]);
3290:       }
3291:       ISRestoreIndices(iscol_o,&idx);

3293:       ISDestroy(&iscol_o);
3294:       ISCreateGeneral(PETSC_COMM_SELF,n,idx_new,PETSC_OWN_POINTER,&iscol_o);

3296:     } else if (BsubN < n) {
3297:       SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Columns of Bsub %D cannot be smaller than columns of B %D",BsubN,asub->B->cmap->N);
3298:     }

3300:     PetscFree(garray);
3301:     *submat = M;

3303:     /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3304:     PetscObjectCompose((PetscObject)M,"isrow_d",(PetscObject)isrow_d);
3305:     ISDestroy(&isrow_d);

3307:     PetscObjectCompose((PetscObject)M,"iscol_d",(PetscObject)iscol_d);
3308:     ISDestroy(&iscol_d);

3310:     PetscObjectCompose((PetscObject)M,"iscol_o",(PetscObject)iscol_o);
3311:     ISDestroy(&iscol_o);
3312:   }
3313:   return(0);
3314: }

3316: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3317: {
3319:   IS             iscol_local=NULL,isrow_d;
3320:   PetscInt       csize;
3321:   PetscInt       n,i,j,start,end;
3322:   PetscBool      sameRowDist=PETSC_FALSE,sameDist[2],tsameDist[2];
3323:   MPI_Comm       comm;

3326:   /* If isrow has same processor distribution as mat,
3327:      call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3328:   if (call == MAT_REUSE_MATRIX) {
3329:     PetscObjectQuery((PetscObject)*newmat,"isrow_d",(PetscObject*)&isrow_d);
3330:     if (isrow_d) {
3331:       sameRowDist  = PETSC_TRUE;
3332:       tsameDist[1] = PETSC_TRUE; /* sameColDist */
3333:     } else {
3334:       PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);
3335:       if (iscol_local) {
3336:         sameRowDist  = PETSC_TRUE;
3337:         tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3338:       }
3339:     }
3340:   } else {
3341:     /* Check if isrow has same processor distribution as mat */
3342:     sameDist[0] = PETSC_FALSE;
3343:     ISGetLocalSize(isrow,&n);
3344:     if (!n) {
3345:       sameDist[0] = PETSC_TRUE;
3346:     } else {
3347:       ISGetMinMax(isrow,&i,&j);
3348:       MatGetOwnershipRange(mat,&start,&end);
3349:       if (i >= start && j < end) {
3350:         sameDist[0] = PETSC_TRUE;
3351:       }
3352:     }

3354:     /* Check if iscol has same processor distribution as mat */
3355:     sameDist[1] = PETSC_FALSE;
3356:     ISGetLocalSize(iscol,&n);
3357:     if (!n) {
3358:       sameDist[1] = PETSC_TRUE;
3359:     } else {
3360:       ISGetMinMax(iscol,&i,&j);
3361:       MatGetOwnershipRangeColumn(mat,&start,&end);
3362:       if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3363:     }

3365:     PetscObjectGetComm((PetscObject)mat,&comm);
3366:     MPIU_Allreduce(&sameDist,&tsameDist,2,MPIU_BOOL,MPI_LAND,comm);
3367:     sameRowDist = tsameDist[0];
3368:   }

3370:   if (sameRowDist) {
3371:     if (tsameDist[1]) { /* sameRowDist & sameColDist */
3372:       /* isrow and iscol have same processor distribution as mat */
3373:       MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat,isrow,iscol,call,newmat);
3374:       return(0);
3375:     } else { /* sameRowDist */
3376:       /* isrow has same processor distribution as mat */
3377:       if (call == MAT_INITIAL_MATRIX) {
3378:         PetscBool sorted;
3379:         ISGetSeqIS_Private(mat,iscol,&iscol_local);
3380:         ISGetLocalSize(iscol_local,&n); /* local size of iscol_local = global columns of newmat */
3381:         ISGetSize(iscol,&i);
3382:         if (n != i) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"n %D != size of iscol %D",n,i);

3384:         ISSorted(iscol_local,&sorted);
3385:         if (sorted) {
3386:           /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
3387:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,iscol_local,MAT_INITIAL_MATRIX,newmat);
3388:           return(0);
3389:         }
3390:       } else { /* call == MAT_REUSE_MATRIX */
3391:         IS iscol_sub;
3392:         PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3393:         if (iscol_sub) {
3394:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,NULL,call,newmat);
3395:           return(0);
3396:         }
3397:       }
3398:     }
3399:   }

3401:   /* General case: iscol -> iscol_local which has global size of iscol */
3402:   if (call == MAT_REUSE_MATRIX) {
3403:     PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3404:     if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3405:   } else {
3406:     if (!iscol_local) {
3407:       ISGetSeqIS_Private(mat,iscol,&iscol_local);
3408:     }
3409:   }

3411:   ISGetLocalSize(iscol,&csize);
3412:   MatCreateSubMatrix_MPIAIJ_nonscalable(mat,isrow,iscol_local,csize,call,newmat);

3414:   if (call == MAT_INITIAL_MATRIX) {
3415:     PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3416:     ISDestroy(&iscol_local);
3417:   }
3418:   return(0);
3419: }
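
/*
   This is the MPIAIJ back end of the public MatCreateSubMatrix(). A minimal sketch that extracts the
   locally owned rows and locally owned columns of an assembled MPIAIJ matrix A, which takes the
   SameRowColDist path above (error checking omitted):

     IS       isrow,iscol;
     Mat      S;
     PetscInt rstart,rend,cstart,cend;
     MatGetOwnershipRange(A,&rstart,&rend);
     MatGetOwnershipRangeColumn(A,&cstart,&cend);
     ISCreateStride(PETSC_COMM_WORLD,rend-rstart,rstart,1,&isrow);
     ISCreateStride(PETSC_COMM_WORLD,cend-cstart,cstart,1,&iscol);
     MatCreateSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&S);
     MatCreateSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,&S);    reuse after A's values change
     ISDestroy(&isrow); ISDestroy(&iscol); MatDestroy(&S);
*/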

3421: /*@C
3422:      MatCreateMPIAIJWithSeqAIJ - creates an MPIAIJ matrix using SeqAIJ matrices that contain the "diagonal"
3423:          and "off-diagonal" parts of the matrix in CSR format.

3425:    Collective

3427:    Input Parameters:
3428: +  comm - MPI communicator
3429: .  A - "diagonal" portion of matrix
3430: .  B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3431: -  garray - global index of B columns

3433:    Output Parameter:
3434: .   mat - the matrix, with input A as its local diagonal matrix
3435:    Level: advanced

3437:    Notes:
3438:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3439:        A becomes part of the output mat, and B is destroyed by this routine; the user must not use A or B afterwards.

3441: .seealso: MatCreateMPIAIJWithSplitArrays()
3442: @*/
3443: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm,Mat A,Mat B,const PetscInt garray[],Mat *mat)
3444: {
3445:   PetscErrorCode    ierr;
3446:   Mat_MPIAIJ        *maij;
3447:   Mat_SeqAIJ        *b=(Mat_SeqAIJ*)B->data,*bnew;
3448:   PetscInt          *oi=b->i,*oj=b->j,i,nz,col;
3449:   const PetscScalar *oa;
3450:   Mat               Bnew;
3451:   PetscInt          m,n,N;

3454:   MatCreate(comm,mat);
3455:   MatGetSize(A,&m,&n);
3456:   if (m != B->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Am %D != Bm %D",m,B->rmap->N);
3457:   if (A->rmap->bs != B->rmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A row bs %D != B row bs %D",A->rmap->bs,B->rmap->bs);
3458:   /* the check below is removed; when B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be the same as A's */
3459:   /* if (A->cmap->bs != B->cmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %D != B column bs %D",A->cmap->bs,B->cmap->bs); */

3461:   /* Get global columns of mat */
3462:   MPIU_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,comm);

3464:   MatSetSizes(*mat,m,n,PETSC_DECIDE,N);
3465:   MatSetType(*mat,MATMPIAIJ);
3466:   MatSetBlockSizes(*mat,A->rmap->bs,A->cmap->bs);
3467:   maij = (Mat_MPIAIJ*)(*mat)->data;

3469:   (*mat)->preallocated = PETSC_TRUE;

3471:   PetscLayoutSetUp((*mat)->rmap);
3472:   PetscLayoutSetUp((*mat)->cmap);

3474:   /* Set A as diagonal portion of *mat */
3475:   maij->A = A;

3477:   nz = oi[m];
3478:   for (i=0; i<nz; i++) {
3479:     col   = oj[i];
3480:     oj[i] = garray[col];
3481:   }

3483:   /* Set Bnew as off-diagonal portion of *mat */
3484:   MatSeqAIJGetArrayRead(B,&oa);
3485:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,N,oi,oj,(PetscScalar*)oa,&Bnew);
3486:   MatSeqAIJRestoreArrayRead(B,&oa);
3487:   bnew        = (Mat_SeqAIJ*)Bnew->data;
3488:   bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3489:   maij->B     = Bnew;

3491:   if (B->rmap->N != Bnew->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"BN %D != BnewN %D",B->rmap->N,Bnew->rmap->N);

3493:   b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3494:   b->free_a       = PETSC_FALSE;
3495:   b->free_ij      = PETSC_FALSE;
3496:   MatDestroy(&B);

3498:   bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3499:   bnew->free_a       = PETSC_TRUE;
3500:   bnew->free_ij      = PETSC_TRUE;

3502:   /* condense columns of maij->B */
3503:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
3504:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
3505:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
3506:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
3507:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3508:   return(0);
3509: }
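
/*
   A hedged sketch of calling MatCreateMPIAIJWithSeqAIJ() documented above. A is the sequential
   "diagonal" block, B the sequential "off-diagonal" block with compressed columns, and garray[k] gives
   the global column of local column k of B; A and B must not be used once the call returns:

     Mat      A,B,C;
     PetscInt *garray;
     ...  build A and B as SeqAIJ matrices and fill garray  ...
     MatCreateMPIAIJWithSeqAIJ(PETSC_COMM_WORLD,A,B,garray,&C);
     A is now owned by C and B has been destroyed; only C may be used from here on
*/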

3511: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);

3513: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat,IS isrow,IS iscol,IS iscol_local,MatReuse call,Mat *newmat)
3514: {
3516:   PetscInt       i,m,n,rstart,row,rend,nz,j,bs,cbs;
3517:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3518:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3519:   Mat            M,Msub,B=a->B;
3520:   MatScalar      *aa;
3521:   Mat_SeqAIJ     *aij;
3522:   PetscInt       *garray = a->garray,*colsub,Ncols;
3523:   PetscInt       count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3524:   IS             iscol_sub,iscmap;
3525:   const PetscInt *is_idx,*cmap;
3526:   PetscBool      allcolumns=PETSC_FALSE;
3527:   MPI_Comm       comm;

3530:   PetscObjectGetComm((PetscObject)mat,&comm);
3531:   if (call == MAT_REUSE_MATRIX) {
3532:     PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3533:     if (!iscol_sub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"SubIScol passed in was not used before, cannot reuse");
3534:     ISGetLocalSize(iscol_sub,&count);

3536:     PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);
3537:     if (!iscmap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Subcmap passed in was not used before, cannot reuse");

3539:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);
3540:     if (!Msub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");

3542:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);

3544:   } else { /* call == MAT_INITIAL_MATRIX */
3545:     PetscBool flg;

3547:     ISGetLocalSize(iscol,&n);
3548:     ISGetSize(iscol,&Ncols);

3550:     /* (1) iscol -> nonscalable iscol_local */
3551:     /* Check for special case: each processor gets entire matrix columns */
3552:     ISIdentity(iscol_local,&flg);
3553:     if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3554:     MPIU_Allreduce(MPI_IN_PLACE,&allcolumns,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)mat));
3555:     if (allcolumns) {
3556:       iscol_sub = iscol_local;
3557:       PetscObjectReference((PetscObject)iscol_local);
3558:       ISCreateStride(PETSC_COMM_SELF,n,0,1,&iscmap);

3560:     } else {
3561:       /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted, it can have duplicate indices */
3562:       PetscInt *idx,*cmap1,k;
3563:       PetscMalloc1(Ncols,&idx);
3564:       PetscMalloc1(Ncols,&cmap1);
3565:       ISGetIndices(iscol_local,&is_idx);
3566:       count = 0;
3567:       k     = 0;
3568:       for (i=0; i<Ncols; i++) {
3569:         j = is_idx[i];
3570:         if (j >= cstart && j < cend) {
3571:           /* diagonal part of mat */
3572:           idx[count]     = j;
3573:           cmap1[count++] = i; /* column index in submat */
3574:         } else if (Bn) {
3575:           /* off-diagonal part of mat */
3576:           if (j == garray[k]) {
3577:             idx[count]     = j;
3578:             cmap1[count++] = i;  /* column index in submat */
3579:           } else if (j > garray[k]) {
3580:             while (j > garray[k] && k < Bn-1) k++;
3581:             if (j == garray[k]) {
3582:               idx[count]     = j;
3583:               cmap1[count++] = i; /* column index in submat */
3584:             }
3585:           }
3586:         }
3587:       }
3588:       ISRestoreIndices(iscol_local,&is_idx);

3590:       ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_OWN_POINTER,&iscol_sub);
3591:       ISGetBlockSize(iscol,&cbs);
3592:       ISSetBlockSize(iscol_sub,cbs);

3594:       ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local),count,cmap1,PETSC_OWN_POINTER,&iscmap);
3595:     }

3597:     /* (3) Create sequential Msub */
3598:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,allcolumns,&Msub);
3599:   }

3601:   ISGetLocalSize(iscol_sub,&count);
3602:   aij  = (Mat_SeqAIJ*)(Msub)->data;
3603:   ii   = aij->i;
3604:   ISGetIndices(iscmap,&cmap);

3606:   /*
3607:       m - number of local rows
3608:       Ncols - number of columns (same on all processors)
3609:       rstart - first row in new global matrix generated
3610:   */
3611:   MatGetSize(Msub,&m,NULL);

3613:   if (call == MAT_INITIAL_MATRIX) {
3614:     /* (4) Create parallel newmat */
3615:     PetscMPIInt    rank,size;
3616:     PetscInt       csize;

3618:     MPI_Comm_size(comm,&size);
3619:     MPI_Comm_rank(comm,&rank);

3621:     /*
3622:         Determine the number of non-zeros in the diagonal and off-diagonal
3623:         portions of the matrix in order to do correct preallocation
3624:     */

3626:     /* first get start and end of "diagonal" columns */
3627:     ISGetLocalSize(iscol,&csize);
3628:     if (csize == PETSC_DECIDE) {
3629:       ISGetSize(isrow,&mglobal);
3630:       if (mglobal == Ncols) { /* square matrix */
3631:         nlocal = m;
3632:       } else {
3633:         nlocal = Ncols/size + ((Ncols % size) > rank);
3634:       }
3635:     } else {
3636:       nlocal = csize;
3637:     }
3638:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3639:     rstart = rend - nlocal;
3640:     if (rank == size - 1 && rend != Ncols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,Ncols);

3642:     /* next, compute all the lengths */
3643:     jj    = aij->j;
3644:     PetscMalloc1(2*m+1,&dlens);
3645:     olens = dlens + m;
3646:     for (i=0; i<m; i++) {
3647:       jend = ii[i+1] - ii[i];
3648:       olen = 0;
3649:       dlen = 0;
3650:       for (j=0; j<jend; j++) {
3651:         if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3652:         else dlen++;
3653:         jj++;
3654:       }
3655:       olens[i] = olen;
3656:       dlens[i] = dlen;
3657:     }

3659:     ISGetBlockSize(isrow,&bs);
3660:     ISGetBlockSize(iscol,&cbs);

3662:     MatCreate(comm,&M);
3663:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,Ncols);
3664:     MatSetBlockSizes(M,bs,cbs);
3665:     MatSetType(M,((PetscObject)mat)->type_name);
3666:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3667:     PetscFree(dlens);

3669:   } else { /* call == MAT_REUSE_MATRIX */
3670:     M    = *newmat;
3671:     MatGetLocalSize(M,&i,NULL);
3672:     if (i != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3673:     MatZeroEntries(M);
3674:     /*
3675:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3676:        rather than the slower MatSetValues().
3677:     */
3678:     M->was_assembled = PETSC_TRUE;
3679:     M->assembled     = PETSC_FALSE;
3680:   }

3682:   /* (5) Set values of Msub to *newmat */
3683:   PetscMalloc1(count,&colsub);
3684:   MatGetOwnershipRange(M,&rstart,NULL);

3686:   jj   = aij->j;
3687:   MatSeqAIJGetArrayRead(Msub,(const PetscScalar**)&aa);
3688:   for (i=0; i<m; i++) {
3689:     row = rstart + i;
3690:     nz  = ii[i+1] - ii[i];
3691:     for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3692:     MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);
3693:     jj += nz; aa += nz;
3694:   }
3695:   MatSeqAIJRestoreArrayRead(Msub,(const PetscScalar**)&aa);
3696:   ISRestoreIndices(iscmap,&cmap);

3698:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3699:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);

3701:   PetscFree(colsub);

3703:   /* save Msub, iscol_sub and iscmap used in processor for next request */
3704:   if (call == MAT_INITIAL_MATRIX) {
3705:     *newmat = M;
3706:     PetscObjectCompose((PetscObject)(*newmat),"SubMatrix",(PetscObject)Msub);
3707:     MatDestroy(&Msub);

3709:     PetscObjectCompose((PetscObject)(*newmat),"SubIScol",(PetscObject)iscol_sub);
3710:     ISDestroy(&iscol_sub);

3712:     PetscObjectCompose((PetscObject)(*newmat),"Subcmap",(PetscObject)iscmap);
3713:     ISDestroy(&iscmap);

3715:     if (iscol_local) {
3716:       PetscObjectCompose((PetscObject)(*newmat),"ISAllGather",(PetscObject)iscol_local);
3717:       ISDestroy(&iscol_local);
3718:     }
3719:   }
3720:   return(0);
3721: }

3723: /*
3724:     Not great since it makes two copies of the submatrix: first a SeqAIJ
3725:   locally, and then the end result by concatenating the local matrices.
3726:   Writing it directly would be much like MatCreateSubMatrices_MPIAIJ()

3728:   Note: This requires a sequential iscol with all indices.
3729: */
3730: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3731: {
3733:   PetscMPIInt    rank,size;
3734:   PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3735:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3736:   Mat            M,Mreuse;
3737:   MatScalar      *aa,*vwork;
3738:   MPI_Comm       comm;
3739:   Mat_SeqAIJ     *aij;
3740:   PetscBool      colflag,allcolumns=PETSC_FALSE;

3743:   PetscObjectGetComm((PetscObject)mat,&comm);
3744:   MPI_Comm_rank(comm,&rank);
3745:   MPI_Comm_size(comm,&size);

3747:   /* Check for special case: each processor gets entire matrix columns */
3748:   ISIdentity(iscol,&colflag);
3749:   ISGetLocalSize(iscol,&n);
3750:   if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3751:   MPIU_Allreduce(MPI_IN_PLACE,&allcolumns,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)mat));

3753:   if (call ==  MAT_REUSE_MATRIX) {
3754:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3755:     if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3756:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,allcolumns,&Mreuse);
3757:   } else {
3758:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,allcolumns,&Mreuse);
3759:   }

3761:   /*
3762:       m - number of local rows
3763:       n - number of columns (same on all processors)
3764:       rstart - first row in new global matrix generated
3765:   */
3766:   MatGetSize(Mreuse,&m,&n);
3767:   MatGetBlockSizes(Mreuse,&bs,&cbs);
3768:   if (call == MAT_INITIAL_MATRIX) {
3769:     aij = (Mat_SeqAIJ*)(Mreuse)->data;
3770:     ii  = aij->i;
3771:     jj  = aij->j;

3773:     /*
3774:         Determine the number of non-zeros in the diagonal and off-diagonal
3775:         portions of the matrix in order to do correct preallocation
3776:     */

3778:     /* first get start and end of "diagonal" columns */
3779:     if (csize == PETSC_DECIDE) {
3780:       ISGetSize(isrow,&mglobal);
3781:       if (mglobal == n) { /* square matrix */
3782:         nlocal = m;
3783:       } else {
3784:         nlocal = n/size + ((n % size) > rank);
3785:       }
3786:     } else {
3787:       nlocal = csize;
3788:     }
3789:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3790:     rstart = rend - nlocal;
3791:     if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);

3793:     /* next, compute all the lengths */
3794:     PetscMalloc1(2*m+1,&dlens);
3795:     olens = dlens + m;
3796:     for (i=0; i<m; i++) {
3797:       jend = ii[i+1] - ii[i];
3798:       olen = 0;
3799:       dlen = 0;
3800:       for (j=0; j<jend; j++) {
3801:         if (*jj < rstart || *jj >= rend) olen++;
3802:         else dlen++;
3803:         jj++;
3804:       }
3805:       olens[i] = olen;
3806:       dlens[i] = dlen;
3807:     }
3808:     MatCreate(comm,&M);
3809:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3810:     MatSetBlockSizes(M,bs,cbs);
3811:     MatSetType(M,((PetscObject)mat)->type_name);
3812:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3813:     PetscFree(dlens);
3814:   } else {
3815:     PetscInt ml,nl;

3817:     M    = *newmat;
3818:     MatGetLocalSize(M,&ml,&nl);
3819:     if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3820:     MatZeroEntries(M);
3821:     /*
3822:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3823:        rather than the slower MatSetValues().
3824:     */
3825:     M->was_assembled = PETSC_TRUE;
3826:     M->assembled     = PETSC_FALSE;
3827:   }
3828:   MatGetOwnershipRange(M,&rstart,&rend);
3829:   aij  = (Mat_SeqAIJ*)(Mreuse)->data;
3830:   ii   = aij->i;
3831:   jj   = aij->j;

3833:   /* trigger copy to CPU if needed */
3834:   MatSeqAIJGetArrayRead(Mreuse,(const PetscScalar**)&aa);
3835:   for (i=0; i<m; i++) {
3836:     row   = rstart + i;
3837:     nz    = ii[i+1] - ii[i];
3838:     cwork = jj; jj += nz;
3839:     vwork = aa; aa += nz;
3840:     MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3841:   }
3842:   MatSeqAIJRestoreArrayRead(Mreuse,(const PetscScalar**)&aa);

3844:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3845:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3846:   *newmat = M;

3848:   /* save submatrix used in processor for next request */
3849:   if (call ==  MAT_INITIAL_MATRIX) {
3850:     PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3851:     MatDestroy(&Mreuse);
3852:   }
3853:   return(0);
3854: }

3856: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3857: {
3858:   PetscInt       m,cstart, cend,j,nnz,i,d;
3859:   PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3860:   const PetscInt *JJ;
3862:   PetscBool      nooffprocentries;

3865:   if (Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0, it is %D",Ii[0]);

3867:   PetscLayoutSetUp(B->rmap);
3868:   PetscLayoutSetUp(B->cmap);
3869:   m      = B->rmap->n;
3870:   cstart = B->cmap->rstart;
3871:   cend   = B->cmap->rend;
3872:   rstart = B->rmap->rstart;

3874:   PetscCalloc2(m,&d_nnz,m,&o_nnz);

3876:   if (PetscDefined(USE_DEBUG)) {
3877:     for (i=0; i<m; i++) {
3878:       nnz = Ii[i+1]- Ii[i];
3879:       JJ  = J + Ii[i];
3880:       if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nnz);
3881:       if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3882:       if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3883:     }
3884:   }

3886:   for (i=0; i<m; i++) {
3887:     nnz     = Ii[i+1]- Ii[i];
3888:     JJ      = J + Ii[i];
3889:     nnz_max = PetscMax(nnz_max,nnz);
3890:     d       = 0;
3891:     for (j=0; j<nnz; j++) {
3892:       if (cstart <= JJ[j] && JJ[j] < cend) d++;
3893:     }
3894:     d_nnz[i] = d;
3895:     o_nnz[i] = nnz - d;
3896:   }
3897:   MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3898:   PetscFree2(d_nnz,o_nnz);

3900:   for (i=0; i<m; i++) {
3901:     ii   = i + rstart;
3902:     MatSetValues_MPIAIJ(B,1,&ii,Ii[i+1] - Ii[i],J+Ii[i], v ? v + Ii[i] : NULL,INSERT_VALUES);
3903:   }
3904:   nooffprocentries    = B->nooffprocentries;
3905:   B->nooffprocentries = PETSC_TRUE;
3906:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3907:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3908:   B->nooffprocentries = nooffprocentries;

3910:   MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3911:   return(0);
3912: }

3914: /*@
3915:    MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3916:    (the default parallel PETSc format).

3918:    Collective

3920:    Input Parameters:
3921: +  B - the matrix
3922: .  i - the indices into j for the start of each local row (starts with zero)
3923: .  j - the column indices for each local row (starts with zero)
3924: -  v - optional values in the matrix

3926:    Level: developer

3928:    Notes:
3929:        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3930:      thus you CANNOT change the matrix entries by changing the values of v[] after you have
3931:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

3933:        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.

3935:        The format which is used for the sparse matrix input is equivalent to a
3936:     row-major ordering, i.e., for the following matrix, the input data expected is
3937:     as shown below

3939: $        1 0 0
3940: $        2 0 3     P0
3941: $       -------
3942: $        4 5 6     P1
3943: $
3944: $     Process0 [P0]: rows_owned=[0,1]
3945: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
3946: $        j =  {0,0,2}  [size = 3]
3947: $        v =  {1,2,3}  [size = 3]
3948: $
3949: $     Process1 [P1]: rows_owned=[2]
3950: $        i =  {0,3}    [size = nrow+1  = 1+1]
3951: $        j =  {0,1,2}  [size = 3]
3952: $        v =  {4,5,6}  [size = 3]

3954: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
3955:           MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3956: @*/
3957: PetscErrorCode  MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3958: {

3962:   PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
3963:   return(0);
3964: }
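
/*
   A minimal sketch matching the Process0 data in the manual page above; rank 1 makes the same calls with
   its own local row count and the P1 arrays. On return B is preallocated, filled with v, and assembled
   (error checking omitted):

     Mat         B;
     PetscInt    i[] = {0,1,3};       row pointers for the 2 local rows
     PetscInt    j[] = {0,0,2};       global column indices
     PetscScalar v[] = {1,2,3};       values

     MatCreate(PETSC_COMM_WORLD,&B);
     MatSetSizes(B,2,PETSC_DECIDE,3,3);      rank 0 owns 2 of the 3 global rows
     MatSetType(B,MATMPIAIJ);
     MatMPIAIJSetPreallocationCSR(B,i,j,v);
*/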

3966: /*@C
3967:    MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3968:    (the default parallel PETSc format).  For good matrix assembly performance
3969:    the user should preallocate the matrix storage by setting the parameters
3970:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3971:    performance can be increased by more than a factor of 50.

3973:    Collective

3975:    Input Parameters:
3976: +  B - the matrix
3977: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3978:            (same value is used for all local rows)
3979: .  d_nnz - array containing the number of nonzeros in the various rows of the
3980:            DIAGONAL portion of the local submatrix (possibly different for each row)
3981:            or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
3982:            The size of this array is equal to the number of local rows, i.e 'm'.
3983:            For matrices that will be factored, you must leave room for (and set)
3984:            the diagonal entry even if it is zero.
3985: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
3986:            submatrix (same value is used for all local rows).
3987: -  o_nnz - array containing the number of nonzeros in the various rows of the
3988:            OFF-DIAGONAL portion of the local submatrix (possibly different for
3989:            each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
3990:            structure. The size of this array is equal to the number
3991:            of local rows, i.e 'm'.

3993:    If the *_nnz parameter is given then the *_nz parameter is ignored

3995:    The AIJ format (also called the Yale sparse matrix format or
3996:    compressed row storage (CSR)) is fully compatible with standard Fortran 77
3997:    storage.  The stored row and column indices begin with zero.
3998:    See Users-Manual: ch_mat for details.

4000:    The parallel matrix is partitioned such that the first m0 rows belong to
4001:    process 0, the next m1 rows belong to process 1, the next m2 rows belong
4002:    to process 2 etc., where m0,m1,m2... are the input parameter 'm'.

4004:    The DIAGONAL portion of the local submatrix of a processor can be defined
4005:    as the submatrix which is obtained by extracting the part corresponding to
4006:    the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
4007:    first row that belongs to the processor, r2 is the last row belonging to
4008:    this processor, and c1-c2 is the range of indices of the local part of a
4009:    vector suitable for applying the matrix to.  This is an mxn matrix.  In the
4010:    common case of a square matrix, the row and column ranges are the same and
4011:    the DIAGONAL part is also square. The remaining portion of the local
4012:    submatrix (mxN) constitutes the OFF-DIAGONAL portion.

4014:    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.

4016:    You can call MatGetInfo() to get information on how effective the preallocation was;
4017:    for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
4018:    You can also run with the option -info and look for messages with the string
4019:    malloc in them to see if additional memory allocation was needed.

4021:    Example usage:

4023:    Consider the following 8x8 matrix with 34 non-zero values, that is
4024:    assembled across 3 processors. Lets assume that proc0 owns 3 rows,
4025:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4026:    as follows:

4028: .vb
4029:             1  2  0  |  0  3  0  |  0  4
4030:     Proc0   0  5  6  |  7  0  0  |  8  0
4031:             9  0 10  | 11  0  0  | 12  0
4032:     -------------------------------------
4033:            13  0 14  | 15 16 17  |  0  0
4034:     Proc1   0 18  0  | 19 20 21  |  0  0
4035:             0  0  0  | 22 23  0  | 24  0
4036:     -------------------------------------
4037:     Proc2  25 26 27  |  0  0 28  | 29  0
4038:            30  0  0  | 31 32 33  |  0 34
4039: .ve

4041:    This can be represented as a collection of submatrices as:

4043: .vb
4044:       A B C
4045:       D E F
4046:       G H I
4047: .ve

4049:    Where the submatrices A,B,C are owned by proc0, D,E,F are
4050:    owned by proc1, G,H,I are owned by proc2.

4052:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4053:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4054:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4056:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4057:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4058:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4059:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4060:    part as SeqAIJ matrices, e.g., proc1 will store [E] as a SeqAIJ
4061:    matrix, and [DF] as another SeqAIJ matrix.

4063:    When d_nz, o_nz parameters are specified, d_nz storage elements are
4064:    allocated for every row of the local diagonal submatrix, and o_nz
4065:    storage locations are allocated for every row of the OFF-DIAGONAL submat.
4066:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per
4067:    local row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4068:    In this case, the values of d_nz,o_nz are:
4069: .vb
4070:      proc0 : d_nz = 2, o_nz = 2
4071:      proc1 : d_nz = 3, o_nz = 2
4072:      proc2 : d_nz = 1, o_nz = 4
4073: .ve
4074:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4075:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4076:    for proc2, i.e., we are using 12+15+10=37 storage locations to store
4077:    34 values.

4079:    When d_nnz, o_nnz parameters are specified, the storage is specified
4080:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4081:    In the above case the values for d_nnz,o_nnz are:
4082: .vb
4083:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4084:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4085:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4086: .ve
4087:    Here the space allocated is the sum of all the above values, i.e., 34, and
4088:    hence the preallocation is perfect.

4090:    Level: intermediate

4092: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
4093:           MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
4094: @*/
4095: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
4096: {

4102:   PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
4103:   return(0);
4104: }
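
/*
   A minimal sketch using the per-row counts worked out for proc0 in the 8x8 example above; the other
   ranks pass their own d_nnz/o_nnz arrays and local sizes (error checking omitted):

     Mat      A;
     PetscInt d_nnz[] = {2,2,2},o_nnz[] = {2,2,2};

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,3,3,8,8);                       proc0 owns 3 rows and 3 "diagonal" columns
     MatSetType(A,MATMPIAIJ);
     MatMPIAIJSetPreallocation(A,0,d_nnz,0,o_nnz);
     ...  MatSetValues(), MatAssemblyBegin/End()  ...
*/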

4106: /*@
4107:      MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain the local
4108:          rows in standard CSR format.

4110:    Collective

4112:    Input Parameters:
4113: +  comm - MPI communicator
4114: .  m - number of local rows (Cannot be PETSC_DECIDE)
4115: .  n - This value should be the same as the local size used in creating the
4116:        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4117:        calculated if N is given) For square matrices n is almost always m.
4118: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4119: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4120: .   i - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
4121: .   j - column indices
4122: -   a - matrix values

4124:    Output Parameter:
4125: .   mat - the matrix

4127:    Level: intermediate

4129:    Notes:
4130:        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
4131:      thus you CANNOT change the matrix entries by changing the values of a[] after you have
4132:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

4134:        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.

4136:        The format which is used for the sparse matrix input is equivalent to a
4137:     row-major ordering, i.e., for the following matrix, the input data expected is
4138:     as shown below

4140:        Once you have created the matrix you can update it with new numerical values using MatUpdateMPIAIJWithArrays().

4142: $        1 0 0
4143: $        2 0 3     P0
4144: $       -------
4145: $        4 5 6     P1
4146: $
4147: $     Process0 [P0]: rows_owned=[0,1]
4148: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
4149: $        j =  {0,0,2}  [size = 3]
4150: $        v =  {1,2,3}  [size = 3]
4151: $
4152: $     Process1 [P1]: rows_owned=[2]
4153: $        i =  {0,3}    [size = nrow+1  = 1+1]
4154: $        j =  {0,1,2}  [size = 3]
4155: $        v =  {4,5,6}  [size = 3]

4157: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4158:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4159: @*/
4160: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4161: {

4165:   if (i && i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4166:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4167:   MatCreate(comm,mat);
4168:   MatSetSizes(*mat,m,n,M,N);
4169:   /* MatSetBlockSizes(M,bs,cbs); */
4170:   MatSetType(*mat,MATMPIAIJ);
4171:   MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
4172:   return(0);
4173: }
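
/*
   A minimal sketch for rank 0 of the two-process example in the manual page above; rank 1 passes its
   own local row count and the P1 arrays (error checking omitted):

     Mat         A;
     PetscInt    i[] = {0,1,3},j[] = {0,0,2};
     PetscScalar v[] = {1,2,3};

     MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DETERMINE,3,i,j,v,&A);
*/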

4175: /*@
4176:      MatUpdateMPIAIJWithArrays - updates an MPI AIJ matrix using arrays that contain the local
4177:          rows in standard CSR format. Only the numerical values are updated; the other arrays must be identical to those used to create the matrix

4179:    Collective

4181:    Input Parameters:
4182: +  mat - the matrix
4183: .  m - number of local rows (Cannot be PETSC_DECIDE)
4184: .  n - This value should be the same as the local size used in creating the
4185:        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4186:        calculated if N is given) For square matrices n is almost always m.
4187: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4188: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4189: .  Ii - row indices; that is Ii[0] = 0, Ii[row] = Ii[row-1] + number of elements in that row of the matrix
4190: .  J - column indices
4191: -  v - matrix values

4193:    Level: intermediate

4195: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4196:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4197: @*/
4198: PetscErrorCode MatUpdateMPIAIJWithArrays(Mat mat,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
4199: {
4201:   PetscInt       cstart,nnz,i,j;
4202:   PetscInt       *ld;
4203:   PetscBool      nooffprocentries;
4204:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*)mat->data;
4205:   Mat_SeqAIJ     *Ad  = (Mat_SeqAIJ*)Aij->A->data, *Ao  = (Mat_SeqAIJ*)Aij->B->data;
4206:   PetscScalar    *ad = Ad->a, *ao = Ao->a;
4207:   const PetscInt *Adi = Ad->i;
4208:   PetscInt       ldi,Iii,md;

4211:   if (Ii[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4212:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4213:   if (m != mat->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of rows cannot change from call to MatUpdateMPIAIJWithArrays()");
4214:   if (n != mat->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of columns cannot change from call to MatUpdateMPIAIJWithArrays()");

4216:   cstart = mat->cmap->rstart;
4217:   if (!Aij->ld) {
4218:     /* count number of entries below block diagonal */
4219:     PetscCalloc1(m,&ld);
4220:     Aij->ld = ld;
4221:     for (i=0; i<m; i++) {
4222:       nnz  = Ii[i+1]- Ii[i];
4223:       j     = 0;
4224:       while  (J[j] < cstart && j < nnz) {j++;}
4225:       J    += nnz;
4226:       ld[i] = j;
4227:     }
4228:   } else {
4229:     ld = Aij->ld;
4230:   }

4232:   for (i=0; i<m; i++) {
4233:     nnz  = Ii[i+1]- Ii[i];
4234:     Iii  = Ii[i];
4235:     ldi  = ld[i];
4236:     md   = Adi[i+1]-Adi[i];
4237:     PetscArraycpy(ao,v + Iii,ldi);
4238:     PetscArraycpy(ad,v + Iii + ldi,md);
4239:     PetscArraycpy(ao + ldi,v + Iii + ldi + md,nnz - ldi - md);
4240:     ad  += md;
4241:     ao  += nnz - md;
4242:   }
4243:   nooffprocentries      = mat->nooffprocentries;
4244:   mat->nooffprocentries = PETSC_TRUE;
4245:   PetscObjectStateIncrease((PetscObject)Aij->A);
4246:   PetscObjectStateIncrease((PetscObject)Aij->B);
4247:   PetscObjectStateIncrease((PetscObject)mat);
4248:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
4249:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
4250:   mat->nooffprocentries = nooffprocentries;
4251:   return(0);
4252: }

4254: /*@C
4255:    MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
4256:    (the default parallel PETSc format).  For good matrix assembly performance
4257:    the user should preallocate the matrix storage by setting the parameters
4258:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
4259:    performance can be increased by more than a factor of 50.

4261:    Collective

4263:    Input Parameters:
4264: +  comm - MPI communicator
4265: .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given).
4266:            This value should be the same as the local size used in creating the
4267:            y vector for the matrix-vector product y = Ax.
4268: .  n - number of local columns. This value should be the same as the local size used in creating the
4269:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
4270:        calculated if N is given). For square matrices n is almost always m.
4271: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
4272: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
4273: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4274:            (same value is used for all local rows)
4275: .  d_nnz - array containing the number of nonzeros in the various rows of the
4276:            DIAGONAL portion of the local submatrix (possibly different for each row)
4277:            or NULL, if d_nz is used to specify the nonzero structure.
4278:            The size of this array is equal to the number of local rows, i.e. 'm'.
4279: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4280:            submatrix (same value is used for all local rows).
4281: -  o_nnz - array containing the number of nonzeros in the various rows of the
4282:            OFF-DIAGONAL portion of the local submatrix (possibly different for
4283:            each row) or NULL, if o_nz is used to specify the nonzero
4284:            structure. The size of this array is equal to the number
4285:            of local rows, i.e. 'm'.

4287:    Output Parameter:
4288: .  A - the matrix

4290:    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
4291:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
4292:    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

4294:    Notes:
4295:    If the *_nnz parameter is given then the *_nz parameter is ignored

4297:    m,n,M,N parameters specify the size of the matrix, and its partitioning across
4298:    processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
4299:    storage requirements for this matrix.

4301:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
4302:    processor then it must be used on all processors that share the object for
4303:    that argument.

4305:    The user MUST specify either the local or global matrix dimensions
4306:    (possibly both).

4308:    The parallel matrix is partitioned across processors such that the
4309:    first m0 rows belong to process 0, the next m1 rows belong to
4310:    process 1, the next m2 rows belong to process 2, etc., where
4311:    m0,m1,m2,... are the values of the input parameter 'm' on each process, i.e. each
4312:    processor stores values corresponding to an [m x N] submatrix.

4314:    The columns are logically partitioned with the n0 columns belonging
4315:    to the 0th partition, the next n1 columns belonging to the next
4316:    partition, etc., where n0,n1,n2,... are the values of the input parameter 'n' on each process.

4318:    The DIAGONAL portion of the local submatrix on any given processor
4319:    is the submatrix corresponding to the rows and columns m,n
4320:    owned by the given processor, i.e. the diagonal block on
4321:    process 0 is [m0 x n0], the diagonal block on process 1 is [m1 x n1],
4322:    etc. The remaining portion of the local submatrix, of size [m x (N-n)],
4323:    constitutes the OFF-DIAGONAL portion. The example below
4324:    illustrates this concept.

4326:    For a square global matrix we define each processor's diagonal portion
4327:    to be its local rows and the corresponding columns (a square submatrix);
4328:    each processor's off-diagonal portion encompasses the remainder of the
4329:    local matrix (a rectangular submatrix).

4331:    If o_nnz and d_nnz are specified, then o_nz and d_nz are ignored.

4333:    When calling this routine with a single process communicator, a matrix of
4334:    type SEQAIJ is returned.  If a matrix of type MPIAIJ is desired for this
4335:    type of communicator, use the construction mechanism
4336: .vb
4337:      MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
4338: .ve

4345:    By default, this format uses inodes (identical nodes) when possible.
4346:    We search for consecutive rows with the same nonzero structure, thereby
4347:    reusing matrix information to achieve increased efficiency.

4349:    Options Database Keys:
4350: +  -mat_no_inode  - Do not use inodes
4351: -  -mat_inode_limit <limit> - Sets inode limit (max limit=5)

4353:    Example usage:

4355:    Consider the following 8x8 matrix with 34 non-zero values, that is
4356:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4357:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4358:    as follows

4360: .vb
4361:             1  2  0  |  0  3  0  |  0  4
4362:     Proc0   0  5  6  |  7  0  0  |  8  0
4363:             9  0 10  | 11  0  0  | 12  0
4364:     -------------------------------------
4365:            13  0 14  | 15 16 17  |  0  0
4366:     Proc1   0 18  0  | 19 20 21  |  0  0
4367:             0  0  0  | 22 23  0  | 24  0
4368:     -------------------------------------
4369:     Proc2  25 26 27  |  0  0 28  | 29  0
4370:            30  0  0  | 31 32 33  |  0 34
4371: .ve

4373:    This can be represented as a collection of submatrices as

4375: .vb
4376:       A B C
4377:       D E F
4378:       G H I
4379: .ve

4381:    Where the submatrices A,B,C are owned by proc0, D,E,F are
4382:    owned by proc1, G,H,I are owned by proc2.

4384:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4385:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4386:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4388:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4389:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4390:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4391:    Internally, each processor stores the DIAGONAL part and the OFF-DIAGONAL
4392:    part as SeqAIJ matrices, e.g., proc1 will store [E] as a SeqAIJ
4393:    matrix, and [DF] as another SeqAIJ matrix.

4395:    When d_nz, o_nz parameters are specified, d_nz storage elements are
4396:    allocated for every row of the local diagonal submatrix, and o_nz
4397:    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4398:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per local
4399:    row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4400:    In this case, the values of d_nz,o_nz are
4401: .vb
4402:      proc0 : d_nz = 2, o_nz = 2
4403:      proc1 : d_nz = 3, o_nz = 2
4404:      proc2 : d_nz = 1, o_nz = 4
4405: .ve
4406:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4407:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4408:    for proc2, i.e. we are using 12+15+10=37 storage locations to store
4409:    34 values.

4411:    When d_nnz, o_nnz parameters are specified, the storage is specified
4412:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4413:    In the above case the values for d_nnz,o_nnz are
4414: .vb
4415:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4416:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4417:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4418: .ve
4419:    Here the space allocated is the sum of all the above values, i.e. 34, and
4420:    hence preallocation is perfect.
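
   For instance, with the 8x8 example above, the call on proc1 (which owns rows 3-5) could look
   like the following sketch (error checking omitted):
.vb
     Mat      A;
     PetscInt d_nnz[3] = {3,3,2}, o_nnz[3] = {2,1,1};
     MatCreateAIJ(PETSC_COMM_WORLD,3,3,8,8,0,d_nnz,0,o_nnz,&A);
.ve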

4422:    Level: intermediate

4424: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4425:           MATMPIAIJ, MatCreateMPIAIJWithArrays()
4426: @*/
4427: PetscErrorCode  MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4428: {
4430:   PetscMPIInt    size;

4433:   MatCreate(comm,A);
4434:   MatSetSizes(*A,m,n,M,N);
4435:   MPI_Comm_size(comm,&size);
4436:   if (size > 1) {
4437:     MatSetType(*A,MATMPIAIJ);
4438:     MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
4439:   } else {
4440:     MatSetType(*A,MATSEQAIJ);
4441:     MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
4442:   }
4443:   return(0);
4444: }

4446: /*@C
4447:   MatMPIAIJGetSeqAIJ - Returns the local piece of this distributed matrix

4449:   Not collective

4451:   Input Parameter:
4452: . A - The MPIAIJ matrix

4454:   Output Parameters:
4455: + Ad - The local diagonal block as a SeqAIJ matrix
4456: . Ao - The local off-diagonal block as a SeqAIJ matrix
4457: - colmap - An array mapping local column numbers of Ao to global column numbers of the parallel matrix

4459:   Note: The rows in Ad and Ao are in [0, Nr), where Nr is the number of local rows on this process. The columns
4460:   in Ad are in [0, Nc) where Nc is the number of local columns. The columns in Ao are in [0, Nco), where Nco is
4461:   the number of nonzero columns in the local off-diagonal piece of the matrix A. The array colmap maps these
4462:   local column numbers to global column numbers in the original matrix.
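
  A minimal usage sketch (error checking omitted):
.vb
    Mat            Ad,Ao;
    const PetscInt *colmap;
    MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&colmap);
    /* colmap[k] is the global column of A corresponding to local column k of Ao */
.ve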

4464:   Level: intermediate

4466: .seealso: MatMPIAIJGetLocalMat(), MatMPIAIJGetLocalMatCondensed(), MatCreateAIJ(), MATMPIAIJ, MATSEQAIJ
4467: @*/
4468: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4469: {
4470:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
4471:   PetscBool      flg;

4475:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&flg);
4476:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input");
4477:   if (Ad)     *Ad     = a->A;
4478:   if (Ao)     *Ao     = a->B;
4479:   if (colmap) *colmap = a->garray;
4480:   return(0);
4481: }

4483: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4484: {
4486:   PetscInt       m,N,i,rstart,nnz,Ii;
4487:   PetscInt       *indx;
4488:   PetscScalar    *values;
4489:   MatType        rootType;

4492:   MatGetSize(inmat,&m,&N);
4493:   if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4494:     PetscInt       *dnz,*onz,sum,bs,cbs;

4496:     if (n == PETSC_DECIDE) {
4497:       PetscSplitOwnership(comm,&n,&N);
4498:     }
4499:     /* Check sum(n) = N */
4500:     MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
4501:     if (sum != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns %D != global columns %D",sum,N);

4503:     MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
4504:     rstart -= m;

4506:     MatPreallocateInitialize(comm,m,n,dnz,onz);
4507:     for (i=0; i<m; i++) {
4508:       MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4509:       MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
4510:       MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4511:     }

4513:     MatCreate(comm,outmat);
4514:     MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4515:     MatGetBlockSizes(inmat,&bs,&cbs);
4516:     MatSetBlockSizes(*outmat,bs,cbs);
4517:     MatGetRootType_Private(inmat,&rootType);
4518:     MatSetType(*outmat,rootType);
4519:     MatSeqAIJSetPreallocation(*outmat,0,dnz);
4520:     MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
4521:     MatPreallocateFinalize(dnz,onz);
4522:     MatSetOption(*outmat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
4523:   }

4525:   /* numeric phase */
4526:   MatGetOwnershipRange(*outmat,&rstart,NULL);
4527:   for (i=0; i<m; i++) {
4528:     MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4529:     Ii   = i + rstart;
4530:     MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4531:     MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4532:   }
4533:   MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4534:   MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4535:   return(0);
4536: }

4538: PetscErrorCode MatFileSplit(Mat A,char *outfile)
4539: {
4540:   PetscErrorCode    ierr;
4541:   PetscMPIInt       rank;
4542:   PetscInt          m,N,i,rstart,nnz;
4543:   size_t            len;
4544:   const PetscInt    *indx;
4545:   PetscViewer       out;
4546:   char              *name;
4547:   Mat               B;
4548:   const PetscScalar *values;

4551:   MatGetLocalSize(A,&m,NULL);
4552:   MatGetSize(A,NULL,&N);
4553:   /* Should this be the type of the diagonal block of A? */
4554:   MatCreate(PETSC_COMM_SELF,&B);
4555:   MatSetSizes(B,m,N,m,N);
4556:   MatSetBlockSizesFromMats(B,A,A);
4557:   MatSetType(B,MATSEQAIJ);
4558:   MatSeqAIJSetPreallocation(B,0,NULL);
4559:   MatGetOwnershipRange(A,&rstart,NULL);
4560:   for (i=0; i<m; i++) {
4561:     MatGetRow(A,i+rstart,&nnz,&indx,&values);
4562:     MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
4563:     MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
4564:   }
4565:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4566:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

4568:   MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
4569:   PetscStrlen(outfile,&len);
4570:   PetscMalloc1(len+6,&name);
4571:   PetscSNPrintf(name,len+6,"%s.%d",outfile,rank);
4572:   PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
4573:   PetscFree(name);
4574:   MatView(B,out);
4575:   PetscViewerDestroy(&out);
4576:   MatDestroy(&B);
4577:   return(0);
4578: }

4580: static PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(void *data)
4581: {
4582:   PetscErrorCode      ierr;
4583:   Mat_Merge_SeqsToMPI *merge = (Mat_Merge_SeqsToMPI *)data;

4586:   if (!merge) return(0);
4587:   PetscFree(merge->id_r);
4588:   PetscFree(merge->len_s);
4589:   PetscFree(merge->len_r);
4590:   PetscFree(merge->bi);
4591:   PetscFree(merge->bj);
4592:   PetscFree(merge->buf_ri[0]);
4593:   PetscFree(merge->buf_ri);
4594:   PetscFree(merge->buf_rj[0]);
4595:   PetscFree(merge->buf_rj);
4596:   PetscFree(merge->coi);
4597:   PetscFree(merge->coj);
4598:   PetscFree(merge->owners_co);
4599:   PetscLayoutDestroy(&merge->rowmap);
4600:   PetscFree(merge);
4601:   return(0);
4602: }

4604: #include <../src/mat/utils/freespace.h>
4605: #include <petscbt.h>

4607: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4608: {
4609:   PetscErrorCode      ierr;
4610:   MPI_Comm            comm;
4611:   Mat_SeqAIJ          *a  =(Mat_SeqAIJ*)seqmat->data;
4612:   PetscMPIInt         size,rank,taga,*len_s;
4613:   PetscInt            N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4614:   PetscInt            proc,m;
4615:   PetscInt            **buf_ri,**buf_rj;
4616:   PetscInt            k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4617:   PetscInt            nrows,**buf_ri_k,**nextrow,**nextai;
4618:   MPI_Request         *s_waits,*r_waits;
4619:   MPI_Status          *status;
4620:   MatScalar           *aa=a->a;
4621:   MatScalar           **abuf_r,*ba_i;
4622:   Mat_Merge_SeqsToMPI *merge;
4623:   PetscContainer      container;

4626:   PetscObjectGetComm((PetscObject)mpimat,&comm);
4627:   PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);

4629:   MPI_Comm_size(comm,&size);
4630:   MPI_Comm_rank(comm,&rank);

4632:   PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4633:   if (!container) SETERRQ(PetscObjectComm((PetscObject)mpimat),PETSC_ERR_PLIB,"Mat not created from MatCreateMPIAIJSumSeqAIJSymbolic");
4634:   PetscContainerGetPointer(container,(void**)&merge);

4636:   bi     = merge->bi;
4637:   bj     = merge->bj;
4638:   buf_ri = merge->buf_ri;
4639:   buf_rj = merge->buf_rj;

4641:   PetscMalloc1(size,&status);
4642:   owners = merge->rowmap->range;
4643:   len_s  = merge->len_s;

4645:   /* send and recv matrix values */
4646:   /*-----------------------------*/
4647:   PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4648:   PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);

4650:   PetscMalloc1(merge->nsend+1,&s_waits);
4651:   for (proc=0,k=0; proc<size; proc++) {
4652:     if (!len_s[proc]) continue;
4653:     i    = owners[proc];
4654:     MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4655:     k++;
4656:   }

4658:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4659:   if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4660:   PetscFree(status);

4662:   PetscFree(s_waits);
4663:   PetscFree(r_waits);

4665:   /* insert mat values of mpimat */
4666:   /*----------------------------*/
4667:   PetscMalloc1(N,&ba_i);
4668:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4670:   for (k=0; k<merge->nrecv; k++) {
4671:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4672:     nrows       = *(buf_ri_k[k]);
4673:     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
4674:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4675:   }

4677:   /* set values of ba */
4678:   m = merge->rowmap->n;
4679:   for (i=0; i<m; i++) {
4680:     arow = owners[rank] + i;
4681:     bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
4682:     bnzi = bi[i+1] - bi[i];
4683:     PetscArrayzero(ba_i,bnzi);

4685:     /* add local non-zero vals of this proc's seqmat into ba */
4686:     anzi   = ai[arow+1] - ai[arow];
4687:     aj     = a->j + ai[arow];
4688:     aa     = a->a + ai[arow];
4689:     nextaj = 0;
4690:     for (j=0; nextaj<anzi; j++) {
4691:       if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4692:         ba_i[j] += aa[nextaj++];
4693:       }
4694:     }

4696:     /* add received vals into ba */
4697:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4698:       /* i-th row */
4699:       if (i == *nextrow[k]) {
4700:         anzi   = *(nextai[k]+1) - *nextai[k];
4701:         aj     = buf_rj[k] + *(nextai[k]);
4702:         aa     = abuf_r[k] + *(nextai[k]);
4703:         nextaj = 0;
4704:         for (j=0; nextaj<anzi; j++) {
4705:           if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4706:             ba_i[j] += aa[nextaj++];
4707:           }
4708:         }
4709:         nextrow[k]++; nextai[k]++;
4710:       }
4711:     }
4712:     MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4713:   }
4714:   MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4715:   MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);

4717:   PetscFree(abuf_r[0]);
4718:   PetscFree(abuf_r);
4719:   PetscFree(ba_i);
4720:   PetscFree3(buf_ri_k,nextrow,nextai);
4721:   PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4722:   return(0);
4723: }

4725: PetscErrorCode  MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4726: {
4727:   PetscErrorCode      ierr;
4728:   Mat                 B_mpi;
4729:   Mat_SeqAIJ          *a=(Mat_SeqAIJ*)seqmat->data;
4730:   PetscMPIInt         size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4731:   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
4732:   PetscInt            M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4733:   PetscInt            len,proc,*dnz,*onz,bs,cbs;
4734:   PetscInt            k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4735:   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4736:   MPI_Request         *si_waits,*sj_waits,*ri_waits,*rj_waits;
4737:   MPI_Status          *status;
4738:   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
4739:   PetscBT             lnkbt;
4740:   Mat_Merge_SeqsToMPI *merge;
4741:   PetscContainer      container;

4744:   PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);

4746:   /* make sure it is a PETSc comm */
4747:   PetscCommDuplicate(comm,&comm,NULL);
4748:   MPI_Comm_size(comm,&size);
4749:   MPI_Comm_rank(comm,&rank);

4751:   PetscNew(&merge);
4752:   PetscMalloc1(size,&status);

4754:   /* determine row ownership */
4755:   /*---------------------------------------------------------*/
4756:   PetscLayoutCreate(comm,&merge->rowmap);
4757:   PetscLayoutSetLocalSize(merge->rowmap,m);
4758:   PetscLayoutSetSize(merge->rowmap,M);
4759:   PetscLayoutSetBlockSize(merge->rowmap,1);
4760:   PetscLayoutSetUp(merge->rowmap);
4761:   PetscMalloc1(size,&len_si);
4762:   PetscMalloc1(size,&merge->len_s);

4764:   m      = merge->rowmap->n;
4765:   owners = merge->rowmap->range;

4767:   /* determine the number of messages to send, their lengths */
4768:   /*---------------------------------------------------------*/
4769:   len_s = merge->len_s;

4771:   len          = 0; /* length of buf_si[] */
4772:   merge->nsend = 0;
4773:   for (proc=0; proc<size; proc++) {
4774:     len_si[proc] = 0;
4775:     if (proc == rank) {
4776:       len_s[proc] = 0;
4777:     } else {
4778:       len_si[proc] = owners[proc+1] - owners[proc] + 1;
4779:       len_s[proc]  = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4780:     }
4781:     if (len_s[proc]) {
4782:       merge->nsend++;
4783:       nrows = 0;
4784:       for (i=owners[proc]; i<owners[proc+1]; i++) {
4785:         if (ai[i+1] > ai[i]) nrows++;
4786:       }
4787:       len_si[proc] = 2*(nrows+1);
4788:       len         += len_si[proc];
4789:     }
4790:   }

4792:   /* determine the number and length of messages to receive for ij-structure */
4793:   /*-------------------------------------------------------------------------*/
4794:   PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4795:   PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);

4797:   /* post the Irecv of j-structure */
4798:   /*-------------------------------*/
4799:   PetscCommGetNewTag(comm,&tagj);
4800:   PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);

4802:   /* post the Isend of j-structure */
4803:   /*--------------------------------*/
4804:   PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);

4806:   for (proc=0, k=0; proc<size; proc++) {
4807:     if (!len_s[proc]) continue;
4808:     i    = owners[proc];
4809:     MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4810:     k++;
4811:   }

4813:   /* receives and sends of j-structure are complete */
4814:   /*------------------------------------------------*/
4815:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4816:   if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}

4818:   /* send and recv i-structure */
4819:   /*---------------------------*/
4820:   PetscCommGetNewTag(comm,&tagi);
4821:   PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);

4823:   PetscMalloc1(len+1,&buf_s);
4824:   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
4825:   for (proc=0,k=0; proc<size; proc++) {
4826:     if (!len_s[proc]) continue;
4827:     /* form outgoing message for i-structure:
4828:          buf_si[0]:                 nrows to be sent
4829:                [1:nrows]:           row index (global)
4830:                [nrows+1:2*nrows+1]: i-structure index
4831:     */
4832:     /*-------------------------------------------*/
4833:     nrows       = len_si[proc]/2 - 1;
4834:     buf_si_i    = buf_si + nrows+1;
4835:     buf_si[0]   = nrows;
4836:     buf_si_i[0] = 0;
4837:     nrows       = 0;
4838:     for (i=owners[proc]; i<owners[proc+1]; i++) {
4839:       anzi = ai[i+1] - ai[i];
4840:       if (anzi) {
4841:         buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4842:         buf_si[nrows+1]   = i-owners[proc]; /* local row index */
4843:         nrows++;
4844:       }
4845:     }
4846:     MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4847:     k++;
4848:     buf_si += len_si[proc];
4849:   }

4851:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4852:   if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}

4854:   PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4855:   for (i=0; i<merge->nrecv; i++) {
4856:     PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4857:   }

4859:   PetscFree(len_si);
4860:   PetscFree(len_ri);
4861:   PetscFree(rj_waits);
4862:   PetscFree2(si_waits,sj_waits);
4863:   PetscFree(ri_waits);
4864:   PetscFree(buf_s);
4865:   PetscFree(status);

4867:   /* compute a local seq matrix in each processor */
4868:   /*----------------------------------------------*/
4869:   /* allocate bi array and free space for accumulating nonzero column info */
4870:   PetscMalloc1(m+1,&bi);
4871:   bi[0] = 0;

4873:   /* create and initialize a linked list */
4874:   nlnk = N+1;
4875:   PetscLLCreate(N,N,nlnk,lnk,lnkbt);

4877:   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4878:   len  = ai[owners[rank+1]] - ai[owners[rank]];
4879:   PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);

4881:   current_space = free_space;

4883:   /* determine symbolic info for each local row */
4884:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4886:   for (k=0; k<merge->nrecv; k++) {
4887:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4888:     nrows       = *buf_ri_k[k];
4889:     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
4890:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4891:   }

4893:   MatPreallocateInitialize(comm,m,n,dnz,onz);
4894:   len  = 0;
4895:   for (i=0; i<m; i++) {
4896:     bnzi = 0;
4897:     /* add local non-zero cols of this proc's seqmat into lnk */
4898:     arow  = owners[rank] + i;
4899:     anzi  = ai[arow+1] - ai[arow];
4900:     aj    = a->j + ai[arow];
4901:     PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4902:     bnzi += nlnk;
4903:     /* add received col data into lnk */
4904:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4905:       if (i == *nextrow[k]) { /* i-th row */
4906:         anzi  = *(nextai[k]+1) - *nextai[k];
4907:         aj    = buf_rj[k] + *nextai[k];
4908:         PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4909:         bnzi += nlnk;
4910:         nextrow[k]++; nextai[k]++;
4911:       }
4912:     }
4913:     if (len < bnzi) len = bnzi;  /* =max(bnzi) */

4915:     /* if free space is not available, make more free space */
4916:     if (current_space->local_remaining<bnzi) {
4917:       PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);
4918:       nspacedouble++;
4919:     }
4920:     /* copy data into free space, then initialize lnk */
4921:     PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4922:     MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);

4924:     current_space->array           += bnzi;
4925:     current_space->local_used      += bnzi;
4926:     current_space->local_remaining -= bnzi;

4928:     bi[i+1] = bi[i] + bnzi;
4929:   }

4931:   PetscFree3(buf_ri_k,nextrow,nextai);

4933:   PetscMalloc1(bi[m]+1,&bj);
4934:   PetscFreeSpaceContiguous(&free_space,bj);
4935:   PetscLLDestroy(lnk,lnkbt);

4937:   /* create symbolic parallel matrix B_mpi */
4938:   /*---------------------------------------*/
4939:   MatGetBlockSizes(seqmat,&bs,&cbs);
4940:   MatCreate(comm,&B_mpi);
4941:   if (n==PETSC_DECIDE) {
4942:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
4943:   } else {
4944:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4945:   }
4946:   MatSetBlockSizes(B_mpi,bs,cbs);
4947:   MatSetType(B_mpi,MATMPIAIJ);
4948:   MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
4949:   MatPreallocateFinalize(dnz,onz);
4950:   MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);

4952:   /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4953:   B_mpi->assembled  = PETSC_FALSE;
4954:   merge->bi         = bi;
4955:   merge->bj         = bj;
4956:   merge->buf_ri     = buf_ri;
4957:   merge->buf_rj     = buf_rj;
4958:   merge->coi        = NULL;
4959:   merge->coj        = NULL;
4960:   merge->owners_co  = NULL;

4962:   PetscCommDestroy(&comm);

4964:   /* attach the supporting struct to B_mpi for reuse */
4965:   PetscContainerCreate(PETSC_COMM_SELF,&container);
4966:   PetscContainerSetPointer(container,merge);
4967:   PetscContainerSetUserDestroy(container,MatDestroy_MPIAIJ_SeqsToMPI);
4968:   PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
4969:   PetscContainerDestroy(&container);
4970:   *mpimat = B_mpi;

4972:   PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
4973:   return(0);
4974: }

4976: /*@C
4977:       MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
4978:                  matrices from each processor

4980:     Collective

4982:    Input Parameters:
4983: +    comm - the communicator the parallel matrix will live on
4984: .    seqmat - the input sequential matrix
4985: .    m - number of local rows (or PETSC_DECIDE)
4986: .    n - number of local columns (or PETSC_DECIDE)
4987: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

4989:    Output Parameter:
4990: .    mpimat - the parallel matrix generated

4992:     Level: advanced

4994:    Notes:
4995:      The dimensions of the sequential matrix in each processor MUST be the same.
4996:      The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4997:      destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
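
   Example usage (a sketch; seqmat is assumed to already hold this process's contribution, and
   error checking is omitted):
.vb
     Mat C;
     MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&C);
     /* ... change the numerical values of seqmat, keeping its nonzero pattern ... */
     MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&C);
.ve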
4998: @*/
4999: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
5000: {
5002:   PetscMPIInt    size;

5005:   MPI_Comm_size(comm,&size);
5006:   if (size == 1) {
5007:     PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
5008:     if (scall == MAT_INITIAL_MATRIX) {
5009:       MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
5010:     } else {
5011:       MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
5012:     }
5013:     PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
5014:     return(0);
5015:   }
5016:   PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
5017:   if (scall == MAT_INITIAL_MATRIX) {
5018:     MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
5019:   }
5020:   MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
5021:   PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
5022:   return(0);
5023: }

5025: /*@
5026:      MatMPIAIJGetLocalMat - Creates a SeqAIJ from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
5027:           mlocal rows and n columns, where mlocal is the row count obtained with MatGetLocalSize() and n is the global column count obtained
5028:           with MatGetSize()

5030:     Not Collective

5032:    Input Parameters:
5033: +    A - the matrix
5034: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5036:    Output Parameter:
5037: .    A_loc - the local sequential matrix generated

5039:     Level: developer

5041:    Notes:
5042:      When the communicator associated with A has size 1 and MAT_INITIAL_MATRIX is requested, the matrix returned is the diagonal part of A.
5043:      If MAT_REUSE_MATRIX is requested with comm size 1, MatCopy(Adiag,*A_loc,SAME_NONZERO_PATTERN) is called.
5044:      This means that one can preallocate the proper sequential matrix first and then call this routine with MAT_REUSE_MATRIX to safely
5045:      modify the values of the returned A_loc.
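
     A usage sketch (error checking omitted):
.vb
     Mat A_loc;
     MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
     /* ... use A_loc ... */
     MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc); /* refresh the values after the entries of A changed */
     MatDestroy(&A_loc);
.ve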

5047: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed(), MatMPIAIJGetLocalMatMerge()
5048: @*/
5049: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
5050: {
5051:   PetscErrorCode    ierr;
5052:   Mat_MPIAIJ        *mpimat=(Mat_MPIAIJ*)A->data;
5053:   Mat_SeqAIJ        *mat,*a,*b;
5054:   PetscInt          *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
5055:   const PetscScalar *aa,*ba,*aav,*bav;
5056:   PetscScalar       *ca,*cam;
5057:   PetscMPIInt       size;
5058:   PetscInt          am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
5059:   PetscInt          *ci,*cj,col,ncols_d,ncols_o,jo;
5060:   PetscBool         match;

5063:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&match);
5064:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5065:   MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
5066:   if (size == 1) {
5067:     if (scall == MAT_INITIAL_MATRIX) {
5068:       PetscObjectReference((PetscObject)mpimat->A);
5069:       *A_loc = mpimat->A;
5070:     } else if (scall == MAT_REUSE_MATRIX) {
5071:       MatCopy(mpimat->A,*A_loc,SAME_NONZERO_PATTERN);
5072:     }
5073:     return(0);
5074:   }

5076:   PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
5077:   a = (Mat_SeqAIJ*)(mpimat->A)->data;
5078:   b = (Mat_SeqAIJ*)(mpimat->B)->data;
5079:   ai = a->i; aj = a->j; bi = b->i; bj = b->j;
5080:   MatSeqAIJGetArrayRead(mpimat->A,&aav);
5081:   MatSeqAIJGetArrayRead(mpimat->B,&bav);
5082:   aa   = aav;
5083:   ba   = bav;
5084:   if (scall == MAT_INITIAL_MATRIX) {
5085:     PetscMalloc1(1+am,&ci);
5086:     ci[0] = 0;
5087:     for (i=0; i<am; i++) {
5088:       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
5089:     }
5090:     PetscMalloc1(1+ci[am],&cj);
5091:     PetscMalloc1(1+ci[am],&ca);
5092:     k    = 0;
5093:     for (i=0; i<am; i++) {
5094:       ncols_o = bi[i+1] - bi[i];
5095:       ncols_d = ai[i+1] - ai[i];
5096:       /* off-diagonal portion of A */
5097:       for (jo=0; jo<ncols_o; jo++) {
5098:         col = cmap[*bj];
5099:         if (col >= cstart) break;
5100:         cj[k]   = col; bj++;
5101:         ca[k++] = *ba++;
5102:       }
5103:       /* diagonal portion of A */
5104:       for (j=0; j<ncols_d; j++) {
5105:         cj[k]   = cstart + *aj++;
5106:         ca[k++] = *aa++;
5107:       }
5108:       /* off-diagonal portion of A */
5109:       for (j=jo; j<ncols_o; j++) {
5110:         cj[k]   = cmap[*bj++];
5111:         ca[k++] = *ba++;
5112:       }
5113:     }
5114:     /* put together the new matrix */
5115:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
5116:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5117:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5118:     mat          = (Mat_SeqAIJ*)(*A_loc)->data;
5119:     mat->free_a  = PETSC_TRUE;
5120:     mat->free_ij = PETSC_TRUE;
5121:     mat->nonew   = 0;
5122:   } else if (scall == MAT_REUSE_MATRIX) {
5123:     mat=(Mat_SeqAIJ*)(*A_loc)->data;
5124: #if defined(PETSC_HAVE_DEVICE)
5125:     (*A_loc)->offloadmask = PETSC_OFFLOAD_CPU;
5126: #endif
5127:     ci = mat->i; cj = mat->j; cam = mat->a;
5128:     for (i=0; i<am; i++) {
5129:       /* off-diagonal portion of A */
5130:       ncols_o = bi[i+1] - bi[i];
5131:       for (jo=0; jo<ncols_o; jo++) {
5132:         col = cmap[*bj];
5133:         if (col >= cstart) break;
5134:         *cam++ = *ba++; bj++;
5135:       }
5136:       /* diagonal portion of A */
5137:       ncols_d = ai[i+1] - ai[i];
5138:       for (j=0; j<ncols_d; j++) *cam++ = *aa++;
5139:       /* off-diagonal portion of A */
5140:       for (j=jo; j<ncols_o; j++) {
5141:         *cam++ = *ba++; bj++;
5142:       }
5143:     }
5144:   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5145:   MatSeqAIJRestoreArrayRead(mpimat->A,&aav);
5146:   MatSeqAIJRestoreArrayRead(mpimat->B,&bav);
5147:   PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5148:   return(0);
5149: }

5151: /*@
5152:      MatMPIAIJGetLocalMatMerge - Creates a SeqAIJ from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
5153:           mlocal rows and n columns, where n is the sum of the number of columns of the diagonal and off-diagonal parts

5155:     Not Collective

5157:    Input Parameters:
5158: +    A - the matrix
5159: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5161:    Output Parameters:
5162: +    glob - sequential IS with global indices associated with the columns of the local sequential matrix generated (can be NULL)
5163: -    A_loc - the local sequential matrix generated

5165:     Level: developer

5167:    Notes:
5168:      This is different from MatMPIAIJGetLocalMat() since the first columns in the returned matrix are those associated with the diagonal part, followed by those associated with the off-diagonal part (in its local ordering)
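
     A usage sketch (error checking omitted):
.vb
     Mat A_loc;
     IS  glob;
     MatMPIAIJGetLocalMatMerge(A,MAT_INITIAL_MATRIX,&glob,&A_loc);
     /* the k-th entry of glob is the global column of A corresponding to column k of A_loc */
.ve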

5170: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat(), MatMPIAIJGetLocalMatCondensed()

5172: @*/
5173: PetscErrorCode MatMPIAIJGetLocalMatMerge(Mat A,MatReuse scall,IS *glob,Mat *A_loc)
5174: {
5176:   Mat            Ao,Ad;
5177:   const PetscInt *cmap;
5178:   PetscMPIInt    size;
5179:   PetscErrorCode (*f)(Mat,MatReuse,IS*,Mat*);

5182:   MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&cmap);
5183:   MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
5184:   if (size == 1) {
5185:     if (scall == MAT_INITIAL_MATRIX) {
5186:       PetscObjectReference((PetscObject)Ad);
5187:       *A_loc = Ad;
5188:     } else if (scall == MAT_REUSE_MATRIX) {
5189:       MatCopy(Ad,*A_loc,SAME_NONZERO_PATTERN);
5190:     }
5191:     if (glob) { ISCreateStride(PetscObjectComm((PetscObject)Ad),Ad->cmap->n,Ad->cmap->rstart,1,glob); }
5192:     return(0);
5193:   }
5194:   PetscObjectQueryFunction((PetscObject)A,"MatMPIAIJGetLocalMatMerge_C",&f);
5195:   PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
5196:   if (f) {
5197:     (*f)(A,scall,glob,A_loc);
5198:   } else {
5199:     Mat_SeqAIJ        *a = (Mat_SeqAIJ*)Ad->data;
5200:     Mat_SeqAIJ        *b = (Mat_SeqAIJ*)Ao->data;
5201:     Mat_SeqAIJ        *c;
5202:     PetscInt          *ai = a->i, *aj = a->j;
5203:     PetscInt          *bi = b->i, *bj = b->j;
5204:     PetscInt          *ci,*cj;
5205:     const PetscScalar *aa,*ba;
5206:     PetscScalar       *ca;
5207:     PetscInt          i,j,am,dn,on;

5209:     MatGetLocalSize(Ad,&am,&dn);
5210:     MatGetLocalSize(Ao,NULL,&on);
5211:     MatSeqAIJGetArrayRead(Ad,&aa);
5212:     MatSeqAIJGetArrayRead(Ao,&ba);
5213:     if (scall == MAT_INITIAL_MATRIX) {
5214:       PetscInt k;
5215:       PetscMalloc1(1+am,&ci);
5216:       PetscMalloc1(ai[am]+bi[am],&cj);
5217:       PetscMalloc1(ai[am]+bi[am],&ca);
5218:       ci[0] = 0;
5219:       for (i=0,k=0; i<am; i++) {
5220:         const PetscInt ncols_o = bi[i+1] - bi[i];
5221:         const PetscInt ncols_d = ai[i+1] - ai[i];
5222:         ci[i+1] = ci[i] + ncols_o + ncols_d;
5223:         /* diagonal portion of A */
5224:         for (j=0; j<ncols_d; j++,k++) {
5225:           cj[k] = *aj++;
5226:           ca[k] = *aa++;
5227:         }
5228:         /* off-diagonal portion of A */
5229:         for (j=0; j<ncols_o; j++,k++) {
5230:           cj[k] = dn + *bj++;
5231:           ca[k] = *ba++;
5232:         }
5233:       }
5234:       /* put together the new matrix */
5235:       MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,dn+on,ci,cj,ca,A_loc);
5236:       /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5237:       /* Since these are PETSc arrays, change flags to free them as necessary. */
5238:       c          = (Mat_SeqAIJ*)(*A_loc)->data;
5239:       c->free_a  = PETSC_TRUE;
5240:       c->free_ij = PETSC_TRUE;
5241:       c->nonew   = 0;
5242:       MatSetType(*A_loc,((PetscObject)Ad)->type_name);
5243:     } else if (scall == MAT_REUSE_MATRIX) {
5244: #if defined(PETSC_HAVE_DEVICE)
5245:       (*A_loc)->offloadmask = PETSC_OFFLOAD_CPU;
5246: #endif
5247:       c  = (Mat_SeqAIJ*)(*A_loc)->data;
5248:       ca = c->a;
5249:       for (i=0; i<am; i++) {
5250:         const PetscInt ncols_d = ai[i+1] - ai[i];
5251:         const PetscInt ncols_o = bi[i+1] - bi[i];
5252:         /* diagonal portion of A */
5253:         for (j=0; j<ncols_d; j++) *ca++ = *aa++;
5254:         /* off-diagonal portion of A */
5255:         for (j=0; j<ncols_o; j++) *ca++ = *ba++;
5256:       }
5257:     } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5258:     MatSeqAIJRestoreArrayRead(Ad,&aa);
5259:     MatSeqAIJRestoreArrayRead(Ao,&ba);
5260:     if (glob) {
5261:       PetscInt cst, *gidx;

5263:       MatGetOwnershipRangeColumn(A,&cst,NULL);
5264:       PetscMalloc1(dn+on,&gidx);
5265:       for (i=0; i<dn; i++) gidx[i]    = cst + i;
5266:       for (i=0; i<on; i++) gidx[i+dn] = cmap[i];
5267:       ISCreateGeneral(PetscObjectComm((PetscObject)Ad),dn+on,gidx,PETSC_OWN_POINTER,glob);
5268:     }
5269:   }
5270:   PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5271:   return(0);
5272: }

5274: /*@C
5275:      MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns

5277:     Not Collective

5279:    Input Parameters:
5280: +    A - the matrix
5281: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5282: -    row, col - index sets of rows and columns to extract (or NULL)

5284:    Output Parameter:
5285: .    A_loc - the local sequential matrix generated
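
   A usage sketch (error checking omitted; passing NULL requests all local rows and all nonzero columns):
.vb
     Mat A_loc;
     MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_loc);
.ve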

5287:     Level: developer

5289: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()

5291: @*/
5292: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
5293: {
5294:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5296:   PetscInt       i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
5297:   IS             isrowa,iscola;
5298:   Mat            *aloc;
5299:   PetscBool      match;

5302:   PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
5303:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5304:   PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
5305:   if (!row) {
5306:     start = A->rmap->rstart; end = A->rmap->rend;
5307:     ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
5308:   } else {
5309:     isrowa = *row;
5310:   }
5311:   if (!col) {
5312:     start = A->cmap->rstart;
5313:     cmap  = a->garray;
5314:     nzA   = a->A->cmap->n;
5315:     nzB   = a->B->cmap->n;
5316:     PetscMalloc1(nzA+nzB, &idx);
5317:     ncols = 0;
5318:     for (i=0; i<nzB; i++) {
5319:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5320:       else break;
5321:     }
5322:     imark = i;
5323:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;
5324:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
5325:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
5326:   } else {
5327:     iscola = *col;
5328:   }
5329:   if (scall != MAT_INITIAL_MATRIX) {
5330:     PetscMalloc1(1,&aloc);
5331:     aloc[0] = *A_loc;
5332:   }
5333:   MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
5334:   if (!col) { /* attach global id of condensed columns */
5335:     PetscObjectCompose((PetscObject)aloc[0],"_petsc_GetLocalMatCondensed_iscol",(PetscObject)iscola);
5336:   }
5337:   *A_loc = aloc[0];
5338:   PetscFree(aloc);
5339:   if (!row) {
5340:     ISDestroy(&isrowa);
5341:   }
5342:   if (!col) {
5343:     ISDestroy(&iscola);
5344:   }
5345:   PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
5346:   return(0);
5347: }

5349: /*
5350:  * Create a sequential AIJ matrix based on row indices; once a row is matched, the whole row is extracted.
5351:  * Rows could be local or remote. The routine is designed to be scalable in memory so that nothing is based
5352:  * on a global size.
5353:  * */
5354: PetscErrorCode MatCreateSeqSubMatrixWithRows_Private(Mat P,IS rows,Mat *P_oth)
5355: {
5356:   Mat_MPIAIJ               *p=(Mat_MPIAIJ*)P->data;
5357:   Mat_SeqAIJ               *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data,*p_oth;
5358:   PetscInt                 plocalsize,nrows,*ilocal,*oilocal,i,lidx,*nrcols,*nlcols,ncol;
5359:   PetscMPIInt              owner;
5360:   PetscSFNode              *iremote,*oiremote;
5361:   const PetscInt           *lrowindices;
5362:   PetscErrorCode           ierr;
5363:   PetscSF                  sf,osf;
5364:   PetscInt                 pcstart,*roffsets,*loffsets,*pnnz,j;
5365:   PetscInt                 ontotalcols,dntotalcols,ntotalcols,nout;
5366:   MPI_Comm                 comm;
5367:   ISLocalToGlobalMapping   mapping;

5370:   PetscObjectGetComm((PetscObject)P,&comm);
5371:   /* plocalsize is the number of roots
5372:    * nrows is the number of leaves
5373:    * */
5374:   MatGetLocalSize(P,&plocalsize,NULL);
5375:   ISGetLocalSize(rows,&nrows);
5376:   PetscCalloc1(nrows,&iremote);
5377:   ISGetIndices(rows,&lrowindices);
5378:   for (i=0;i<nrows;i++) {
5379:     /* Find a remote index and an owner for a row
5380:      * The row could be local or remote
5381:      * */
5382:     owner = 0;
5383:     lidx  = 0;
5384:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,&lidx);
5385:     iremote[i].index = lidx;
5386:     iremote[i].rank  = owner;
5387:   }
5388:   /* Create SF to communicate how many nonzero columns for each row */
5389:   PetscSFCreate(comm,&sf);
5390:   /* SF will figure out the number of nonzero colunms for each row, and their
5391:    * offsets
5392:    * */
5393:   PetscSFSetGraph(sf,plocalsize,nrows,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5394:   PetscSFSetFromOptions(sf);
5395:   PetscSFSetUp(sf);

5397:   PetscCalloc1(2*(plocalsize+1),&roffsets);
5398:   PetscCalloc1(2*plocalsize,&nrcols);
5399:   PetscCalloc1(nrows,&pnnz);
5400:   roffsets[0] = 0;
5401:   roffsets[1] = 0;
5402:   for (i=0;i<plocalsize;i++) {
5403:     /* diag */
5404:     nrcols[i*2+0] = pd->i[i+1] - pd->i[i];
5405:     /* off diag */
5406:     nrcols[i*2+1] = po->i[i+1] - po->i[i];
5407:     /* compute offsets so that we know the relative location of each row */
5408:     roffsets[(i+1)*2+0] = roffsets[i*2+0] + nrcols[i*2+0];
5409:     roffsets[(i+1)*2+1] = roffsets[i*2+1] + nrcols[i*2+1];
5410:   }
5411:   PetscCalloc1(2*nrows,&nlcols);
5412:   PetscCalloc1(2*nrows,&loffsets);
5413:   /* 'r' means root, and 'l' means leaf */
5414:   PetscSFBcastBegin(sf,MPIU_2INT,nrcols,nlcols,MPI_REPLACE);
5415:   PetscSFBcastBegin(sf,MPIU_2INT,roffsets,loffsets,MPI_REPLACE);
5416:   PetscSFBcastEnd(sf,MPIU_2INT,nrcols,nlcols,MPI_REPLACE);
5417:   PetscSFBcastEnd(sf,MPIU_2INT,roffsets,loffsets,MPI_REPLACE);
5418:   PetscSFDestroy(&sf);
5419:   PetscFree(roffsets);
5420:   PetscFree(nrcols);
5421:   dntotalcols = 0;
5422:   ontotalcols = 0;
5423:   ncol = 0;
5424:   for (i=0;i<nrows;i++) {
5425:     pnnz[i] = nlcols[i*2+0] + nlcols[i*2+1];
5426:     ncol = PetscMax(pnnz[i],ncol);
5427:     /* diag */
5428:     dntotalcols += nlcols[i*2+0];
5429:     /* off diag */
5430:     ontotalcols += nlcols[i*2+1];
5431:   }
5432:   /* We do not need to figure out the exact number of columns
5433:    * since all the calculations will be done by going through the raw data
5434:    * */
5435:   MatCreateSeqAIJ(PETSC_COMM_SELF,nrows,ncol,0,pnnz,P_oth);
5436:   MatSetUp(*P_oth);
5437:   PetscFree(pnnz);
5438:   p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5439:   /* diag */
5440:   PetscCalloc1(dntotalcols,&iremote);
5441:   /* off diag */
5442:   PetscCalloc1(ontotalcols,&oiremote);
5443:   /* diag */
5444:   PetscCalloc1(dntotalcols,&ilocal);
5445:   /* off diag */
5446:   PetscCalloc1(ontotalcols,&oilocal);
5447:   dntotalcols = 0;
5448:   ontotalcols = 0;
5449:   ntotalcols  = 0;
5450:   for (i=0;i<nrows;i++) {
5451:     owner = 0;
5452:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,NULL);
5453:     /* Set iremote for diag matrix */
5454:     for (j=0;j<nlcols[i*2+0];j++) {
5455:       iremote[dntotalcols].index   = loffsets[i*2+0] + j;
5456:       iremote[dntotalcols].rank    = owner;
5457:       /* P_oth is SeqAIJ, so ilocal needs to point to the first part of memory */
5458:       ilocal[dntotalcols++]        = ntotalcols++;
5459:     }
5460:     /* off diag */
5461:     for (j=0;j<nlcols[i*2+1];j++) {
5462:       oiremote[ontotalcols].index   = loffsets[i*2+1] + j;
5463:       oiremote[ontotalcols].rank    = owner;
5464:       oilocal[ontotalcols++]        = ntotalcols++;
5465:     }
5466:   }
5467:   ISRestoreIndices(rows,&lrowindices);
5468:   PetscFree(loffsets);
5469:   PetscFree(nlcols);
5470:   PetscSFCreate(comm,&sf);
5471:   /* P serves as roots and P_oth is leaves
5472:    * Diag matrix
5473:    * */
5474:   PetscSFSetGraph(sf,pd->i[plocalsize],dntotalcols,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5475:   PetscSFSetFromOptions(sf);
5476:   PetscSFSetUp(sf);

5478:   PetscSFCreate(comm,&osf);
5479:   /* Off diag */
5480:   PetscSFSetGraph(osf,po->i[plocalsize],ontotalcols,oilocal,PETSC_OWN_POINTER,oiremote,PETSC_OWN_POINTER);
5481:   PetscSFSetFromOptions(osf);
5482:   PetscSFSetUp(osf);
5483:   /* We operate on the matrix internal data for saving memory */
5484:   PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a,MPI_REPLACE);
5485:   PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a,MPI_REPLACE);
5486:   MatGetOwnershipRangeColumn(P,&pcstart,NULL);
5487:   /* Convert to global indices for diag matrix */
5488:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] += pcstart;
5489:   PetscSFBcastBegin(sf,MPIU_INT,pd->j,p_oth->j,MPI_REPLACE);
5490:   /* We want P_oth to store global indices */
5491:   ISLocalToGlobalMappingCreate(comm,1,p->B->cmap->n,p->garray,PETSC_COPY_VALUES,&mapping);
5492:   /* Use memory scalable approach */
5493:   ISLocalToGlobalMappingSetType(mapping,ISLOCALTOGLOBALMAPPINGHASH);
5494:   ISLocalToGlobalMappingApply(mapping,po->i[plocalsize],po->j,po->j);
5495:   PetscSFBcastBegin(osf,MPIU_INT,po->j,p_oth->j,MPI_REPLACE);
5496:   PetscSFBcastEnd(sf,MPIU_INT,pd->j,p_oth->j,MPI_REPLACE);
5497:   /* Convert back to local indices */
5498:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] -= pcstart;
5499:   PetscSFBcastEnd(osf,MPIU_INT,po->j,p_oth->j,MPI_REPLACE);
5500:   nout = 0;
5501:   ISGlobalToLocalMappingApply(mapping,IS_GTOLM_DROP,po->i[plocalsize],po->j,&nout,po->j);
5502:   if (nout != po->i[plocalsize]) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP,"n %D does not equal to nout %D \n",po->i[plocalsize],nout);
5503:   ISLocalToGlobalMappingDestroy(&mapping);
5504:   /* Exchange values */
5505:   PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a,MPI_REPLACE);
5506:   PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a,MPI_REPLACE);
5507:   /* Stop PETSc from shrinking memory */
5508:   for (i=0;i<nrows;i++) p_oth->ilen[i] = p_oth->imax[i];
5509:   MatAssemblyBegin(*P_oth,MAT_FINAL_ASSEMBLY);
5510:   MatAssemblyEnd(*P_oth,MAT_FINAL_ASSEMBLY);
5511:   /* Attach PetscSF objects to P_oth so that we can reuse it later */
5512:   PetscObjectCompose((PetscObject)*P_oth,"diagsf",(PetscObject)sf);
5513:   PetscObjectCompose((PetscObject)*P_oth,"offdiagsf",(PetscObject)osf);
5514:   PetscSFDestroy(&sf);
5515:   PetscSFDestroy(&osf);
5516:   return(0);
5517: }

5519: /*
5520:  * Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of local A
5521:  * This supports MPIAIJ and MAIJ
5522:  * */
5523: PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat A,Mat P,PetscInt dof,MatReuse reuse,Mat *P_oth)
5524: {
5525:   Mat_MPIAIJ            *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
5526:   Mat_SeqAIJ            *p_oth;
5527:   Mat_SeqAIJ            *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
5528:   IS                    rows,map;
5529:   PetscHMapI            hamp;
5530:   PetscInt              i,htsize,*rowindices,off,*mapping,key,count;
5531:   MPI_Comm              comm;
5532:   PetscSF               sf,osf;
5533:   PetscBool             has;
5534:   PetscErrorCode        ierr;

5537:   PetscObjectGetComm((PetscObject)A,&comm);
5538:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,P,0,0);
5539:   /* If it is the first time, create an index set of off-diag nonzero columns of A,
5540:    *  and then create a submatrix (that often is an overlapping matrix)
5541:    * */
5542:   if (reuse == MAT_INITIAL_MATRIX) {
5543:     /* Use a hash table to figure out unique keys */
5544:     PetscHMapICreate(&hamp);
5545:     PetscHMapIResize(hamp,a->B->cmap->n);
5546:     PetscCalloc1(a->B->cmap->n,&mapping);
5547:     count = 0;
5548:     /* Assume that a->garray is sorted; otherwise the following does not make sense */
5549:     for (i=0;i<a->B->cmap->n;i++) {
5550:       key  = a->garray[i]/dof;
5551:       PetscHMapIHas(hamp,key,&has);
5552:       if (!has) {
5553:         mapping[i] = count;
5554:         PetscHMapISet(hamp,key,count++);
5555:       } else {
5556:         /* Current 'i' has the same key as the previous step */
5557:         mapping[i] = count-1;
5558:       }
5559:     }
5560:     ISCreateGeneral(comm,a->B->cmap->n,mapping,PETSC_OWN_POINTER,&map);
5561:     PetscHMapIGetSize(hamp,&htsize);
5562:     if (htsize!=count) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP," Size of hash map %D is inconsistent with count %D \n",htsize,count);
5563:     PetscCalloc1(htsize,&rowindices);
5564:     off = 0;
5565:     PetscHMapIGetKeys(hamp,&off,rowindices);
5566:     PetscHMapIDestroy(&hamp);
5567:     PetscSortInt(htsize,rowindices);
5568:     ISCreateGeneral(comm,htsize,rowindices,PETSC_OWN_POINTER,&rows);
5569:     /* In case the matrix was already created but the user wants to recreate it */
5570:     MatDestroy(P_oth);
5571:     MatCreateSeqSubMatrixWithRows_Private(P,rows,P_oth);
5572:     PetscObjectCompose((PetscObject)*P_oth,"aoffdiagtopothmapping",(PetscObject)map);
5573:     ISDestroy(&map);
5574:     ISDestroy(&rows);
5575:   } else if (reuse == MAT_REUSE_MATRIX) {
5576:     /* If the matrix was already created, we simply update the values using the SF objects
5577:      * that were attached to the matrix earlier.
5578:      *  */
5579:     PetscObjectQuery((PetscObject)*P_oth,"diagsf",(PetscObject*)&sf);
5580:     PetscObjectQuery((PetscObject)*P_oth,"offdiagsf",(PetscObject*)&osf);
5581:     if (!sf || !osf) SETERRQ(comm,PETSC_ERR_ARG_NULL,"Matrix is not initialized yet");
5582:     p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5583:     /* Update values in place */
5584:     PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a,MPI_REPLACE);
5585:     PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a,MPI_REPLACE);
5586:     PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a,MPI_REPLACE);
5587:     PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a,MPI_REPLACE);
5588:   } else SETERRQ(comm,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown reuse type");
5589:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,P,0,0);
5590:   return(0);
5591: }

5593: /*@C
5594:     MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of local A

5596:     Collective on Mat

5598:    Input Parameters:
5599: +    A - the first matrix in mpiaij format
5600: .    B - the second matrix in mpiaij format
5601: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5603:    Input/Output Parameters:
5604: +    rowb - index sets of rows of B to extract (or NULL), modified on output
5605: -    colb - index sets of columns of B to extract (or NULL), modified on output

5607:    Output Parameter:
5608: .    B_seq - the sequential matrix generated
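
   A usage sketch (error checking and cleanup omitted):
.vb
     Mat B_seq;
     IS  rowb = NULL, colb = NULL;
     MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&B_seq);
     /* ... the numerical values of B change ... */
     MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&B_seq);
.ve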

5610:     Level: developer

5612: @*/
5613: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
5614: {
5615:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5617:   PetscInt       *idx,i,start,ncols,nzA,nzB,*cmap,imark;
5618:   IS             isrowb,iscolb;
5619:   Mat            *bseq=NULL;

5622:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5623:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5624:   }
5625:   PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);

5627:   if (scall == MAT_INITIAL_MATRIX) {
5628:     start = A->cmap->rstart;
5629:     cmap  = a->garray;
5630:     nzA   = a->A->cmap->n;
5631:     nzB   = a->B->cmap->n;
5632:     PetscMalloc1(nzA+nzB, &idx);
5633:     ncols = 0;
5634:     for (i=0; i<nzB; i++) {  /* row < local row index */
5635:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5636:       else break;
5637:     }
5638:     imark = i;
5639:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
5640:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5641:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
5642:     ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
5643:   } else {
5644:     if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5645:     isrowb  = *rowb; iscolb = *colb;
5646:     PetscMalloc1(1,&bseq);
5647:     bseq[0] = *B_seq;
5648:   }
5649:   MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
5650:   *B_seq = bseq[0];
5651:   PetscFree(bseq);
5652:   if (!rowb) {
5653:     ISDestroy(&isrowb);
5654:   } else {
5655:     *rowb = isrowb;
5656:   }
5657:   if (!colb) {
5658:     ISDestroy(&iscolb);
5659:   } else {
5660:     *colb = iscolb;
5661:   }
5662:   PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
5663:   return(0);
5664: }
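
/*
   A minimal usage sketch (illustrative only, not part of the library source): the index sets and the
   sequential matrix produced by the MAT_INITIAL_MATRIX call can be passed back for reuse while the
   nonzero structure is unchanged.

     IS  rowb = NULL,colb = NULL;
     Mat Bseq = NULL;
     MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&Bseq);
     ... numerical values of B change, structure stays the same ...
     MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&Bseq);
     ISDestroy(&rowb);ISDestroy(&colb);MatDestroy(&Bseq);
*/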

5666: /*
5667:     MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
5668:     of the OFF-DIAGONAL portion of the local part of A

5670:     Collective on Mat

5672:    Input Parameters:
5673: +    A,B - the matrices in mpiaij format
5674: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5676:    Output Parameters:
5677: +    startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5678: .    startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5679: .    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5680: -    B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N

5682:     Developer Notes: This directly accesses information inside the VecScatter associated with the matrix-vector product
5683:      for this matrix. This is not desirable.

5685:     Level: developer

5687: */
5688: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
5689: {
5690:   PetscErrorCode         ierr;
5691:   Mat_MPIAIJ             *a=(Mat_MPIAIJ*)A->data;
5692:   Mat_SeqAIJ             *b_oth;
5693:   VecScatter             ctx;
5694:   MPI_Comm               comm;
5695:   const PetscMPIInt      *rprocs,*sprocs;
5696:   const PetscInt         *srow,*rstarts,*sstarts;
5697:   PetscInt               *rowlen,*bufj,*bufJ,ncols = 0,aBn=a->B->cmap->n,row,*b_othi,*b_othj,*rvalues=NULL,*svalues=NULL,*cols,sbs,rbs;
5698:   PetscInt               i,j,k=0,l,ll,nrecvs,nsends,nrows,*rstartsj = NULL,*sstartsj,len;
5699:   PetscScalar            *b_otha,*bufa,*bufA,*vals = NULL;
5700:   MPI_Request            *reqs = NULL,*rwaits = NULL,*swaits = NULL;
5701:   PetscMPIInt            size,tag,rank,nreqs;

5704:   PetscObjectGetComm((PetscObject)A,&comm);
5705:   MPI_Comm_size(comm,&size);

5707:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5708:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5709:   }
5710:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
5711:   MPI_Comm_rank(comm,&rank);

5713:   if (size == 1) {
5714:     startsj_s = NULL;
5715:     bufa_ptr  = NULL;
5716:     *B_oth    = NULL;
5717:     return(0);
5718:   }

5720:   ctx = a->Mvctx;
5721:   tag = ((PetscObject)ctx)->tag;

5723:   VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&srow,&sprocs,&sbs);
5724:   /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5725:   VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL/*indices not needed*/,&rprocs,&rbs);
5726:   PetscMPIIntCast(nsends+nrecvs,&nreqs);
5727:   PetscMalloc1(nreqs,&reqs);
5728:   rwaits = reqs;
5729:   swaits = reqs + nrecvs;

5731:   if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5732:   if (scall == MAT_INITIAL_MATRIX) {
5733:     /* i-array */
5734:     /*---------*/
5735:     /*  post receives */
5736:     if (nrecvs) {PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);} /* rstarts can be NULL when nrecvs=0 */
5737:     for (i=0; i<nrecvs; i++) {
5738:       rowlen = rvalues + rstarts[i]*rbs;
5739:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
5740:       MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5741:     }

5743:     /* pack the outgoing message */
5744:     PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);

5746:     sstartsj[0] = 0;
5747:     rstartsj[0] = 0;
5748:     len         = 0; /* total length of j or a array to be sent */
5749:     if (nsends) {
5750:       k    = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */
5751:       PetscMalloc1(sbs*(sstarts[nsends]-sstarts[0]),&svalues);
5752:     }
5753:     for (i=0; i<nsends; i++) {
5754:       rowlen = svalues + (sstarts[i]-sstarts[0])*sbs;
5755:       nrows  = sstarts[i+1]-sstarts[i]; /* num of block rows */
5756:       for (j=0; j<nrows; j++) {
5757:         row = srow[k] + B->rmap->range[rank]; /* global row idx */
5758:         for (l=0; l<sbs; l++) {
5759:           MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL); /* rowlength */

5761:           rowlen[j*sbs+l] = ncols;

5763:           len += ncols;
5764:           MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);
5765:         }
5766:         k++;
5767:       }
5768:       MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);

5770:       sstartsj[i+1] = len;  /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5771:     }
5772:     /* recvs and sends of i-array are completed */
5773:     if (nreqs) {MPI_Waitall(nreqs,reqs,MPI_STATUSES_IGNORE);}
5774:     PetscFree(svalues);

5776:     /* allocate buffers for sending j and a arrays */
5777:     PetscMalloc1(len+1,&bufj);
5778:     PetscMalloc1(len+1,&bufa);

5780:     /* create i-array of B_oth */
5781:     PetscMalloc1(aBn+2,&b_othi);

5783:     b_othi[0] = 0;
5784:     len       = 0; /* total length of j or a array to be received */
5785:     k         = 0;
5786:     for (i=0; i<nrecvs; i++) {
5787:       rowlen = rvalues + (rstarts[i]-rstarts[0])*rbs;
5788:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of rows to be received */
5789:       for (j=0; j<nrows; j++) {
5790:         b_othi[k+1] = b_othi[k] + rowlen[j];
5791:         PetscIntSumError(rowlen[j],len,&len);
5792:         k++;
5793:       }
5794:       rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5795:     }
5796:     PetscFree(rvalues);

5798:     /* allocate space for j and a arrays of B_oth */
5799:     PetscMalloc1(b_othi[aBn]+1,&b_othj);
5800:     PetscMalloc1(b_othi[aBn]+1,&b_otha);

5802:     /* j-array */
5803:     /*---------*/
5804:     /*  post receives of j-array */
5805:     for (i=0; i<nrecvs; i++) {
5806:       nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5807:       MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5808:     }

5810:     /* pack the outgoing message j-array */
5811:     if (nsends) k = sstarts[0];
5812:     for (i=0; i<nsends; i++) {
5813:       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5814:       bufJ  = bufj+sstartsj[i];
5815:       for (j=0; j<nrows; j++) {
5816:         row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5817:         for (ll=0; ll<sbs; ll++) {
5818:           MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5819:           for (l=0; l<ncols; l++) {
5820:             *bufJ++ = cols[l];
5821:           }
5822:           MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5823:         }
5824:       }
5825:       MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
5826:     }

5828:     /* recvs and sends of j-array are completed */
5829:     if (nreqs) {MPI_Waitall(nreqs,reqs,MPI_STATUSES_IGNORE);}
5830:   } else if (scall == MAT_REUSE_MATRIX) {
5831:     sstartsj = *startsj_s;
5832:     rstartsj = *startsj_r;
5833:     bufa     = *bufa_ptr;
5834:     b_oth    = (Mat_SeqAIJ*)(*B_oth)->data;
5835:     b_otha   = b_oth->a;
5836: #if defined(PETSC_HAVE_DEVICE)
5837:     (*B_oth)->offloadmask = PETSC_OFFLOAD_CPU;
5838: #endif
5839:   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Unknown reuse type");

5841:   /* a-array */
5842:   /*---------*/
5843:   /*  post receives of a-array */
5844:   for (i=0; i<nrecvs; i++) {
5845:     nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5846:     MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
5847:   }

5849:   /* pack the outgoing message a-array */
5850:   if (nsends) k = sstarts[0];
5851:   for (i=0; i<nsends; i++) {
5852:     nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5853:     bufA  = bufa+sstartsj[i];
5854:     for (j=0; j<nrows; j++) {
5855:       row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5856:       for (ll=0; ll<sbs; ll++) {
5857:         MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5858:         for (l=0; l<ncols; l++) {
5859:           *bufA++ = vals[l];
5860:         }
5861:         MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5862:       }
5863:     }
5864:     MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
5865:   }
5866:   /* recvs and sends of a-array are completed */
5867:   if (nreqs) {MPI_Waitall(nreqs,reqs,MPI_STATUSES_IGNORE);}
5868:   PetscFree(reqs);

5870:   if (scall == MAT_INITIAL_MATRIX) {
5871:     /* put together the new matrix */
5872:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);

5874:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5875:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5876:     b_oth          = (Mat_SeqAIJ*)(*B_oth)->data;
5877:     b_oth->free_a  = PETSC_TRUE;
5878:     b_oth->free_ij = PETSC_TRUE;
5879:     b_oth->nonew   = 0;

5881:     PetscFree(bufj);
5882:     if (!startsj_s || !bufa_ptr) {
5883:       PetscFree2(sstartsj,rstartsj);
5884:       PetscFree(bufa);
5885:     } else {
5886:       *startsj_s = sstartsj;
5887:       *startsj_r = rstartsj;
5888:       *bufa_ptr  = bufa;
5889:     }
5890:   }

5892:   VecScatterRestoreRemote_Private(ctx,PETSC_TRUE,&nsends,&sstarts,&srow,&sprocs,&sbs);
5893:   VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE,&nrecvs,&rstarts,NULL,&rprocs,&rbs);
5894:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
5895:   return(0);
5896: }
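
/*
   A minimal sketch (illustrative only) of the reuse pattern for this routine, mirroring how the
   MPIAIJBACKEND product code below calls it: the starts and buffer arrays returned by the first call
   are handed back so that B_oth is updated in place.

     PetscInt  *startsj_s = NULL,*startsj_r = NULL;
     MatScalar *bufa = NULL;
     Mat       B_oth = NULL;
     MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_INITIAL_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);
     ... values of B change, nonzero structure stays the same ...
     MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_REUSE_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);
     PetscFree2(startsj_s,startsj_r);PetscFree(bufa);MatDestroy(&B_oth);
*/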

5898: /*@C
5899:   MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.

5901:   Not Collective

5903:   Input Parameter:
5904: . A - The matrix in mpiaij format

5906:   Output Parameters:
5907: + lvec - The local vector holding off-process values from the argument to a matrix-vector product
5908: . colmap - A map from global column index to local index into lvec
5909: - multScatter - A scatter from the argument of a matrix-vector product to lvec

5911:   Level: developer

5913: @*/
5914: #if defined(PETSC_USE_CTABLE)
5915: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
5916: #else
5917: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
5918: #endif
5919: {
5920:   Mat_MPIAIJ *a;

5927:   a = (Mat_MPIAIJ*) A->data;
5928:   if (lvec) *lvec = a->lvec;
5929:   if (colmap) *colmap = a->colmap;
5930:   if (multScatter) *multScatter = a->Mvctx;
5931:   return(0);
5932: }
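
/*
   A minimal usage sketch (illustrative only): any output argument may be NULL if it is not needed,
   which also sidesteps the PETSC_USE_CTABLE-dependent type of colmap.

     Vec        lvec;
     VecScatter sct;
     MatGetCommunicationStructs(A,&lvec,NULL,&sct);
*/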

5934: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
5935: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
5936: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat,MatType,MatReuse,Mat*);
5937: #if defined(PETSC_HAVE_MKL_SPARSE)
5938: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat,MatType,MatReuse,Mat*);
5939: #endif
5940: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIBAIJ(Mat,MatType,MatReuse,Mat*);
5941: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
5942: #if defined(PETSC_HAVE_ELEMENTAL)
5943: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
5944: #endif
5945: #if defined(PETSC_HAVE_SCALAPACK)
5946: PETSC_INTERN PetscErrorCode MatConvert_AIJ_ScaLAPACK(Mat,MatType,MatReuse,Mat*);
5947: #endif
5948: #if defined(PETSC_HAVE_HYPRE)
5949: PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
5950: #endif
5951: #if defined(PETSC_HAVE_CUDA)
5952: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
5953: #endif
5954: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
5955: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJKokkos(Mat,MatType,MatReuse,Mat*);
5956: #endif
5957: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat,MatType,MatReuse,Mat*);
5958: PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat,MatType,MatReuse,Mat*);
5959: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_IS_XAIJ(Mat);

5961: /*
5962:     Computes (B'*A')' since computing the MPIDense*MPIAIJ product A*B directly is untenable

5964:                n                       p                          p
5965:         [             ]       [             ]         [                 ]
5966:       m [      A      ]  *  n [       B     ]   =   m [         C       ]
5967:         [             ]       [             ]         [                 ]

5969: */
5970: static PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
5971: {
5973:   Mat            At,Bt,Ct;

5976:   MatTranspose(A,MAT_INITIAL_MATRIX,&At);
5977:   MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
5978:   MatMatMult(Bt,At,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Ct);
5979:   MatDestroy(&At);
5980:   MatDestroy(&Bt);
5981:   MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
5982:   MatDestroy(&Ct);
5983:   return(0);
5984: }
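
/*
   Illustrative note on the identity used above, in terms of the code variables:

     Ct = Bt*At = B^T * A^T,   so   C = Ct^T = (B^T * A^T)^T = A*B
*/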

5986: static PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat C)
5987: {
5989:   PetscBool      cisdense;

5992:   if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %D != B->rmap->n %D",A->cmap->n,B->rmap->n);
5993:   MatSetSizes(C,A->rmap->n,B->cmap->n,A->rmap->N,B->cmap->N);
5994:   MatSetBlockSizesFromMats(C,A,B);
5995:   PetscObjectTypeCompareAny((PetscObject)C,&cisdense,MATMPIDENSE,MATMPIDENSECUDA,"");
5996:   if (!cisdense) {
5997:     MatSetType(C,((PetscObject)A)->type_name);
5998:   }
5999:   MatSetUp(C);

6001:   C->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
6002:   return(0);
6003: }

6005: /* ----------------------------------------------------------------*/
6006: static PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ_AB(Mat C)
6007: {
6008:   Mat_Product *product = C->product;
6009:   Mat         A = product->A,B=product->B;

6012:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
6013:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);

6015:   C->ops->matmultsymbolic = MatMatMultSymbolic_MPIDense_MPIAIJ;
6016:   C->ops->productsymbolic = MatProductSymbolic_AB;
6017:   return(0);
6018: }

6020: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ(Mat C)
6021: {
6023:   Mat_Product    *product = C->product;

6026:   if (product->type == MATPRODUCT_AB) {
6027:     MatProductSetFromOptions_MPIDense_MPIAIJ_AB(C);
6028:   }
6029:   return(0);
6030: }
6031: /* ----------------------------------------------------------------*/

6033: /*MC
6034:    MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.

6036:    Options Database Keys:
6037: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()

6039:    Level: beginner

6041:    Notes:
6042:     MatSetValues() may be called for this matrix type with a NULL argument for the numerical values;
6043:     in this case the values associated with the rows and columns one passes in are set to zero
6044:     in the matrix.

6046:     MatSetOption(,MAT_STRUCTURE_ONLY,PETSC_TRUE) may be called for this matrix type. In this case no
6047:     space is allocated for the nonzero entries and any entries passed with MatSetValues() are ignored.

6049: .seealso: MatCreateAIJ()
6050: M*/
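
/*
   A minimal creation sketch for MATMPIAIJ (illustrative only; m,n,M,N,d_nz,o_nz are placeholder values
   chosen by the caller):

     MatCreate(comm,&A);
     MatSetSizes(A,m,n,M,N);
     MatSetType(A,MATMPIAIJ);
     MatMPIAIJSetPreallocation(A,d_nz,NULL,o_nz,NULL);
*/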

6052: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
6053: {
6054:   Mat_MPIAIJ     *b;
6056:   PetscMPIInt    size;

6059:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);

6061:   PetscNewLog(B,&b);
6062:   B->data       = (void*)b;
6063:   PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
6064:   B->assembled  = PETSC_FALSE;
6065:   B->insertmode = NOT_SET_VALUES;
6066:   b->size       = size;

6068:   MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);

6070:   /* build cache for off array entries formed */
6071:   MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);

6073:   b->donotstash  = PETSC_FALSE;
6074:   b->colmap      = NULL;
6075:   b->garray      = NULL;
6076:   b->roworiented = PETSC_TRUE;

6078:   /* stuff used for matrix vector multiply */
6079:   b->lvec  = NULL;
6080:   b->Mvctx = NULL;

6082:   /* stuff for MatGetRow() */
6083:   b->rowindices   = NULL;
6084:   b->rowvalues    = NULL;
6085:   b->getrowactive = PETSC_FALSE;

6087:   /* flexible pointer used in CUSPARSE classes */
6088:   b->spptr = NULL;

6090:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);
6091:   PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);
6092:   PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);
6093:   PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);
6094:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);
6095:   PetscObjectComposeFunction((PetscObject)B,"MatResetPreallocation_C",MatResetPreallocation_MPIAIJ);
6096:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);
6097:   PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);
6098:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);
6099:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijsell_C",MatConvert_MPIAIJ_MPIAIJSELL);
6100: #if defined(PETSC_HAVE_CUDA)
6101:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcusparse_C",MatConvert_MPIAIJ_MPIAIJCUSPARSE);
6102: #endif
6103: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
6104:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijkokkos_C",MatConvert_MPIAIJ_MPIAIJKokkos);
6105: #endif
6106: #if defined(PETSC_HAVE_MKL_SPARSE)
6107:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijmkl_C",MatConvert_MPIAIJ_MPIAIJMKL);
6108: #endif
6109:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);
6110:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpibaij_C",MatConvert_MPIAIJ_MPIBAIJ);
6111:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);
6112:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpidense_C",MatConvert_MPIAIJ_MPIDense);
6113: #if defined(PETSC_HAVE_ELEMENTAL)
6114:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);
6115: #endif
6116: #if defined(PETSC_HAVE_SCALAPACK)
6117:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_scalapack_C",MatConvert_AIJ_ScaLAPACK);
6118: #endif
6119:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_XAIJ_IS);
6120:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisell_C",MatConvert_MPIAIJ_MPISELL);
6121: #if defined(PETSC_HAVE_HYPRE)
6122:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);
6123:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_transpose_mpiaij_mpiaij_C",MatProductSetFromOptions_Transpose_AIJ_AIJ);
6124: #endif
6125:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_is_mpiaij_C",MatProductSetFromOptions_IS_XAIJ);
6126:   PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpiaij_mpiaij_C",MatProductSetFromOptions_MPIAIJ);
6127:   PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
6128:   return(0);
6129: }

6131: /*@C
6132:      MatCreateMPIAIJWithSplitArrays - creates a MPI AIJ matrix using arrays that contain the "diagonal"
6133:          and "off-diagonal" part of the matrix in CSR format.

6135:    Collective

6137:    Input Parameters:
6138: +  comm - MPI communicator
6139: .  m - number of local rows (Cannot be PETSC_DECIDE)
6140: .  n - This value should be the same as the local size used in creating the
6141:        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
6142:        it calculated if N is given) For square matrices n is almost always m.
6143: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
6144: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
6145: .   i - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
6146: .   j - column indices, which must be local, i.e., based off the start column of the diagonal portion
6147: .   a - matrix values
6148: .   oi - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix
6149: .   oj - column indices, which must be global, representing global columns in the MPIAIJ matrix
6150: -   oa - matrix values

6152:    Output Parameter:
6153: .   mat - the matrix

6155:    Level: advanced

6157:    Notes:
6158:        The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
6159:        must free the arrays once the matrix has been destroyed and not before.

6161:        The i and j indices are 0 based

6163:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix

6165:        This sets local rows and cannot be used to set off-processor values.

6167:        Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
6168:        legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
6169:        not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
6170:        the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
6171:        keep track of the underlying array. Use MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
6172:        communication if it is known that only local entries will be set.

6174: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
6175:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
6176: @*/
6177: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
6178: {
6180:   Mat_MPIAIJ     *maij;

6183:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
6184:   if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
6185:   if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
6186:   MatCreate(comm,mat);
6187:   MatSetSizes(*mat,m,n,M,N);
6188:   MatSetType(*mat,MATMPIAIJ);
6189:   maij = (Mat_MPIAIJ*) (*mat)->data;

6191:   (*mat)->preallocated = PETSC_TRUE;

6193:   PetscLayoutSetUp((*mat)->rmap);
6194:   PetscLayoutSetUp((*mat)->cmap);

6196:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
6197:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);

6199:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
6200:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
6201:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
6202:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
6203:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
6204:   return(0);
6205: }
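
/*
   A small worked example (illustrative only) for MatCreateMPIAIJWithSplitArrays() on two processes,
   each owning one row and one column of the global 2x2 matrix [[1,2],[3,4]]:

     rank 0 (row 0):  i  = {0,1}, j  = {0}, a  = {1.0}    diagonal block, local column indices
                      oi = {0,1}, oj = {1}, oa = {2.0}    off-diagonal block, global column indices
     rank 1 (row 1):  i  = {0,1}, j  = {0}, a  = {4.0}
                      oi = {0,1}, oj = {0}, oa = {3.0}

     MatCreateMPIAIJWithSplitArrays(comm,1,1,2,2,i,j,a,oi,oj,oa,&mat);

   Recall that the arrays are not copied, so they must remain valid until the matrix is destroyed.
*/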

6207: /*
6208:     Special version for direct calls from Fortran
6209: */
6210: #include <petsc/private/fortranimpl.h>

6212: /* Change these macros so they can be used in a void function */
6213: #undef CHKERRQ
6214: #define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
6215: #undef SETERRQ2
6216: #define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
6217: #undef SETERRQ3
6218: #define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
6219: #undef SETERRQ
6220: #define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)

6222: #if defined(PETSC_HAVE_FORTRAN_CAPS)
6223: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
6224: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
6225: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
6226: #else
6227: #endif
6228: PETSC_EXTERN void matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
6229: {
6230:   Mat            mat  = *mmat;
6231:   PetscInt       m    = *mm, n = *mn;
6232:   InsertMode     addv = *maddv;
6233:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
6234:   PetscScalar    value;

6237:   MatCheckPreallocated(mat,1);
6238:   if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
6239:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
6240:   {
6241:     PetscInt  i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
6242:     PetscInt  cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
6243:     PetscBool roworiented = aij->roworiented;

6245:     /* Some Variables required in the macro */
6246:     Mat        A                    = aij->A;
6247:     Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
6248:     PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
6249:     MatScalar  *aa                  = a->a;
6250:     PetscBool  ignorezeroentries    = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
6251:     Mat        B                    = aij->B;
6252:     Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
6253:     PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
6254:     MatScalar  *ba                  = b->a;
6255:     /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
6256:      * cannot use "#if defined" inside a macro. */
6257:     PETSC_UNUSED PetscBool inserted = PETSC_FALSE;

6259:     PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
6260:     PetscInt  nonew = a->nonew;
6261:     MatScalar *ap1,*ap2;

6264:     for (i=0; i<m; i++) {
6265:       if (im[i] < 0) continue;
6266:       if (PetscUnlikelyDebug(im[i] >= mat->rmap->N)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
6267:       if (im[i] >= rstart && im[i] < rend) {
6268:         row      = im[i] - rstart;
6269:         lastcol1 = -1;
6270:         rp1      = aj + ai[row];
6271:         ap1      = aa + ai[row];
6272:         rmax1    = aimax[row];
6273:         nrow1    = ailen[row];
6274:         low1     = 0;
6275:         high1    = nrow1;
6276:         lastcol2 = -1;
6277:         rp2      = bj + bi[row];
6278:         ap2      = ba + bi[row];
6279:         rmax2    = bimax[row];
6280:         nrow2    = bilen[row];
6281:         low2     = 0;
6282:         high2    = nrow2;

6284:         for (j=0; j<n; j++) {
6285:           if (roworiented) value = v[i*n+j];
6286:           else value = v[i+j*m];
6287:           if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
6288:           if (in[j] >= cstart && in[j] < cend) {
6289:             col = in[j] - cstart;
6290:             MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
6291: #if defined(PETSC_HAVE_DEVICE)
6292:             if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) A->offloadmask = PETSC_OFFLOAD_CPU;
6293: #endif
6294:           } else if (in[j] < 0) continue;
6295:           else if (PetscUnlikelyDebug(in[j] >= mat->cmap->N)) {
6296:             /* extra brace on SETERRQ2() is required for --with-errorchecking=0 - due to the next 'else' clause */
6297:             SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
6298:           } else {
6299:             if (mat->was_assembled) {
6300:               if (!aij->colmap) {
6301:                 MatCreateColmap_MPIAIJ_Private(mat);
6302:               }
6303: #if defined(PETSC_USE_CTABLE)
6304:               PetscTableFind(aij->colmap,in[j]+1,&col);
6305:               col--;
6306: #else
6307:               col = aij->colmap[in[j]] - 1;
6308: #endif
6309:               if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
6310:                 MatDisAssemble_MPIAIJ(mat);
6311:                 col  =  in[j];
6312:                 /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
6313:                 B        = aij->B;
6314:                 b        = (Mat_SeqAIJ*)B->data;
6315:                 bimax    = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
6316:                 rp2      = bj + bi[row];
6317:                 ap2      = ba + bi[row];
6318:                 rmax2    = bimax[row];
6319:                 nrow2    = bilen[row];
6320:                 low2     = 0;
6321:                 high2    = nrow2;
6322:                 bm       = aij->B->rmap->n;
6323:                 ba       = b->a;
6324:                 inserted = PETSC_FALSE;
6325:               }
6326:             } else col = in[j];
6327:             MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
6328: #if defined(PETSC_HAVE_DEVICE)
6329:             if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) B->offloadmask = PETSC_OFFLOAD_CPU;
6330: #endif
6331:           }
6332:         }
6333:       } else if (!aij->donotstash) {
6334:         if (roworiented) {
6335:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6336:         } else {
6337:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6338:         }
6339:       }
6340:     }
6341:   }
6342:   PetscFunctionReturnVoid();
6343: }

6345: typedef struct {
6346:   Mat       *mp;    /* intermediate products */
6347:   PetscBool *mptmp; /* is the intermediate product temporary? */
6348:   PetscInt  cp;     /* number of intermediate products */

6350:   /* support for MatGetBrowsOfAoCols_MPIAIJ for P_oth */
6351:   PetscInt    *startsj_s,*startsj_r;
6352:   PetscScalar *bufa;
6353:   Mat         P_oth;

6355:   /* may take advantage of merging product->B */
6356:   Mat Bloc; /* B-local by merging diag and off-diag */

6358:   /* cusparse does not support splitting the symbolic and numeric phases.
6359:      When api_user is true, we do not need to update the numerical values
6360:      of the temporary storage */
6361:   PetscBool reusesym;

6363:   /* support for COO values insertion */
6364:   PetscScalar  *coo_v,*coo_w; /* store on-process and off-process COO scalars, and used as MPI recv/send buffers respectively */
6365:   PetscInt     **own; /* own[i] points to address of on-process COO indices for Mat mp[i] */
6366:   PetscInt     **off; /* off[i] points to address of off-process COO indices for Mat mp[i] */
6367:   PetscBool    hasoffproc; /* if true, have off-process values insertion (i.e. AtB or PtAP) */
6368:   PetscSF      sf; /* used for non-local values insertion and memory malloc */
6369:   PetscMemType mtype;

6371:   /* customization */
6372:   PetscBool abmerge;
6373:   PetscBool P_oth_bind;
6374: } MatMatMPIAIJBACKEND;

6376: PetscErrorCode MatDestroy_MatMatMPIAIJBACKEND(void *data)
6377: {
6378:   MatMatMPIAIJBACKEND *mmdata = (MatMatMPIAIJBACKEND*)data;
6379:   PetscInt            i;
6380:   PetscErrorCode      ierr;

6383:   PetscFree2(mmdata->startsj_s,mmdata->startsj_r);
6384:   PetscFree(mmdata->bufa);
6385:   PetscSFFree(mmdata->sf,mmdata->mtype,mmdata->coo_v);
6386:   PetscSFFree(mmdata->sf,mmdata->mtype,mmdata->coo_w);
6387:   MatDestroy(&mmdata->P_oth);
6388:   MatDestroy(&mmdata->Bloc);
6389:   PetscSFDestroy(&mmdata->sf);
6390:   for (i = 0; i < mmdata->cp; i++) {
6391:     MatDestroy(&mmdata->mp[i]);
6392:   }
6393:   PetscFree2(mmdata->mp,mmdata->mptmp);
6394:   PetscFree(mmdata->own[0]);
6395:   PetscFree(mmdata->own);
6396:   PetscFree(mmdata->off[0]);
6397:   PetscFree(mmdata->off);
6398:   PetscFree(mmdata);
6399:   return(0);
6400: }

6402: static PetscErrorCode MatSeqAIJCopySubArray(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
6403: {
6404:   PetscErrorCode (*f)(Mat,PetscInt,const PetscInt[],PetscScalar[]);

6408:   PetscObjectQueryFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",&f);
6409:   if (f) {
6410:     (*f)(A,n,idx,v);
6411:   } else {
6412:     const PetscScalar *vv;

6414:     MatSeqAIJGetArrayRead(A,&vv);
6415:     if (n && idx) {
6416:       PetscScalar    *w = v;
6417:       const PetscInt *oi = idx;
6418:       PetscInt       j;

6420:       for (j = 0; j < n; j++) *w++ = vv[*oi++];
6421:     } else {
6422:       PetscArraycpy(v,vv,n);
6423:     }
6424:     MatSeqAIJRestoreArrayRead(A,&vv);
6425:   }
6426:   return(0);
6427: }
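
/*
   Illustrative note on the fallback path above: with vv the aij value array of A, the helper performs
   v[k] = vv[idx[k]] for k = 0..n-1, or a plain copy of the first n values when idx is NULL.
*/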

6429: static PetscErrorCode MatProductNumeric_MPIAIJBACKEND(Mat C)
6430: {
6431:   MatMatMPIAIJBACKEND *mmdata;
6432:   PetscInt            i,n_d,n_o;
6433:   PetscErrorCode      ierr;

6436:   MatCheckProduct(C,1);
6437:   if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
6438:   mmdata = (MatMatMPIAIJBACKEND*)C->product->data;
6439:   if (!mmdata->reusesym) { /* update temporary matrices */
6440:     if (mmdata->P_oth) {
6441:       MatGetBrowsOfAoCols_MPIAIJ(C->product->A,C->product->B,MAT_REUSE_MATRIX,&mmdata->startsj_s,&mmdata->startsj_r,&mmdata->bufa,&mmdata->P_oth);
6442:     }
6443:     if (mmdata->Bloc) {
6444:       MatMPIAIJGetLocalMatMerge(C->product->B,MAT_REUSE_MATRIX,NULL,&mmdata->Bloc);
6445:     }
6446:   }
6447:   mmdata->reusesym = PETSC_FALSE;

6449:   for (i = 0; i < mmdata->cp; i++) {
6450:     if (!mmdata->mp[i]->ops->productnumeric) SETERRQ1(PetscObjectComm((PetscObject)mmdata->mp[i]),PETSC_ERR_PLIB,"Missing numeric op for %s",MatProductTypes[mmdata->mp[i]->product->type]);
6451:     (*mmdata->mp[i]->ops->productnumeric)(mmdata->mp[i]);
6452:   }
6453:   for (i = 0, n_d = 0, n_o = 0; i < mmdata->cp; i++) {
6454:     PetscInt noff = mmdata->off[i+1] - mmdata->off[i];

6456:     if (mmdata->mptmp[i]) continue;
6457:     if (noff) {
6458:       PetscInt nown = mmdata->own[i+1] - mmdata->own[i];

6460:       MatSeqAIJCopySubArray(mmdata->mp[i],noff,mmdata->off[i],mmdata->coo_w + n_o);
6461:       MatSeqAIJCopySubArray(mmdata->mp[i],nown,mmdata->own[i],mmdata->coo_v + n_d);
6462:       n_o += noff;
6463:       n_d += nown;
6464:     } else {
6465:       Mat_SeqAIJ *mm = (Mat_SeqAIJ*)mmdata->mp[i]->data;

6467:       MatSeqAIJCopySubArray(mmdata->mp[i],mm->nz,NULL,mmdata->coo_v + n_d);
6468:       n_d += mm->nz;
6469:     }
6470:   }
6471:   if (mmdata->hasoffproc) { /* offprocess insertion */
6472:     PetscSFGatherBegin(mmdata->sf,MPIU_SCALAR,mmdata->coo_w,mmdata->coo_v+n_d);
6473:     PetscSFGatherEnd(mmdata->sf,MPIU_SCALAR,mmdata->coo_w,mmdata->coo_v+n_d);
6474:   }
6475:   MatSetValuesCOO(C,mmdata->coo_v,INSERT_VALUES);
6476:   return(0);
6477: }

6479: /* Support for Pt * A, A * P, or Pt * A * P */
6480: #define MAX_NUMBER_INTERMEDIATE 4
6481: PetscErrorCode MatProductSymbolic_MPIAIJBACKEND(Mat C)
6482: {
6483:   Mat_Product            *product = C->product;
6484:   Mat                    A,P,mp[MAX_NUMBER_INTERMEDIATE]; /* A, P and a series of intermediate matrices */
6485:   Mat_MPIAIJ             *a,*p;
6486:   MatMatMPIAIJBACKEND    *mmdata;
6487:   ISLocalToGlobalMapping P_oth_l2g = NULL;
6488:   IS                     glob = NULL;
6489:   const char             *prefix;
6490:   char                   pprefix[256];
6491:   const PetscInt         *globidx,*P_oth_idx;
6492:   PetscInt               i,j,cp,m,n,M,N,ncoo,ncoo_d,ncoo_o,ncoo_oown,*coo_i,*coo_j;
6493:   PetscInt               cmapt[MAX_NUMBER_INTERMEDIATE],rmapt[MAX_NUMBER_INTERMEDIATE]; /* col/row map type for each Mat in mp[]. */
6494:                                                                                         /* type-0: consecutive, start from 0; type-1: consecutive with */
6495:                                                                                         /* a base offset; type-2: sparse with a local to global map table */
6496:   const PetscInt         *cmapa[MAX_NUMBER_INTERMEDIATE],*rmapa[MAX_NUMBER_INTERMEDIATE]; /* col/row local to global map array (table) for type-2 map type */

6498:   MatProductType         ptype;
6499:   PetscBool              mptmp[MAX_NUMBER_INTERMEDIATE],hasoffproc = PETSC_FALSE,iscuda,iskokk;
6500:   PetscMPIInt            size;
6501:   PetscErrorCode         ierr;

6504:   MatCheckProduct(C,1);
6505:   if (product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
6506:   ptype = product->type;
6507:   if (product->A->symmetric && ptype == MATPRODUCT_AtB) {
6508:     ptype = MATPRODUCT_AB;
6509:     product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE;
6510:   }
6511:   switch (ptype) {
6512:   case MATPRODUCT_AB:
6513:     A = product->A;
6514:     P = product->B;
6515:     m = A->rmap->n;
6516:     n = P->cmap->n;
6517:     M = A->rmap->N;
6518:     N = P->cmap->N;
6519:     hasoffproc = PETSC_FALSE; /* will not scatter mat product values to other processes */
6520:     break;
6521:   case MATPRODUCT_AtB:
6522:     P = product->A;
6523:     A = product->B;
6524:     m = P->cmap->n;
6525:     n = A->cmap->n;
6526:     M = P->cmap->N;
6527:     N = A->cmap->N;
6528:     hasoffproc = PETSC_TRUE;
6529:     break;
6530:   case MATPRODUCT_PtAP:
6531:     A = product->A;
6532:     P = product->B;
6533:     m = P->cmap->n;
6534:     n = P->cmap->n;
6535:     M = P->cmap->N;
6536:     N = P->cmap->N;
6537:     hasoffproc = PETSC_TRUE;
6538:     break;
6539:   default:
6540:     SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for product type %s",MatProductTypes[ptype]);
6541:   }
6542:   MPI_Comm_size(PetscObjectComm((PetscObject)C),&size);
6543:   if (size == 1) hasoffproc = PETSC_FALSE;

6545:   /* defaults */
6546:   for (i=0;i<MAX_NUMBER_INTERMEDIATE;i++) {
6547:     mp[i]    = NULL;
6548:     mptmp[i] = PETSC_FALSE;
6549:     rmapt[i] = -1;
6550:     cmapt[i] = -1;
6551:     rmapa[i] = NULL;
6552:     cmapa[i] = NULL;
6553:   }

6555:   /* customization */
6556:   PetscNew(&mmdata);
6557:   mmdata->reusesym = product->api_user;
6558:   if (ptype == MATPRODUCT_AB) {
6559:     if (product->api_user) {
6560:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMult","Mat");
6561:       PetscOptionsBool("-matmatmult_backend_mergeB","Merge product->B local matrices","MatMatMult",mmdata->abmerge,&mmdata->abmerge,NULL);
6562:       PetscOptionsBool("-matmatmult_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6563:       PetscOptionsEnd();
6564:     } else {
6565:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AB","Mat");
6566:       PetscOptionsBool("-matproduct_ab_backend_mergeB","Merge product->B local matrices","MatMatMult",mmdata->abmerge,&mmdata->abmerge,NULL);
6567:       PetscOptionsBool("-matproduct_ab_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6568:       PetscOptionsEnd();
6569:     }
6570:   } else if (ptype == MATPRODUCT_PtAP) {
6571:     if (product->api_user) {
6572:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatPtAP","Mat");
6573:       PetscOptionsBool("-matptap_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6574:       PetscOptionsEnd();
6575:     } else {
6576:       PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_PtAP","Mat");
6577:       PetscOptionsBool("-matproduct_ptap_backend_pothbind","Bind P_oth to CPU","MatBindToCPU",mmdata->P_oth_bind,&mmdata->P_oth_bind,NULL);
6578:       PetscOptionsEnd();
6579:     }
6580:   }
6581:   a = (Mat_MPIAIJ*)A->data;
6582:   p = (Mat_MPIAIJ*)P->data;
6583:   MatSetSizes(C,m,n,M,N);
6584:   PetscLayoutSetUp(C->rmap);
6585:   PetscLayoutSetUp(C->cmap);
6586:   MatSetType(C,((PetscObject)A)->type_name);
6587:   MatGetOptionsPrefix(C,&prefix);

6589:   cp   = 0;
6590:   switch (ptype) {
6591:   case MATPRODUCT_AB: /* A * P */
6592:     MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&mmdata->startsj_s,&mmdata->startsj_r,&mmdata->bufa,&mmdata->P_oth);

6594:     /* A_diag * P_local (merged or not) */
6595:     if (mmdata->abmerge) { /* P's diagonal and off-diag blocks are merged to one matrix, then multiplied by A_diag */
6596:       /* P is product->B */
6597:       MatMPIAIJGetLocalMatMerge(P,MAT_INITIAL_MATRIX,&glob,&mmdata->Bloc);
6598:       MatProductCreate(a->A,mmdata->Bloc,NULL,&mp[cp]);
6599:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6600:       MatProductSetFill(mp[cp],product->fill);
6601:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6602:       MatSetOptionsPrefix(mp[cp],prefix);
6603:       MatAppendOptionsPrefix(mp[cp],pprefix);
6604:       mp[cp]->product->api_user = product->api_user;
6605:       MatProductSetFromOptions(mp[cp]);
6606:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6607:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6608:       ISGetIndices(glob,&globidx);
6609:       rmapt[cp] = 1;
6610:       cmapt[cp] = 2;
6611:       cmapa[cp] = globidx;
6612:       mptmp[cp] = PETSC_FALSE;
6613:       cp++;
6614:     } else { /* A_diag * P_diag and A_diag * P_off */
6615:       MatProductCreate(a->A,p->A,NULL,&mp[cp]);
6616:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6617:       MatProductSetFill(mp[cp],product->fill);
6618:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6619:       MatSetOptionsPrefix(mp[cp],prefix);
6620:       MatAppendOptionsPrefix(mp[cp],pprefix);
6621:       mp[cp]->product->api_user = product->api_user;
6622:       MatProductSetFromOptions(mp[cp]);
6623:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6624:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6625:       rmapt[cp] = 1;
6626:       cmapt[cp] = 1;
6627:       mptmp[cp] = PETSC_FALSE;
6628:       cp++;
6629:       MatProductCreate(a->A,p->B,NULL,&mp[cp]);
6630:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6631:       MatProductSetFill(mp[cp],product->fill);
6632:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6633:       MatSetOptionsPrefix(mp[cp],prefix);
6634:       MatAppendOptionsPrefix(mp[cp],pprefix);
6635:       mp[cp]->product->api_user = product->api_user;
6636:       MatProductSetFromOptions(mp[cp]);
6637:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6638:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6639:       rmapt[cp] = 1;
6640:       cmapt[cp] = 2;
6641:       cmapa[cp] = p->garray;
6642:       mptmp[cp] = PETSC_FALSE;
6643:       cp++;
6644:     }

6646:     /* A_off * P_other */
6647:     if (mmdata->P_oth) {
6648:       MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth,&P_oth_l2g); /* make P_oth use local col ids */
6649:       ISLocalToGlobalMappingGetIndices(P_oth_l2g,&P_oth_idx);
6650:       MatSetType(mmdata->P_oth,((PetscObject)(a->B))->type_name);
6651:       MatBindToCPU(mmdata->P_oth,mmdata->P_oth_bind);
6652:       MatProductCreate(a->B,mmdata->P_oth,NULL,&mp[cp]);
6653:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6654:       MatProductSetFill(mp[cp],product->fill);
6655:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6656:       MatSetOptionsPrefix(mp[cp],prefix);
6657:       MatAppendOptionsPrefix(mp[cp],pprefix);
6658:       mp[cp]->product->api_user = product->api_user;
6659:       MatProductSetFromOptions(mp[cp]);
6660:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6661:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6662:       rmapt[cp] = 1;
6663:       cmapt[cp] = 2;
6664:       cmapa[cp] = P_oth_idx;
6665:       mptmp[cp] = PETSC_FALSE;
6666:       cp++;
6667:     }
6668:     break;

6670:   case MATPRODUCT_AtB: /* (P^t * A): P_diag * A_loc + P_off * A_loc */
6671:     /* A is product->B */
6672:     MatMPIAIJGetLocalMatMerge(A,MAT_INITIAL_MATRIX,&glob,&mmdata->Bloc);
6673:     if (A == P) { /* when A==P, we can take advantage of the already merged mmdata->Bloc */
6674:       MatProductCreate(mmdata->Bloc,mmdata->Bloc,NULL,&mp[cp]);
6675:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6676:       MatProductSetFill(mp[cp],product->fill);
6677:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6678:       MatSetOptionsPrefix(mp[cp],prefix);
6679:       MatAppendOptionsPrefix(mp[cp],pprefix);
6680:       mp[cp]->product->api_user = product->api_user;
6681:       MatProductSetFromOptions(mp[cp]);
6682:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6683:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6684:       ISGetIndices(glob,&globidx);
6685:       rmapt[cp] = 2;
6686:       rmapa[cp] = globidx;
6687:       cmapt[cp] = 2;
6688:       cmapa[cp] = globidx;
6689:       mptmp[cp] = PETSC_FALSE;
6690:       cp++;
6691:     } else {
6692:       MatProductCreate(p->A,mmdata->Bloc,NULL,&mp[cp]);
6693:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6694:       MatProductSetFill(mp[cp],product->fill);
6695:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6696:       MatSetOptionsPrefix(mp[cp],prefix);
6697:       MatAppendOptionsPrefix(mp[cp],pprefix);
6698:       mp[cp]->product->api_user = product->api_user;
6699:       MatProductSetFromOptions(mp[cp]);
6700:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6701:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6702:       ISGetIndices(glob,&globidx);
6703:       rmapt[cp] = 1;
6704:       cmapt[cp] = 2;
6705:       cmapa[cp] = globidx;
6706:       mptmp[cp] = PETSC_FALSE;
6707:       cp++;
6708:       MatProductCreate(p->B,mmdata->Bloc,NULL,&mp[cp]);
6709:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6710:       MatProductSetFill(mp[cp],product->fill);
6711:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6712:       MatSetOptionsPrefix(mp[cp],prefix);
6713:       MatAppendOptionsPrefix(mp[cp],pprefix);
6714:       mp[cp]->product->api_user = product->api_user;
6715:       MatProductSetFromOptions(mp[cp]);
6716:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6717:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6718:       rmapt[cp] = 2;
6719:       rmapa[cp] = p->garray;
6720:       cmapt[cp] = 2;
6721:       cmapa[cp] = globidx;
6722:       mptmp[cp] = PETSC_FALSE;
6723:       cp++;
6724:     }
6725:     break;
6726:   case MATPRODUCT_PtAP:
6727:     MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&mmdata->startsj_s,&mmdata->startsj_r,&mmdata->bufa,&mmdata->P_oth);
6728:     /* P is product->B */
6729:     MatMPIAIJGetLocalMatMerge(P,MAT_INITIAL_MATRIX,&glob,&mmdata->Bloc);
6730:     MatProductCreate(a->A,mmdata->Bloc,NULL,&mp[cp]);
6731:     MatProductSetType(mp[cp],MATPRODUCT_PtAP);
6732:     MatProductSetFill(mp[cp],product->fill);
6733:     PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6734:     MatSetOptionsPrefix(mp[cp],prefix);
6735:     MatAppendOptionsPrefix(mp[cp],pprefix);
6736:     mp[cp]->product->api_user = product->api_user;
6737:     MatProductSetFromOptions(mp[cp]);
6738:     if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6739:     (*mp[cp]->ops->productsymbolic)(mp[cp]);
6740:     ISGetIndices(glob,&globidx);
6741:     rmapt[cp] = 2;
6742:     rmapa[cp] = globidx;
6743:     cmapt[cp] = 2;
6744:     cmapa[cp] = globidx;
6745:     mptmp[cp] = PETSC_FALSE;
6746:     cp++;
6747:     if (mmdata->P_oth) {
6748:       MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth,&P_oth_l2g);
6749:       ISLocalToGlobalMappingGetIndices(P_oth_l2g,&P_oth_idx);
6750:       MatSetType(mmdata->P_oth,((PetscObject)(a->B))->type_name);
6751:       MatBindToCPU(mmdata->P_oth,mmdata->P_oth_bind);
6752:       MatProductCreate(a->B,mmdata->P_oth,NULL,&mp[cp]);
6753:       MatProductSetType(mp[cp],MATPRODUCT_AB);
6754:       MatProductSetFill(mp[cp],product->fill);
6755:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6756:       MatSetOptionsPrefix(mp[cp],prefix);
6757:       MatAppendOptionsPrefix(mp[cp],pprefix);
6758:       mp[cp]->product->api_user = product->api_user;
6759:       MatProductSetFromOptions(mp[cp]);
6760:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6761:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6762:       mptmp[cp] = PETSC_TRUE;
6763:       cp++;
6764:       MatProductCreate(mmdata->Bloc,mp[1],NULL,&mp[cp]);
6765:       MatProductSetType(mp[cp],MATPRODUCT_AtB);
6766:       MatProductSetFill(mp[cp],product->fill);
6767:       PetscSNPrintf(pprefix,sizeof(pprefix),"backend_p%D_",cp);
6768:       MatSetOptionsPrefix(mp[cp],prefix);
6769:       MatAppendOptionsPrefix(mp[cp],pprefix);
6770:       mp[cp]->product->api_user = product->api_user;
6771:       MatProductSetFromOptions(mp[cp]);
6772:       if (!mp[cp]->ops->productsymbolic) SETERRQ1(PetscObjectComm((PetscObject)mp[cp]),PETSC_ERR_PLIB,"Missing symbolic op for %s",MatProductTypes[mp[cp]->product->type]);
6773:       (*mp[cp]->ops->productsymbolic)(mp[cp]);
6774:       rmapt[cp] = 2;
6775:       rmapa[cp] = globidx;
6776:       cmapt[cp] = 2;
6777:       cmapa[cp] = P_oth_idx;
6778:       mptmp[cp] = PETSC_FALSE;
6779:       cp++;
6780:     }
6781:     break;
6782:   default:
6783:     SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for product type %s",MatProductTypes[ptype]);
6784:   }
6785:   /* sanity check */
6786:   if (size > 1) for (i = 0; i < cp; i++) if (rmapt[i] == 2 && !hasoffproc) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Unexpected offproc map type for product %D",i);

6788:   PetscMalloc2(cp,&mmdata->mp,cp,&mmdata->mptmp);
6789:   for (i = 0; i < cp; i++) {
6790:     mmdata->mp[i]    = mp[i];
6791:     mmdata->mptmp[i] = mptmp[i];
6792:   }
6793:   mmdata->cp = cp;
6794:   C->product->data       = mmdata;
6795:   C->product->destroy    = MatDestroy_MatMatMPIAIJBACKEND;
6796:   C->ops->productnumeric = MatProductNumeric_MPIAIJBACKEND;

6798:   /* memory type */
6799:   mmdata->mtype = PETSC_MEMTYPE_HOST;
6800:   PetscObjectTypeCompareAny((PetscObject)C,&iscuda,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,"");
6801:   PetscObjectTypeCompareAny((PetscObject)C,&iskokk,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,"");
6802:   if (iscuda) mmdata->mtype = PETSC_MEMTYPE_CUDA;
6803:   // enable the line below when MatSeqAIJCopySubArray_SeqAIJKokkos is implemented
6804:   //else if (iskokk) mmdata->mtype = PETSC_MEMTYPE_DEVICE;

6806:   /* prepare coo coordinates for values insertion */

6808:   /* count total nonzeros of those intermediate seqaij Mats
6809:     ncoo_d:    # of nonzeros of matrices that do not have offproc entries
6810:     ncoo_o:    # of nonzeros (of matrices that might have offproc entries) that will be inserted to remote procs
6811:     ncoo_oown: # of nonzeros (of matrices that might have offproc entries) that will be inserted locally
6812:   */
6813:   for (cp = 0, ncoo_d = 0, ncoo_o = 0, ncoo_oown = 0; cp < mmdata->cp; cp++) {
6814:     Mat_SeqAIJ *mm = (Mat_SeqAIJ*)mp[cp]->data;
6815:     if (mptmp[cp]) continue;
6816:     if (rmapt[cp] == 2 && hasoffproc) { /* the rows need to be scattered to all processes (might include self) */
6817:       const PetscInt *rmap = rmapa[cp];
6818:       const PetscInt mr = mp[cp]->rmap->n;
6819:       const PetscInt rs = C->rmap->rstart;
6820:       const PetscInt re = C->rmap->rend;
6821:       const PetscInt *ii  = mm->i;
6822:       for (i = 0; i < mr; i++) {
6823:         const PetscInt gr = rmap[i];
6824:         const PetscInt nz = ii[i+1] - ii[i];
6825:         if (gr < rs || gr >= re) ncoo_o += nz; /* this row is offproc */
6826:         else ncoo_oown += nz; /* this row is local */
6827:       }
6828:     } else ncoo_d += mm->nz;
6829:   }

6831:   /*
6832:     ncoo: total number of nonzeros (including those inserted by remote procs) belonging to this proc

6834:     ncoo = ncoo_d + ncoo_oown + ncoo2, where ncoo2 is the number of nonzeros inserted on this process by other processes.

6836:     off[0] points to a big index array, which is shared by off[1,2,...]. Similarly for own[0].

6838:     off[p]: points to the segment for matrix mp[p], storing location of nonzeros that mp[p] will insert to others
6839:     own[p]: points to the segment for matrix mp[p], storing location of nonzeros that mp[p] will insert locally
6840:     so, off[p+1]-off[p] is the number of nonzeros that mp[p] will send to others.

6842:     coo_i/j/v[]: [ncoo] row/col/val of nonzeros belonging to this proc.
6843:     Ex. coo_i[]: the beginning part (of size ncoo_d + ncoo_oown) stores i of local nonzeros, and the remaining part stores i of nonzeros this process will receive.
6844:   */
6845:   PetscCalloc1(mmdata->cp+1,&mmdata->off); /* +1 to make a csr-like data structure */
6846:   PetscCalloc1(mmdata->cp+1,&mmdata->own);

6848:   /* gather (i,j) of nonzeros inserted by remote procs */
6849:   if (hasoffproc) {
6850:     PetscSF  msf;
6851:     PetscInt ncoo2,*coo_i2,*coo_j2;

6853:     PetscMalloc1(ncoo_o,&mmdata->off[0]);
6854:     PetscMalloc1(ncoo_oown,&mmdata->own[0]);
6855:     PetscMalloc2(ncoo_o,&coo_i,ncoo_o,&coo_j); /* to collect (i,j) of entries to be sent to others */

6857:     for (cp = 0, ncoo_o = 0; cp < mmdata->cp; cp++) {
6858:       Mat_SeqAIJ *mm = (Mat_SeqAIJ*)mp[cp]->data;
6859:       PetscInt   *idxoff = mmdata->off[cp];
6860:       PetscInt   *idxown = mmdata->own[cp];
6861:       if (!mptmp[cp] && rmapt[cp] == 2) { /* row map is sparse */
6862:         const PetscInt *rmap = rmapa[cp];
6863:         const PetscInt *cmap = cmapa[cp];
6864:         const PetscInt *ii  = mm->i;
6865:         PetscInt       *coi = coo_i + ncoo_o;
6866:         PetscInt       *coj = coo_j + ncoo_o;
6867:         const PetscInt mr = mp[cp]->rmap->n;
6868:         const PetscInt rs = C->rmap->rstart;
6869:         const PetscInt re = C->rmap->rend;
6870:         const PetscInt cs = C->cmap->rstart;
6871:         for (i = 0; i < mr; i++) {
6872:           const PetscInt *jj = mm->j + ii[i];
6873:           const PetscInt gr  = rmap[i];
6874:           const PetscInt nz  = ii[i+1] - ii[i];
6875:           if (gr < rs || gr >= re) { /* this is an offproc row */
6876:             for (j = ii[i]; j < ii[i+1]; j++) {
6877:               *coi++ = gr;
6878:               *idxoff++ = j;
6879:             }
6880:             if (!cmapt[cp]) { /* already global */
6881:               for (j = 0; j < nz; j++) *coj++ = jj[j];
6882:             } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
6883:               for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
6884:             } else { /* offdiag */
6885:               for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
6886:             }
6887:             ncoo_o += nz;
6888:           } else { /* this is a local row */
6889:             for (j = ii[i]; j < ii[i+1]; j++) *idxown++ = j;
6890:           }
6891:         }
6892:       }
6893:       mmdata->off[cp + 1] = idxoff;
6894:       mmdata->own[cp + 1] = idxown;
6895:     }
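    /* The SF created below has C's owned rows as roots (through the layout C->rmap) and the ncoo_o
       offproc entries as leaves, each addressed by the global row index stored in coo_i.  Its
       multi-SF exposes one root slot per incoming leaf, so the multi-SF root count ncoo2 is the
       number of (i,j) pairs this proc will receive, and PetscSFGather places them in the tail of
       coo_i2/coo_j2, after the ncoo_d + ncoo_oown slots reserved for locally generated entries. */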

6897:     PetscSFCreate(PetscObjectComm((PetscObject)C),&mmdata->sf);
6898:     PetscSFSetGraphLayout(mmdata->sf,C->rmap,ncoo_o/*nleaves*/,NULL/*ilocal*/,PETSC_OWN_POINTER,coo_i);
6899:     PetscSFGetMultiSF(mmdata->sf,&msf);
6900:     PetscSFGetGraph(msf,&ncoo2/*nroots*/,NULL,NULL,NULL);
6901:     ncoo = ncoo_d + ncoo_oown + ncoo2;
6902:     PetscMalloc2(ncoo,&coo_i2,ncoo,&coo_j2);
6903:     PetscSFGatherBegin(mmdata->sf,MPIU_INT,coo_i,coo_i2 + ncoo_d + ncoo_oown); /* put (i,j) of remote nonzeros at back */
6904:     PetscSFGatherEnd(mmdata->sf,MPIU_INT,coo_i,coo_i2 + ncoo_d + ncoo_oown);
6905:     PetscSFGatherBegin(mmdata->sf,MPIU_INT,coo_j,coo_j2 + ncoo_d + ncoo_oown);
6906:     PetscSFGatherEnd(mmdata->sf,MPIU_INT,coo_j,coo_j2 + ncoo_d + ncoo_oown);
6907:     PetscFree2(coo_i,coo_j);
6908:     /* allocate MPI send buffer to collect nonzero values to be sent to remote procs */
6909:     PetscSFMalloc(mmdata->sf,mmdata->mtype,ncoo_o*sizeof(PetscScalar),(void**)&mmdata->coo_w);
6910:     coo_i = coo_i2;
6911:     coo_j = coo_j2;
6912:   } else { /* no offproc value insertion */
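    /* an empty SF is still created so that mmdata->sf remains valid for the PetscSFMalloc() call
       below and for the product destroy routine (a note inferred from how mmdata->sf is used) */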
6913:     ncoo = ncoo_d;
6914:     PetscMalloc2(ncoo,&coo_i,ncoo,&coo_j);

6916:     PetscSFCreate(PetscObjectComm((PetscObject)C),&mmdata->sf);
6917:     PetscSFSetGraph(mmdata->sf,0,0,NULL,PETSC_OWN_POINTER,NULL,PETSC_OWN_POINTER);
6918:     PetscSFSetUp(mmdata->sf);
6919:   }
6920:   mmdata->hasoffproc = hasoffproc;
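  /* Map type encodings used above and below: rmapt[cp] == 1 means the rows of mp[cp] are C's owned
     rows in order (global row = local row + C->rmap->rstart); rmapt[cp] == 2 means a sparse row map,
     with rmapa[cp] giving the global rows.  cmapt[cp] == 0 means column indices are already global;
     cmapt[cp] == 1 means local and consecutive (global col = local col + C->cmap->rstart);
     cmapt[cp] == 2 means sparse, with cmapa[cp] giving the global columns. */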

6922:   /* gather (i,j) of nonzeros inserted locally */
6923:   for (cp = 0, ncoo_d = 0; cp < mmdata->cp; cp++) {
6924:     Mat_SeqAIJ     *mm = (Mat_SeqAIJ*)mp[cp]->data;
6925:     PetscInt       *coi = coo_i + ncoo_d;
6926:     PetscInt       *coj = coo_j + ncoo_d;
6927:     const PetscInt *jj  = mm->j;
6928:     const PetscInt *ii  = mm->i;
6929:     const PetscInt *cmap = cmapa[cp];
6930:     const PetscInt *rmap = rmapa[cp];
6931:     const PetscInt mr = mp[cp]->rmap->n;
6932:     const PetscInt rs = C->rmap->rstart;
6933:     const PetscInt re = C->rmap->rend;
6934:     const PetscInt cs = C->cmap->rstart;

6936:     if (mptmp[cp]) continue;
6937:     if (rmapt[cp] == 1) { /* consecutive rows */
6938:       /* fill coo_i */
6939:       for (i = 0; i < mr; i++) {
6940:         const PetscInt gr = i + rs;
6941:         for (j = ii[i]; j < ii[i+1]; j++) coi[j] = gr;
6942:       }
6943:       /* fill coo_j */
6944:       if (!cmapt[cp]) { /* type-0, already global */
6945:         PetscArraycpy(coj,jj,mm->nz);
6946:       } else if (cmapt[cp] == 1) { /* type-1, local to global for consecutive columns of C */
6947:         for (j = 0; j < mm->nz; j++) coj[j] = jj[j] + cs; /* lid + col start */
6948:       } else { /* type-2, local to global for sparse columns */
6949:         for (j = 0; j < mm->nz; j++) coj[j] = cmap[jj[j]];
6950:       }
6951:       ncoo_d += mm->nz;
6952:     } else if (rmapt[cp] == 2) { /* sparse rows */
6953:       for (i = 0; i < mr; i++) {
6954:         const PetscInt *jj = mm->j + ii[i];
6955:         const PetscInt gr  = rmap[i];
6956:         const PetscInt nz  = ii[i+1] - ii[i];
6957:         if (gr >= rs && gr < re) { /* local rows */
6958:           for (j = ii[i]; j < ii[i+1]; j++) *coi++ = gr;
6959:           if (!cmapt[cp]) { /* type-0, already global */
6960:             for (j = 0; j < nz; j++) *coj++ = jj[j];
6961:           } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
6962:             for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
6963:           } else { /* type-2, local to global for sparse columns */
6964:             for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
6965:           }
6966:           ncoo_d += nz;
6967:         }
6968:       }
6969:     }
6970:   }
6971:   if (glob) {
6972:     ISRestoreIndices(glob,&globidx);
6973:   }
6974:   ISDestroy(&glob);
6975:   if (P_oth_l2g) {
6976:     ISLocalToGlobalMappingRestoreIndices(P_oth_l2g,&P_oth_idx);
6977:   }
6978:   ISLocalToGlobalMappingDestroy(&P_oth_l2g);
6979:   /* allocate an array to store all nonzeros (inserted locally or remotely) belonging to this proc */
6980:   PetscSFMalloc(mmdata->sf,mmdata->mtype,ncoo*sizeof(PetscScalar),(void**)&mmdata->coo_v);

6982:   /* preallocate with COO data */
6983:   MatSetPreallocationCOO(C,ncoo,coo_i,coo_j);
6984:   PetscFree2(coo_i,coo_j);
6985:   return(0);
6986: }
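/*
   Illustrative sketch, not part of mpiaij.c: the two-phase COO assembly pattern that the
   symbolic/numeric split above relies on, shown on a standalone matrix.  MatSetPreallocationCOO()
   fixes the (i,j) pattern once, as done at the end of the symbolic phase above, after which
   MatSetValuesCOO() can be called repeatedly with new values, as done in
   MatProductNumeric_MPIAIJBACKEND().  The sizes, indices, values, and the helper name below are
   made up; the matrix type is assumed to be one that supports COO assembly (like C above).
*/
static PetscErrorCode MatCOOAssemblySketch(MPI_Comm comm)
{
  Mat         A;
  PetscInt    coo_i[3] = {0,0,1};
  PetscInt    coo_j[3] = {0,1,1};
  PetscScalar v[3]     = {2.0,-1.0,3.0};

  MatCreate(comm,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,2,2);
  MatSetType(A,MATAIJCUSPARSE);            /* a type that implements COO assembly */
  MatSetPreallocationCOO(A,3,coo_i,coo_j); /* symbolic: analyze and compress the (i,j) pattern */
  MatSetValuesCOO(A,v,INSERT_VALUES);      /* numeric: insert values; can be repeated when values change */
  MatDestroy(&A);
  return(0);
}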

6988: PetscErrorCode MatProductSetFromOptions_MPIAIJBACKEND(Mat mat)
6989: {
6990:   Mat_Product    *product = mat->product;
6992: #if defined(PETSC_HAVE_DEVICE)
6993:   PetscBool      match = PETSC_FALSE;
6994:   PetscBool      usecpu = PETSC_FALSE;
6995: #else
6996:   PetscBool      match = PETSC_TRUE;
6997: #endif

7000:   MatCheckProduct(mat,1);
7001: #if defined(PETSC_HAVE_DEVICE)
7002:   if (!product->A->boundtocpu && !product->B->boundtocpu) {
7003:     PetscObjectTypeCompare((PetscObject)product->B,((PetscObject)product->A)->type_name,&match);
7004:   }
7005:   if (match) { /* we can always fall back to the CPU if requested */
7006:     switch (product->type) {
7007:     case MATPRODUCT_AB:
7008:       if (product->api_user) {
7009:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMult","Mat");
7010:         PetscOptionsBool("-matmatmult_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);
7011:         PetscOptionsEnd();
7012:       } else {
7013:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AB","Mat");
7014:         PetscOptionsBool("-matproduct_ab_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);
7015:         PetscOptionsEnd();
7016:       }
7017:       break;
7018:     case MATPRODUCT_AtB:
7019:       if (product->api_user) {
7020:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatTransposeMatMult","Mat");
7021:         PetscOptionsBool("-mattransposematmult_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);
7022:         PetscOptionsEnd();
7023:       } else {
7024:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AtB","Mat");
7025:         PetscOptionsBool("-matproduct_atb_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);
7026:         PetscOptionsEnd();
7027:       }
7028:       break;
7029:     case MATPRODUCT_PtAP:
7030:       if (product->api_user) {
7031:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatPtAP","Mat");
7032:         PetscOptionsBool("-matptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);
7033:         PetscOptionsEnd();
7034:       } else {
7035:         PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_PtAP","Mat");
7036:         PetscOptionsBool("-matproduct_ptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);
7037:         PetscOptionsEnd();
7038:       }
7039:       break;
7040:     default:
7041:       break;
7042:     }
7043:     match = (PetscBool)!usecpu;
7044:   }
7045: #endif
7046:   if (match) {
7047:     switch (product->type) {
7048:     case MATPRODUCT_AB:
7049:     case MATPRODUCT_AtB:
7050:     case MATPRODUCT_PtAP:
7051:       mat->ops->productsymbolic = MatProductSymbolic_MPIAIJBACKEND;
7052:       break;
7053:     default:
7054:       break;
7055:     }
7056:   }
7057:   /* fall back to MPIAIJ ops */
7058:   if (!mat->ops->productsymbolic) {
7059:     MatProductSetFromOptions_MPIAIJ(mat);
7060:   }
7061:   return(0);
7062: }
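/*
   Illustrative usage sketch, not part of mpiaij.c: how an application reaches the backend product
   path registered above.  A and P are assumed to be assembled matrices of a device type
   (e.g. MATMPIAIJCUSPARSE) so that the dispatch lands in MatProductSetFromOptions_MPIAIJBACKEND();
   command line options such as -matproduct_ptap_backend_cpu (or -matptap_backend_cpu through the
   MatPtAP() interface) force the fallback to the MPIAIJ CPU path instead.  The helper name is made up.
*/
static PetscErrorCode MatPtAPBackendSketch(Mat A,Mat P,Mat *C)
{
  MatProductCreate(A,P,NULL,C);
  MatProductSetType(*C,MATPRODUCT_PtAP);
  MatProductSetFromOptions(*C); /* selects MatProductSymbolic_MPIAIJBACKEND unless a CPU fallback is requested */
  MatProductSymbolic(*C);       /* builds the COO pattern and the communication plan */
  MatProductNumeric(*C);        /* fills the values; can be called again when the values of A change */
  return(0);
}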