Actual source code: mpiaij.c

petsc-3.12.5 2020-03-29
  1:  #include <../src/mat/impls/aij/mpi/mpiaij.h>
  2:  #include <petsc/private/vecimpl.h>
  3:  #include <petsc/private/vecscatterimpl.h>
  4:  #include <petsc/private/isimpl.h>
  5:  #include <petscblaslapack.h>
  6:  #include <petscsf.h>
  7:  #include <petsc/private/hashmapi.h>

  9: /*MC
 10:    MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.

 12:    This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
 13:    and MATMPIAIJ otherwise.  As a result, for single process communicators,
  14:   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 15:   for communicators controlling multiple processes.  It is recommended that you call both of
 16:   the above preallocation routines for simplicity.

 18:    Options Database Keys:
 19: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()

 21:   Developer Notes:
  22:     Subclasses include MATAIJCUSP, MATAIJCUSPARSE, MATAIJPERM, MATAIJSELL, MATAIJMKL, and MATAIJCRL; the type also automatically switches over to using inodes when
  23:    enough exist.

 25:   Level: beginner

 27: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
 28: M*/
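/*
   Example (an illustrative sketch, not part of this file): creating a MATAIJ matrix and
   calling both preallocation routines, as recommended above, so the same code works on
   one process or many.  The per-row nonzero estimates (5 diagonal, 2 off-diagonal) and
   the size n are assumptions for the example; error checking is omitted.

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);
     MatSetType(A,MATAIJ);
     MatSetFromOptions(A);
     MatSeqAIJSetPreallocation(A,5,NULL);
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
     ... insert entries with MatSetValues(), then assemble ...
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/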

 30: /*MC
 31:    MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.

 33:    This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
 34:    and MATMPIAIJCRL otherwise.  As a result, for single process communicators,
 35:    MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
 36:   for communicators controlling multiple processes.  It is recommended that you call both of
 37:   the above preallocation routines for simplicity.

 39:    Options Database Keys:
 40: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()

 42:   Level: beginner

 44: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
 45: M*/
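/*
   Example (illustrative): the CRL variant can be selected either in code or from the
   options database; the latter requires that the application calls MatSetFromOptions()
   on the matrix.  The program name below is hypothetical.

     MatSetType(A,MATAIJCRL);
   or, at run time,
     ./app -mat_type aijcrl
*/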

 47: static PetscErrorCode MatPinToCPU_MPIAIJ(Mat A,PetscBool flg)
 48: {
 49:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

 53: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_VIENNACL)
 54:   A->pinnedtocpu = flg;
 55: #endif
 56:   if (a->A) {
 57:     MatPinToCPU(a->A,flg);
 58:   }
 59:   if (a->B) {
 60:     MatPinToCPU(a->B,flg);
 61:   }
 62:   return(0);
 63: }


 66: PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
 67: {
 69:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)M->data;

 72:   if (mat->A) {
 73:     MatSetBlockSizes(mat->A,rbs,cbs);
 74:     MatSetBlockSizes(mat->B,rbs,1);
 75:   }
 76:   return(0);
 77: }

 79: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
 80: {
 81:   PetscErrorCode  ierr;
 82:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ*)M->data;
 83:   Mat_SeqAIJ      *a   = (Mat_SeqAIJ*)mat->A->data;
 84:   Mat_SeqAIJ      *b   = (Mat_SeqAIJ*)mat->B->data;
 85:   const PetscInt  *ia,*ib;
 86:   const MatScalar *aa,*bb;
 87:   PetscInt        na,nb,i,j,*rows,cnt=0,n0rows;
 88:   PetscInt        m = M->rmap->n,rstart = M->rmap->rstart;

 91:   *keptrows = 0;
 92:   ia        = a->i;
 93:   ib        = b->i;
 94:   for (i=0; i<m; i++) {
 95:     na = ia[i+1] - ia[i];
 96:     nb = ib[i+1] - ib[i];
 97:     if (!na && !nb) {
 98:       cnt++;
 99:       goto ok1;
100:     }
101:     aa = a->a + ia[i];
102:     for (j=0; j<na; j++) {
103:       if (aa[j] != 0.0) goto ok1;
104:     }
105:     bb = b->a + ib[i];
106:     for (j=0; j <nb; j++) {
107:       if (bb[j] != 0.0) goto ok1;
108:     }
109:     cnt++;
110: ok1:;
111:   }
112:   MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
113:   if (!n0rows) return(0);
114:   PetscMalloc1(M->rmap->n-cnt,&rows);
115:   cnt  = 0;
116:   for (i=0; i<m; i++) {
117:     na = ia[i+1] - ia[i];
118:     nb = ib[i+1] - ib[i];
119:     if (!na && !nb) continue;
120:     aa = a->a + ia[i];
121:     for (j=0; j<na;j++) {
122:       if (aa[j] != 0.0) {
123:         rows[cnt++] = rstart + i;
124:         goto ok2;
125:       }
126:     }
127:     bb = b->a + ib[i];
128:     for (j=0; j<nb; j++) {
129:       if (bb[j] != 0.0) {
130:         rows[cnt++] = rstart + i;
131:         goto ok2;
132:       }
133:     }
134: ok2:;
135:   }
136:   ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
137:   return(0);
138: }
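/*
   Example (sketch): the routine above backs the public MatFindNonzeroRows() interface,
   which returns an index set of the locally owned rows containing at least one nonzero
   value, or leaves the IS NULL when no process has an all-zero row.

     IS keptrows;
     MatFindNonzeroRows(A,&keptrows);
     if (keptrows) { ... use it ... ISDestroy(&keptrows); }
*/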

140: PetscErrorCode  MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
141: {
142:   PetscErrorCode    ierr;
143:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*) Y->data;
144:   PetscBool         cong;

147:   MatHasCongruentLayouts(Y,&cong);
148:   if (Y->assembled && cong) {
149:     MatDiagonalSet(aij->A,D,is);
150:   } else {
151:     MatDiagonalSet_Default(Y,D,is);
152:   }
153:   return(0);
154: }

156: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
157: {
158:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)M->data;
160:   PetscInt       i,rstart,nrows,*rows;

163:   *zrows = NULL;
164:   MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
165:   MatGetOwnershipRange(M,&rstart,NULL);
166:   for (i=0; i<nrows; i++) rows[i] += rstart;
167:   ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
168:   return(0);
169: }

171: PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
172: {
174:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)A->data;
175:   PetscInt       i,n,*garray = aij->garray;
176:   Mat_SeqAIJ     *a_aij = (Mat_SeqAIJ*) aij->A->data;
177:   Mat_SeqAIJ     *b_aij = (Mat_SeqAIJ*) aij->B->data;
178:   PetscReal      *work;

181:   MatGetSize(A,NULL,&n);
182:   PetscCalloc1(n,&work);
183:   if (type == NORM_2) {
184:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
185:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
186:     }
187:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
188:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
189:     }
190:   } else if (type == NORM_1) {
191:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
192:       work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
193:     }
194:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
195:       work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
196:     }
197:   } else if (type == NORM_INFINITY) {
198:     for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
199:       work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
200:     }
201:     for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
202:       work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
203:     }

205:   } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
206:   if (type == NORM_INFINITY) {
207:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
208:   } else {
209:     MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
210:   }
211:   PetscFree(work);
212:   if (type == NORM_2) {
213:     for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
214:   }
215:   return(0);
216: }
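/*
   Example (sketch): the implementation above is reached through MatGetColumnNorms().
   The norms array must be long enough to hold every global column on every process.

     PetscInt  N;
     PetscReal *norms;
     MatGetSize(A,NULL,&N);
     PetscMalloc1(N,&norms);
     MatGetColumnNorms(A,NORM_2,norms);
     ...
     PetscFree(norms);
*/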

218: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
219: {
220:   Mat_MPIAIJ      *a  = (Mat_MPIAIJ*)A->data;
221:   IS              sis,gis;
222:   PetscErrorCode  ierr;
223:   const PetscInt  *isis,*igis;
224:   PetscInt        n,*iis,nsis,ngis,rstart,i;

227:   MatFindOffBlockDiagonalEntries(a->A,&sis);
228:   MatFindNonzeroRows(a->B,&gis);
229:   ISGetSize(gis,&ngis);
230:   ISGetSize(sis,&nsis);
231:   ISGetIndices(sis,&isis);
232:   ISGetIndices(gis,&igis);

234:   PetscMalloc1(ngis+nsis,&iis);
235:   PetscArraycpy(iis,igis,ngis);
236:   PetscArraycpy(iis+ngis,isis,nsis);
237:   n    = ngis + nsis;
238:   PetscSortRemoveDupsInt(&n,iis);
239:   MatGetOwnershipRange(A,&rstart,NULL);
240:   for (i=0; i<n; i++) iis[i] += rstart;
241:   ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);

243:   ISRestoreIndices(sis,&isis);
244:   ISRestoreIndices(gis,&igis);
245:   ISDestroy(&sis);
246:   ISDestroy(&gis);
247:   return(0);
248: }

250: /*
251:     Distributes a SeqAIJ matrix across a set of processes. Code stolen from
252:     MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.

254:     Only for square matrices

256:     Used by a preconditioner, hence PETSC_EXTERN
257: */
258: PETSC_EXTERN PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
259: {
260:   PetscMPIInt    rank,size;
261:   PetscInt       *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz = 0,*gmataj,cnt,row,*ld,bses[2];
263:   Mat            mat;
264:   Mat_SeqAIJ     *gmata;
265:   PetscMPIInt    tag;
266:   MPI_Status     status;
267:   PetscBool      aij;
268:   MatScalar      *gmataa,*ao,*ad,*gmataarestore=0;

271:   MPI_Comm_rank(comm,&rank);
272:   MPI_Comm_size(comm,&size);
273:   if (!rank) {
274:     PetscObjectTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);
275:     if (!aij) SETERRQ1(PetscObjectComm((PetscObject)gmat),PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
276:   }
277:   if (reuse == MAT_INITIAL_MATRIX) {
278:     MatCreate(comm,&mat);
279:     MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
280:     MatGetBlockSizes(gmat,&bses[0],&bses[1]);
281:     MPI_Bcast(bses,2,MPIU_INT,0,comm);
282:     MatSetBlockSizes(mat,bses[0],bses[1]);
283:     MatSetType(mat,MATAIJ);
284:     PetscMalloc1(size+1,&rowners);
285:     PetscMalloc2(m,&dlens,m,&olens);
286:     MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);

288:     rowners[0] = 0;
289:     for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
290:     rstart = rowners[rank];
291:     rend   = rowners[rank+1];
292:     PetscObjectGetNewTag((PetscObject)mat,&tag);
293:     if (!rank) {
294:       gmata = (Mat_SeqAIJ*) gmat->data;
295:       /* send row lengths to all processors */
296:       for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
297:       for (i=1; i<size; i++) {
298:         MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
299:       }
300:       /* determine the number of diagonal and off-diagonal entries in each row */
301:       PetscArrayzero(olens,m);
302:       PetscCalloc1(m,&ld);
303:       jj   = 0;
304:       for (i=0; i<m; i++) {
305:         for (j=0; j<dlens[i]; j++) {
306:           if (gmata->j[jj] < rstart) ld[i]++;
307:           if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
308:           jj++;
309:         }
310:       }
311:       /* send column indices to other processes */
312:       for (i=1; i<size; i++) {
313:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
314:         MPI_Send(&nz,1,MPIU_INT,i,tag,comm);
315:         MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);
316:       }

318:       /* send numerical values to other processes */
319:       for (i=1; i<size; i++) {
320:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
321:         MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
322:       }
323:       gmataa = gmata->a;
324:       gmataj = gmata->j;

326:     } else {
327:       /* receive row lengths */
328:       MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);
329:       /* receive column indices */
330:       MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);
331:       PetscMalloc2(nz,&gmataa,nz,&gmataj);
332:       MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);
333:       /* determine the number of diagonal and off-diagonal entries in each row */
334:       PetscArrayzero(olens,m);
335:       PetscCalloc1(m,&ld);
336:       jj   = 0;
337:       for (i=0; i<m; i++) {
338:         for (j=0; j<dlens[i]; j++) {
339:           if (gmataj[jj] < rstart) ld[i]++;
340:           if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
341:           jj++;
342:         }
343:       }
344:       /* receive numerical values */
345:       PetscArrayzero(gmataa,nz);
346:       MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
347:     }
348:     /* set preallocation */
349:     for (i=0; i<m; i++) {
350:       dlens[i] -= olens[i];
351:     }
352:     MatSeqAIJSetPreallocation(mat,0,dlens);
353:     MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);

355:     for (i=0; i<m; i++) {
356:       dlens[i] += olens[i];
357:     }
358:     cnt = 0;
359:     for (i=0; i<m; i++) {
360:       row  = rstart + i;
361:       MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);
362:       cnt += dlens[i];
363:     }
364:     if (rank) {
365:       PetscFree2(gmataa,gmataj);
366:     }
367:     PetscFree2(dlens,olens);
368:     PetscFree(rowners);

370:     ((Mat_MPIAIJ*)(mat->data))->ld = ld;

372:     *inmat = mat;
373:   } else {   /* column indices are already set; only need to move over numerical values from process 0 */
374:     Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
375:     Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
376:     mat  = *inmat;
377:     PetscObjectGetNewTag((PetscObject)mat,&tag);
378:     if (!rank) {
379:       /* send numerical values to other processes */
380:       gmata  = (Mat_SeqAIJ*) gmat->data;
381:       MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);
382:       gmataa = gmata->a;
383:       for (i=1; i<size; i++) {
384:         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
385:         MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
386:       }
387:       nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
388:     } else {
389:       /* receive numerical values from process 0 */
390:       nz   = Ad->nz + Ao->nz;
391:       PetscMalloc1(nz,&gmataa); gmataarestore = gmataa;
392:       MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
393:     }
394:     /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
395:     ld = ((Mat_MPIAIJ*)(mat->data))->ld;
396:     ad = Ad->a;
397:     ao = Ao->a;
398:     if (mat->rmap->n) {
399:       i  = 0;
400:       nz = ld[i];                                   PetscArraycpy(ao,gmataa,nz); ao += nz; gmataa += nz;
401:       nz = Ad->i[i+1] - Ad->i[i];                   PetscArraycpy(ad,gmataa,nz); ad += nz; gmataa += nz;
402:     }
403:     for (i=1; i<mat->rmap->n; i++) {
404:       nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; PetscArraycpy(ao,gmataa,nz); ao += nz; gmataa += nz;
405:       nz = Ad->i[i+1] - Ad->i[i];                   PetscArraycpy(ad,gmataa,nz); ad += nz; gmataa += nz;
406:     }
407:     i--;
408:     if (mat->rmap->n) {
409:       nz = Ao->i[i+1] - Ao->i[i] - ld[i];           PetscArraycpy(ao,gmataa,nz);
410:     }
411:     if (rank) {
412:       PetscFree(gmataarestore);
413:     }
414:   }
415:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
416:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
417:   return(0);
418: }
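/*
   Example (a hedged sketch of how a caller might use the routine above): distributing a
   square SeqAIJ matrix gmat, whose numerical content lives on process 0, across a
   communicator comm.  The local row count m must be supplied consistently on every
   process, e.g. via PetscSplitOwnership(); the global size M is assumed known everywhere.

     Mat      dmat;
     PetscInt m = PETSC_DECIDE;
     PetscSplitOwnership(comm,&m,&M);
     MatDistribute_MPIAIJ(comm,gmat,m,MAT_INITIAL_MATRIX,&dmat);
     ... later, refresh only the numerical values from process 0 ...
     MatDistribute_MPIAIJ(comm,gmat,m,MAT_REUSE_MATRIX,&dmat);
*/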

420: /*
421:   Local utility routine that creates a mapping from the global column
422: number to the local number in the off-diagonal part of the local
423: storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable, at
424: a slightly higher hash table cost; without it, it is not scalable (each process
425: holds an order N integer array) but is fast to access.
426: */
427: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
428: {
429:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
431:   PetscInt       n = aij->B->cmap->n,i;

434:   if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
435: #if defined(PETSC_USE_CTABLE)
436:   PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
437:   for (i=0; i<n; i++) {
438:     PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
439:   }
440: #else
441:   PetscCalloc1(mat->cmap->N+1,&aij->colmap);
442:   PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
443:   for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
444: #endif
445:   return(0);
446: }
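/*
   Sketch (illustrative) of how the colmap built above is queried later, e.g. in
   MatSetValues_MPIAIJ() and MatGetValues_MPIAIJ(), to translate a global column gcol
   into a local column lcol of the off-diagonal block B:

   #if defined(PETSC_USE_CTABLE)
     PetscTableFind(aij->colmap,gcol+1,&lcol); lcol--;   // stored shifted by one so 0 can mean "absent"
   #else
     lcol = aij->colmap[gcol] - 1;                       // lcol < 0 means gcol is absent from B
   #endif
*/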

448: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol)     \
449: { \
450:     if (col <= lastcol1)  low1 = 0;     \
451:     else                 high1 = nrow1; \
452:     lastcol1 = col;\
453:     while (high1-low1 > 5) { \
454:       t = (low1+high1)/2; \
455:       if (rp1[t] > col) high1 = t; \
456:       else              low1  = t; \
457:     } \
458:       for (_i=low1; _i<high1; _i++) { \
459:         if (rp1[_i] > col) break; \
460:         if (rp1[_i] == col) { \
461:           if (addv == ADD_VALUES) { \
462:             ap1[_i] += value;   \
463:             /* Not sure whether PetscLogFlops will slow down the code or not */ \
464:             (void)PetscLogFlops(1.0);   \
465:            } \
466:           else                    ap1[_i] = value; \
467:           inserted = PETSC_TRUE; \
468:           goto a_noinsert; \
469:         } \
470:       }  \
471:       if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
472:       if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;}                \
473:       if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
474:       MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
475:       N = nrow1++ - 1; a->nz++; high1++; \
476:       /* shift up all the later entries in this row */ \
477:       PetscArraymove(rp1+_i+1,rp1+_i,N-_i+1);\
478:       PetscArraymove(ap1+_i+1,ap1+_i,N-_i+1);\
479:       rp1[_i] = col;  \
480:       ap1[_i] = value;  \
481:       A->nonzerostate++;\
482:       a_noinsert: ; \
483:       ailen[row] = nrow1; \
484: }

486: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
487:   { \
488:     if (col <= lastcol2) low2 = 0;                        \
489:     else high2 = nrow2;                                   \
490:     lastcol2 = col;                                       \
491:     while (high2-low2 > 5) {                              \
492:       t = (low2+high2)/2;                                 \
493:       if (rp2[t] > col) high2 = t;                        \
494:       else             low2  = t;                         \
495:     }                                                     \
496:     for (_i=low2; _i<high2; _i++) {                       \
497:       if (rp2[_i] > col) break;                           \
498:       if (rp2[_i] == col) {                               \
499:         if (addv == ADD_VALUES) {                         \
500:           ap2[_i] += value;                               \
501:           (void)PetscLogFlops(1.0);                       \
502:         }                                                 \
503:         else                    ap2[_i] = value;          \
504:         inserted = PETSC_TRUE;                            \
505:         goto b_noinsert;                                  \
506:       }                                                   \
507:     }                                                     \
508:     if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
509:     if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;}                        \
510:     if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
511:     MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
512:     N = nrow2++ - 1; b->nz++; high2++;                    \
513:     /* shift up all the later entries in this row */      \
514:     PetscArraymove(rp2+_i+1,rp2+_i,N-_i+1);\
515:     PetscArraymove(ap2+_i+1,ap2+_i,N-_i+1);\
516:     rp2[_i] = col;                                        \
517:     ap2[_i] = value;                                      \
518:     B->nonzerostate++;                                    \
519:     b_noinsert: ;                                         \
520:     bilen[row] = nrow2;                                   \
521:   }

523: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
524: {
525:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
526:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
528:   PetscInt       l,*garray = mat->garray,diag;

531:   /* code only works for square matrices A */

533:   /* find size of row to the left of the diagonal part */
534:   MatGetOwnershipRange(A,&diag,0);
535:   row  = row - diag;
536:   for (l=0; l<b->i[row+1]-b->i[row]; l++) {
537:     if (garray[b->j[b->i[row]+l]] > diag) break;
538:   }
539:   PetscArraycpy(b->a+b->i[row],v,l);

541:   /* diagonal part */
542:   PetscArraycpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row]));

544:   /* right of diagonal part */
545:   PetscArraycpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],b->i[row+1]-b->i[row]-l);
546: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
547:   if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && (l || (a->i[row+1]-a->i[row]) || (b->i[row+1]-b->i[row]-l))) A->offloadmask = PETSC_OFFLOAD_CPU;
548: #endif
549:   return(0);
550: }

552: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
553: {
554:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
555:   PetscScalar    value = 0.0;
557:   PetscInt       i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
558:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
559:   PetscBool      roworiented = aij->roworiented;

561:   /* Some Variables required in the macro */
562:   Mat        A                    = aij->A;
563:   Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
564:   PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
565:   MatScalar  *aa                  = a->a;
566:   PetscBool  ignorezeroentries    = a->ignorezeroentries;
567:   Mat        B                    = aij->B;
568:   Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
569:   PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
570:   MatScalar  *ba                  = b->a;
571:   /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
572:    * cannot use "#if defined" inside a macro. */
573:   PETSC_UNUSED PetscBool inserted = PETSC_FALSE;

575:   PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
576:   PetscInt  nonew;
577:   MatScalar *ap1,*ap2;

580:   for (i=0; i<m; i++) {
581:     if (im[i] < 0) continue;
582: #if defined(PETSC_USE_DEBUG)
583:     if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
584: #endif
585:     if (im[i] >= rstart && im[i] < rend) {
586:       row      = im[i] - rstart;
587:       lastcol1 = -1;
588:       rp1      = aj + ai[row];
589:       ap1      = aa + ai[row];
590:       rmax1    = aimax[row];
591:       nrow1    = ailen[row];
592:       low1     = 0;
593:       high1    = nrow1;
594:       lastcol2 = -1;
595:       rp2      = bj + bi[row];
596:       ap2      = ba + bi[row];
597:       rmax2    = bimax[row];
598:       nrow2    = bilen[row];
599:       low2     = 0;
600:       high2    = nrow2;

602:       for (j=0; j<n; j++) {
603:         if (v)  value = roworiented ? v[i*n+j] : v[i+j*m];
604:         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
605:         if (in[j] >= cstart && in[j] < cend) {
606:           col   = in[j] - cstart;
607:           nonew = a->nonew;
608:           MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
609: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
610:           if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) A->offloadmask = PETSC_OFFLOAD_CPU;
611: #endif
612:         } else if (in[j] < 0) continue;
613: #if defined(PETSC_USE_DEBUG)
614:         else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
615: #endif
616:         else {
617:           if (mat->was_assembled) {
618:             if (!aij->colmap) {
619:               MatCreateColmap_MPIAIJ_Private(mat);
620:             }
621: #if defined(PETSC_USE_CTABLE)
622:             PetscTableFind(aij->colmap,in[j]+1,&col);
623:             col--;
624: #else
625:             col = aij->colmap[in[j]] - 1;
626: #endif
627:             if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
628:               MatDisAssemble_MPIAIJ(mat);
629:               col  =  in[j];
630:               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
631:               B        = aij->B;
632:               b        = (Mat_SeqAIJ*)B->data;
633:               bimax    = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
634:               rp2      = bj + bi[row];
635:               ap2      = ba + bi[row];
636:               rmax2    = bimax[row];
637:               nrow2    = bilen[row];
638:               low2     = 0;
639:               high2    = nrow2;
640:               bm       = aij->B->rmap->n;
641:               ba       = b->a;
642:               inserted = PETSC_FALSE;
643:             } else if (col < 0) {
644:               if (1 == ((Mat_SeqAIJ*)(aij->B->data))->nonew) {
645:                 PetscInfo3(mat,"Skipping insertion of new nonzero location in off-diagonal portion of matrix %g(%D,%D)\n",(double)PetscRealPart(value),im[i],in[j]);
646:               } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
647:             }
648:           } else col = in[j];
649:           nonew = b->nonew;
650:           MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
651: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
652:           if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) B->offloadmask = PETSC_OFFLOAD_CPU;
653: #endif
654:         }
655:       }
656:     } else {
657:       if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
658:       if (!aij->donotstash) {
659:         mat->assembled = PETSC_FALSE;
660:         if (roworiented) {
661:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
662:         } else {
663:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
664:         }
665:       }
666:     }
667:   }
668:   return(0);
669: }
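/*
   Example (sketch): the usual way values reach the routine above is through MatSetValues()
   with a logically dense block of entries; the row and column indices ri and cj below are
   illustrative.

     PetscInt    rows[2] = {ri,ri+1}, cols[2] = {cj,cj+1};
     PetscScalar vals[4] = {1.0,2.0,3.0,4.0};            // row oriented: {{1,2},{3,4}}
     MatSetValues(A,2,rows,2,cols,vals,ADD_VALUES);
     ... after all insertions ...
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/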

671: /*
672:     This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
673:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
674:     No off-process parts of the matrix are allowed here, and mat->was_assembled has to be PETSC_FALSE.
675: */
676: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[])
677: {
678:   Mat_MPIAIJ     *aij        = (Mat_MPIAIJ*)mat->data;
679:   Mat            A           = aij->A; /* diagonal part of the matrix */
680:   Mat            B           = aij->B; /* offdiagonal part of the matrix */
681:   Mat_SeqAIJ     *a          = (Mat_SeqAIJ*)A->data;
682:   Mat_SeqAIJ     *b          = (Mat_SeqAIJ*)B->data;
683:   PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,col;
684:   PetscInt       *ailen      = a->ilen,*aj = a->j;
685:   PetscInt       *bilen      = b->ilen,*bj = b->j;
686:   PetscInt       am          = aij->A->rmap->n,j;
687:   PetscInt       diag_so_far = 0,dnz;
688:   PetscInt       offd_so_far = 0,onz;

691:   /* Iterate over all rows of the matrix */
692:   for (j=0; j<am; j++) {
693:     dnz = onz = 0;
694:     /*  Iterate over all non-zero columns of the current row */
695:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
696:       /* If column is in the diagonal */
697:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
698:         aj[diag_so_far++] = mat_j[col] - cstart;
699:         dnz++;
700:       } else { /* off-diagonal entries */
701:         bj[offd_so_far++] = mat_j[col];
702:         onz++;
703:       }
704:     }
705:     ailen[j] = dnz;
706:     bilen[j] = onz;
707:   }
708:   return(0);
709: }
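/*
   Worked example (assumed data) for the split performed above: with cstart = 4 and
   cend = 8, a row whose sorted global columns are {1, 4, 6, 9} is divided as

     diagonal block A      : local columns {0, 2}   (4-cstart, 6-cstart)  ->  ailen[row] = 2
     off-diagonal block B  : global columns {1, 9}                        ->  bilen[row] = 2
*/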

711: /*
712:     This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
713:     The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
714:     No off-process parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ().
715:     Also, mat->was_assembled has to be false, otherwise the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
716:     would not be true and the more complex MatSetValues_MPIAIJ has to be used.
717: */
718: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[],const PetscScalar mat_a[])
719: {
720:   Mat_MPIAIJ     *aij   = (Mat_MPIAIJ*)mat->data;
721:   Mat            A      = aij->A; /* diagonal part of the matrix */
722:   Mat            B      = aij->B; /* offdiagonal part of the matrix */
723:   Mat_SeqAIJ     *aijd  =(Mat_SeqAIJ*)(aij->A)->data,*aijo=(Mat_SeqAIJ*)(aij->B)->data;
724:   Mat_SeqAIJ     *a     = (Mat_SeqAIJ*)A->data;
725:   Mat_SeqAIJ     *b     = (Mat_SeqAIJ*)B->data;
726:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend;
727:   PetscInt       *ailen = a->ilen,*aj = a->j;
728:   PetscInt       *bilen = b->ilen,*bj = b->j;
729:   PetscInt       am     = aij->A->rmap->n,j;
730:   PetscInt       *full_diag_i=aijd->i,*full_offd_i=aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
731:   PetscInt       col,dnz_row,onz_row,rowstart_diag,rowstart_offd;
732:   PetscScalar    *aa = a->a,*ba = b->a;

735:   /* Iterate over all rows of the matrix */
736:   for (j=0; j<am; j++) {
737:     dnz_row = onz_row = 0;
738:     rowstart_offd = full_offd_i[j];
739:     rowstart_diag = full_diag_i[j];
740:     /*  Iterate over all non-zero columns of the current row */
741:     for (col=mat_i[j]; col<mat_i[j+1]; col++) {
742:       /* If column is in the diagonal */
743:       if (mat_j[col] >= cstart && mat_j[col] < cend) {
744:         aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
745:         aa[rowstart_diag+dnz_row] = mat_a[col];
746:         dnz_row++;
747:       } else { /* off-diagonal entries */
748:         bj[rowstart_offd+onz_row] = mat_j[col];
749:         ba[rowstart_offd+onz_row] = mat_a[col];
750:         onz_row++;
751:       }
752:     }
753:     ailen[j] = dnz_row;
754:     bilen[j] = onz_row;
755:   }
756:   return(0);
757: }

759: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
760: {
761:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
763:   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
764:   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;

767:   for (i=0; i<m; i++) {
768:     if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
769:     if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
770:     if (idxm[i] >= rstart && idxm[i] < rend) {
771:       row = idxm[i] - rstart;
772:       for (j=0; j<n; j++) {
773:         if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
774:         if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
775:         if (idxn[j] >= cstart && idxn[j] < cend) {
776:           col  = idxn[j] - cstart;
777:           MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
778:         } else {
779:           if (!aij->colmap) {
780:             MatCreateColmap_MPIAIJ_Private(mat);
781:           }
782: #if defined(PETSC_USE_CTABLE)
783:           PetscTableFind(aij->colmap,idxn[j]+1,&col);
784:           col--;
785: #else
786:           col = aij->colmap[idxn[j]] - 1;
787: #endif
788:           if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
789:           else {
790:             MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
791:           }
792:         }
793:       }
794:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
795:   }
796:   return(0);
797: }
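/*
   Example (sketch): unlike MatSetValues(), only locally owned rows may be queried here.
   rstart is assumed to come from MatGetOwnershipRange().

     PetscInt    row = rstart, cols[2] = {0,1};
     PetscScalar vals[2];
     MatGetValues(A,1,&row,2,cols,vals);
*/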

799: extern PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat,Vec,Vec);

801: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
802: {
803:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
805:   PetscInt       nstash,reallocs;

808:   if (aij->donotstash || mat->nooffprocentries) return(0);

810:   MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
811:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
812:   PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
813:   return(0);
814: }

816: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
817: {
818:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
819:   Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)aij->A->data;
821:   PetscMPIInt    n;
822:   PetscInt       i,j,rstart,ncols,flg;
823:   PetscInt       *row,*col;
824:   PetscBool      other_disassembled;
825:   PetscScalar    *val;

827:   /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */

830:   if (!aij->donotstash && !mat->nooffprocentries) {
831:     while (1) {
832:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
833:       if (!flg) break;

835:       for (i=0; i<n; ) {
836:         /* Now identify the consecutive vals belonging to the same row */
837:         for (j=i,rstart=row[j]; j<n; j++) {
838:           if (row[j] != rstart) break;
839:         }
840:         if (j < n) ncols = j-i;
841:         else       ncols = n-i;
842:         /* Now assemble all these values with a single function call */
843:         MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);

845:         i = j;
846:       }
847:     }
848:     MatStashScatterEnd_Private(&mat->stash);
849:   }
850: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
851:   if (mat->offloadmask == PETSC_OFFLOAD_CPU) aij->A->offloadmask = PETSC_OFFLOAD_CPU;
852: #endif
853:   MatAssemblyBegin(aij->A,mode);
854:   MatAssemblyEnd(aij->A,mode);

856:   /* determine whether any process has disassembled; if so, we must
857:      also disassemble ourselves so that we can reassemble. */
858:   /*
859:      if nonzero structure of submatrix B cannot change then we know that
860:      no processor disassembled thus we can skip this stuff
861:   */
862:   if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
863:     MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
864:     if (mat->was_assembled && !other_disassembled) {
865: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
866:       aij->B->offloadmask = PETSC_OFFLOAD_BOTH; /* do not copy on the GPU when assembling inside MatDisAssemble_MPIAIJ */
867: #endif
868:       MatDisAssemble_MPIAIJ(mat);
869:     }
870:   }
871:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
872:     MatSetUpMultiply_MPIAIJ(mat);
873:   }
874:   MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
875: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
876:   if (mat->offloadmask == PETSC_OFFLOAD_CPU && aij->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) aij->B->offloadmask = PETSC_OFFLOAD_CPU;
877: #endif
878:   MatAssemblyBegin(aij->B,mode);
879:   MatAssemblyEnd(aij->B,mode);

881:   PetscFree2(aij->rowvalues,aij->rowindices);

883:   aij->rowvalues = 0;

885:   VecDestroy(&aij->diag);
886:   if (a->inode.size) mat->ops->multdiagonalblock = MatMultDiagonalBlock_MPIAIJ;

888:   /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
889:   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
890:     PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
891:     MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
892:   }
893: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
894:   mat->offloadmask = PETSC_OFFLOAD_BOTH;
895: #endif
896:   return(0);
897: }

899: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
900: {
901:   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;

905:   MatZeroEntries(l->A);
906:   MatZeroEntries(l->B);
907:   return(0);
908: }

910: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
911: {
912:   Mat_MPIAIJ      *mat = (Mat_MPIAIJ *) A->data;
913:   PetscObjectState sA, sB;
914:   PetscInt        *lrows;
915:   PetscInt         r, len;
916:   PetscBool        cong, lch, gch;
917:   PetscErrorCode   ierr;

920:   /* get locally owned rows */
921:   MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);
922:   MatHasCongruentLayouts(A,&cong);
923:   /* fix right hand side if needed */
924:   if (x && b) {
925:     const PetscScalar *xx;
926:     PetscScalar       *bb;

928:     if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
929:     VecGetArrayRead(x, &xx);
930:     VecGetArray(b, &bb);
931:     for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
932:     VecRestoreArrayRead(x, &xx);
933:     VecRestoreArray(b, &bb);
934:   }

936:   sA = mat->A->nonzerostate;
937:   sB = mat->B->nonzerostate;

939:   if (diag != 0.0 && cong) {
940:     MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
941:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
942:   } else if (diag != 0.0) { /* non-square or non congruent layouts -> if keepnonzeropattern is false, we allow for new insertion */
943:     Mat_SeqAIJ *aijA = (Mat_SeqAIJ*)mat->A->data;
944:     Mat_SeqAIJ *aijB = (Mat_SeqAIJ*)mat->B->data;
945:     PetscInt   nnwA, nnwB;
946:     PetscBool  nnzA, nnzB;

948:     nnwA = aijA->nonew;
949:     nnwB = aijB->nonew;
950:     nnzA = aijA->keepnonzeropattern;
951:     nnzB = aijB->keepnonzeropattern;
952:     if (!nnzA) {
953:       PetscInfo(mat->A,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on diagonal block.\n");
954:       aijA->nonew = 0;
955:     }
956:     if (!nnzB) {
957:       PetscInfo(mat->B,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on off-diagonal block.\n");
958:       aijB->nonew = 0;
959:     }
960:     /* Must zero here before the next loop */
961:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
962:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
963:     for (r = 0; r < len; ++r) {
964:       const PetscInt row = lrows[r] + A->rmap->rstart;
965:       if (row >= A->cmap->N) continue;
966:       MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
967:     }
968:     aijA->nonew = nnwA;
969:     aijB->nonew = nnwB;
970:   } else {
971:     MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
972:     MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
973:   }
974:   PetscFree(lrows);
975:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
976:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

978:   /* reduce nonzerostate */
979:   lch = (PetscBool)(sA != mat->A->nonzerostate || sB != mat->B->nonzerostate);
980:   MPIU_Allreduce(&lch,&gch,1,MPIU_BOOL,MPI_LOR,PetscObjectComm((PetscObject)A));
981:   if (gch) A->nonzerostate++;
982:   return(0);
983: }
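/*
   Example (sketch): zeroing two global rows of A through the public interface, keeping a
   unit diagonal, and adjusting the right-hand side b so that the values already in x remain
   a solution for those rows.  r0 and r1 are hypothetical global row numbers.

     PetscInt rows[2] = {r0,r1};
     MatZeroRows(A,2,rows,1.0,x,b);
*/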

985: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
986: {
987:   Mat_MPIAIJ        *l = (Mat_MPIAIJ*)A->data;
988:   PetscErrorCode    ierr;
989:   PetscMPIInt       n = A->rmap->n;
990:   PetscInt          i,j,r,m,p = 0,len = 0;
991:   PetscInt          *lrows,*owners = A->rmap->range;
992:   PetscSFNode       *rrows;
993:   PetscSF           sf;
994:   const PetscScalar *xx;
995:   PetscScalar       *bb,*mask;
996:   Vec               xmask,lmask;
997:   Mat_SeqAIJ        *aij = (Mat_SeqAIJ*)l->B->data;
998:   const PetscInt    *aj, *ii,*ridx;
999:   PetscScalar       *aa;

1002:   /* Create SF where leaves are input rows and roots are owned rows */
1003:   PetscMalloc1(n, &lrows);
1004:   for (r = 0; r < n; ++r) lrows[r] = -1;
1005:   PetscMalloc1(N, &rrows);
1006:   for (r = 0; r < N; ++r) {
1007:     const PetscInt idx   = rows[r];
1008:     if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
1009:     if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
1010:       PetscLayoutFindOwner(A->rmap,idx,&p);
1011:     }
1012:     rrows[r].rank  = p;
1013:     rrows[r].index = rows[r] - owners[p];
1014:   }
1015:   PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
1016:   PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
1017:   /* Collect flags for rows to be zeroed */
1018:   PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1019:   PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1020:   PetscSFDestroy(&sf);
1021:   /* Compress and put in row numbers */
1022:   for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
1023:   /* zero diagonal part of matrix */
1024:   MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
1025:   /* handle off diagonal part of matrix */
1026:   MatCreateVecs(A,&xmask,NULL);
1027:   VecDuplicate(l->lvec,&lmask);
1028:   VecGetArray(xmask,&bb);
1029:   for (i=0; i<len; i++) bb[lrows[i]] = 1;
1030:   VecRestoreArray(xmask,&bb);
1031:   VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
1032:   VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
1033:   VecDestroy(&xmask);
1034:   if (x && b) { /* this code is buggy when the row and column layout don't match */
1035:     PetscBool cong;

1037:     MatHasCongruentLayouts(A,&cong);
1038:     if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
1039:     VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
1040:     VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
1041:     VecGetArrayRead(l->lvec,&xx);
1042:     VecGetArray(b,&bb);
1043:   }
1044:   VecGetArray(lmask,&mask);
1045:   /* remove zeroed rows of off diagonal matrix */
1046:   ii = aij->i;
1047:   for (i=0; i<len; i++) {
1048:     PetscArrayzero(aij->a + ii[lrows[i]],ii[lrows[i]+1] - ii[lrows[i]]);
1049:   }
1050:   /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
1051:   if (aij->compressedrow.use) {
1052:     m    = aij->compressedrow.nrows;
1053:     ii   = aij->compressedrow.i;
1054:     ridx = aij->compressedrow.rindex;
1055:     for (i=0; i<m; i++) {
1056:       n  = ii[i+1] - ii[i];
1057:       aj = aij->j + ii[i];
1058:       aa = aij->a + ii[i];

1060:       for (j=0; j<n; j++) {
1061:         if (PetscAbsScalar(mask[*aj])) {
1062:           if (b) bb[*ridx] -= *aa*xx[*aj];
1063:           *aa = 0.0;
1064:         }
1065:         aa++;
1066:         aj++;
1067:       }
1068:       ridx++;
1069:     }
1070:   } else { /* do not use compressed row format */
1071:     m = l->B->rmap->n;
1072:     for (i=0; i<m; i++) {
1073:       n  = ii[i+1] - ii[i];
1074:       aj = aij->j + ii[i];
1075:       aa = aij->a + ii[i];
1076:       for (j=0; j<n; j++) {
1077:         if (PetscAbsScalar(mask[*aj])) {
1078:           if (b) bb[i] -= *aa*xx[*aj];
1079:           *aa = 0.0;
1080:         }
1081:         aa++;
1082:         aj++;
1083:       }
1084:     }
1085:   }
1086:   if (x && b) {
1087:     VecRestoreArray(b,&bb);
1088:     VecRestoreArrayRead(l->lvec,&xx);
1089:   }
1090:   VecRestoreArray(lmask,&mask);
1091:   VecDestroy(&lmask);
1092:   PetscFree(lrows);

1094:   /* only change matrix nonzero state if pattern was allowed to be changed */
1095:   if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
1096:     PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
1097:     MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
1098:   }
1099:   return(0);
1100: }

1102: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
1103: {
1104:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1106:   PetscInt       nt;
1107:   VecScatter     Mvctx = a->Mvctx;

1110:   VecGetLocalSize(xx,&nt);
1111:   if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);

1113:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1114:   (*a->A->ops->mult)(a->A,xx,yy);
1115:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1116:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1117:   return(0);
1118: }
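/*
   What the routine above computes (in block form): with A_d the diagonal block a->A,
   A_o the off-diagonal block a->B, and x_g the ghost values of xx gathered by Mvctx,

     yy = A_d * xx    followed by    yy = yy + A_o * x_g

   the local product is performed while the scatter started by VecScatterBegin() is in flight.
*/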

1120: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
1121: {
1122:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1126:   MatMultDiagonalBlock(a->A,bb,xx);
1127:   return(0);
1128: }

1130: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1131: {
1132:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1134:   VecScatter     Mvctx = a->Mvctx;

1137:   if (a->Mvctx_mpi1_flg) Mvctx = a->Mvctx_mpi1;
1138:   VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1139:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
1140:   VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1141:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1142:   return(0);
1143: }

1145: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
1146: {
1147:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1151:   /* do nondiagonal part */
1152:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1153:   /* do local part */
1154:   (*a->A->ops->multtranspose)(a->A,xx,yy);
1155:   /* add partial results together */
1156:   VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1157:   VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1158:   return(0);
1159: }

1161: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool  *f)
1162: {
1163:   MPI_Comm       comm;
1164:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
1165:   Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
1166:   IS             Me,Notme;
1168:   PetscInt       M,N,first,last,*notme,i;
1169:   PetscBool      lf;
1170:   PetscMPIInt    size;

1173:   /* Easy test: symmetric diagonal block */
1174:   Bij  = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
1175:   MatIsTranspose(Adia,Bdia,tol,&lf);
1176:   MPIU_Allreduce(&lf,f,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)Amat));
1177:   if (!*f) return(0);
1178:   PetscObjectGetComm((PetscObject)Amat,&comm);
1179:   MPI_Comm_size(comm,&size);
1180:   if (size == 1) return(0);

1182:   /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1183:   MatGetSize(Amat,&M,&N);
1184:   MatGetOwnershipRange(Amat,&first,&last);
1185:   PetscMalloc1(N-last+first,&notme);
1186:   for (i=0; i<first; i++) notme[i] = i;
1187:   for (i=last; i<M; i++) notme[i-last+first] = i;
1188:   ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1189:   ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1190:   MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1191:   Aoff = Aoffs[0];
1192:   MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1193:   Boff = Boffs[0];
1194:   MatIsTranspose(Aoff,Boff,tol,f);
1195:   MatDestroyMatrices(1,&Aoffs);
1196:   MatDestroyMatrices(1,&Boffs);
1197:   ISDestroy(&Me);
1198:   ISDestroy(&Notme);
1199:   PetscFree(notme);
1200:   return(0);
1201: }

1203: PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A,PetscReal tol,PetscBool  *f)
1204: {

1208:   MatIsTranspose_MPIAIJ(A,A,tol,f);
1209:   return(0);
1210: }
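/*
   Example (sketch): both tests above are reached through the generic interface.

     PetscBool flg;
     MatIsTranspose(A,B,1.e-10,&flg);
     MatIsSymmetric(A,1.e-10,&flg);
*/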

1212: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1213: {
1214:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1218:   /* do nondiagonal part */
1219:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1220:   /* do local part */
1221:   (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1222:   /* add partial results together */
1223:   VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1224:   VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1225:   return(0);
1226: }

1228: /*
1229:   This only works correctly for square matrices where the subblock A->A is the
1230:    diagonal block
1231: */
1232: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1233: {
1235:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1238:   if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1239:   if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
1240:   MatGetDiagonal(a->A,v);
1241:   return(0);
1242: }

1244: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1245: {
1246:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1250:   MatScale(a->A,aa);
1251:   MatScale(a->B,aa);
1252:   return(0);
1253: }

1255: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1256: {
1257:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

1261: #if defined(PETSC_USE_LOG)
1262:   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
1263: #endif
1264:   MatStashDestroy_Private(&mat->stash);
1265:   VecDestroy(&aij->diag);
1266:   MatDestroy(&aij->A);
1267:   MatDestroy(&aij->B);
1268: #if defined(PETSC_USE_CTABLE)
1269:   PetscTableDestroy(&aij->colmap);
1270: #else
1271:   PetscFree(aij->colmap);
1272: #endif
1273:   PetscFree(aij->garray);
1274:   VecDestroy(&aij->lvec);
1275:   VecScatterDestroy(&aij->Mvctx);
1276:   if (aij->Mvctx_mpi1) {VecScatterDestroy(&aij->Mvctx_mpi1);}
1277:   PetscFree2(aij->rowvalues,aij->rowindices);
1278:   PetscFree(aij->ld);
1279:   PetscFree(mat->data);

1281:   PetscObjectChangeTypeName((PetscObject)mat,0);
1282:   PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1283:   PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1284:   PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1285:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1286:   PetscObjectComposeFunction((PetscObject)mat,"MatResetPreallocation_C",NULL);
1287:   PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1288:   PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1289:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1290: #if defined(PETSC_HAVE_ELEMENTAL)
1291:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1292: #endif
1293: #if defined(PETSC_HAVE_HYPRE)
1294:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);
1295:   PetscObjectComposeFunction((PetscObject)mat,"MatMatMatMult_transpose_mpiaij_mpiaij_C",NULL);
1296: #endif
1297:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1298:   PetscObjectComposeFunction((PetscObject)mat,"MatPtAP_is_mpiaij_C",NULL);
1299:   return(0);
1300: }

1302: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1303: {
1304:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1305:   Mat_SeqAIJ     *A   = (Mat_SeqAIJ*)aij->A->data;
1306:   Mat_SeqAIJ     *B   = (Mat_SeqAIJ*)aij->B->data;
1308:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
1309:   int            fd;
1310:   PetscInt       nz,header[4],*row_lengths,*range=0,rlen,i;
1311:   PetscInt       nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz = 0;
1312:   PetscScalar    *column_values;
1313:   PetscInt       message_count,flowcontrolcount;
1314:   FILE           *file;

1317:   MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1318:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
1319:   nz   = A->nz + B->nz;
1320:   PetscViewerBinaryGetDescriptor(viewer,&fd);
1321:   if (!rank) {
1322:     header[0] = MAT_FILE_CLASSID;
1323:     header[1] = mat->rmap->N;
1324:     header[2] = mat->cmap->N;

1326:     MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1327:     PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);
1328:     /* get largest number of rows any processor has */
1329:     rlen  = mat->rmap->n;
1330:     range = mat->rmap->range;
1331:     for (i=1; i<size; i++) rlen = PetscMax(rlen,range[i+1] - range[i]);
1332:   } else {
1333:     MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1334:     rlen = mat->rmap->n;
1335:   }

1337:   /* load up the local row counts */
1338:   PetscMalloc1(rlen+1,&row_lengths);
1339:   for (i=0; i<mat->rmap->n; i++) row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];

1341:   /* store the row lengths to the file */
1342:   PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1343:   if (!rank) {
1344:     PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);
1345:     for (i=1; i<size; i++) {
1346:       PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1347:       rlen = range[i+1] - range[i];
1348:       MPIULong_Recv(row_lengths,rlen,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1349:       PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);
1350:     }
1351:     PetscViewerFlowControlEndMaster(viewer,&message_count);
1352:   } else {
1353:     PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1354:     MPIULong_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1355:     PetscViewerFlowControlEndWorker(viewer,&message_count);
1356:   }
1357:   PetscFree(row_lengths);

1359:   /* load up the local column indices */
1360:   nzmax = nz; /* this process needs as much space as the largest process needs */
1361:   MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,PetscObjectComm((PetscObject)mat));
1362:   PetscMalloc1(nzmax+1,&column_indices);
1363:   cnt   = 0;
1364:   for (i=0; i<mat->rmap->n; i++) {
1365:     for (j=B->i[i]; j<B->i[i+1]; j++) {
1366:       if ((col = garray[B->j[j]]) > cstart) break;
1367:       column_indices[cnt++] = col;
1368:     }
1369:     for (k=A->i[i]; k<A->i[i+1]; k++) column_indices[cnt++] = A->j[k] + cstart;
1370:     for (; j<B->i[i+1]; j++) column_indices[cnt++] = garray[B->j[j]];
1371:   }
1372:   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);

1374:   /* store the column indices to the file */
1375:   PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1376:   if (!rank) {
1377:     MPI_Status status;
1378:     PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);
1379:     for (i=1; i<size; i++) {
1380:       PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1381:       MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1382:       if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1383:       MPIULong_Recv(column_indices,rnz,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));
1384:       PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);
1385:     }
1386:     PetscViewerFlowControlEndMaster(viewer,&message_count);
1387:   } else {
1388:     PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1389:     MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1390:     MPIULong_Send(column_indices,nz,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1391:     PetscViewerFlowControlEndWorker(viewer,&message_count);
1392:   }
1393:   PetscFree(column_indices);

1395:   /* load up the local column values */
1396:   PetscMalloc1(nzmax+1,&column_values);
1397:   cnt  = 0;
1398:   for (i=0; i<mat->rmap->n; i++) {
1399:     for (j=B->i[i]; j<B->i[i+1]; j++) {
1400:       if (garray[B->j[j]] > cstart) break;
1401:       column_values[cnt++] = B->a[j];
1402:     }
1403:     for (k=A->i[i]; k<A->i[i+1]; k++) column_values[cnt++] = A->a[k];
1404:     for (; j<B->i[i+1]; j++) column_values[cnt++] = B->a[j];
1405:   }
1406:   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);

1408:   /* store the column values to the file */
1409:   PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1410:   if (!rank) {
1411:     MPI_Status status;
1412:     PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);
1413:     for (i=1; i<size; i++) {
1414:       PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1415:       MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1416:       if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1417:       MPIULong_Recv(column_values,rnz,MPIU_SCALAR,i,tag,PetscObjectComm((PetscObject)mat));
1418:       PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);
1419:     }
1420:     PetscViewerFlowControlEndMaster(viewer,&message_count);
1421:   } else {
1422:     PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1423:     MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1424:     MPIULong_Send(column_values,nz,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));
1425:     PetscViewerFlowControlEndWorker(viewer,&message_count);
1426:   }
1427:   PetscFree(column_values);

1429:   PetscViewerBinaryGetInfoPointer(viewer,&file);
1430:   if (file) fprintf(file,"-matload_block_size %d\n",(int)PetscAbs(mat->rmap->bs));
1431:   return(0);
1432: }
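
For orientation, a minimal usage sketch (not part of this file) showing how the binary writer above is typically reached. It assumes mat is an assembled MATMPIAIJ matrix on more than one process; the file name "matrix.dat" is only illustrative.

  PetscViewer viewer;
  PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_WRITE,&viewer);
  MatView(mat,viewer);            /* with size > 1 this ends up in MatView_MPIAIJ_Binary() */
  PetscViewerDestroy(&viewer);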

1434:  #include <petscdraw.h>
1435: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1436: {
1437:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1438:   PetscErrorCode    ierr;
1439:   PetscMPIInt       rank = aij->rank,size = aij->size;
1440:   PetscBool         isdraw,iascii,isbinary;
1441:   PetscViewer       sviewer;
1442:   PetscViewerFormat format;

1445:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1446:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1447:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1448:   if (iascii) {
1449:     PetscViewerGetFormat(viewer,&format);
1450:     if (format == PETSC_VIEWER_LOAD_BALANCE) {
1451:       PetscInt i,nmax = 0,nmin = PETSC_MAX_INT,navg = 0,*nz,nzlocal = ((Mat_SeqAIJ*) (aij->A->data))->nz + ((Mat_SeqAIJ*) (aij->B->data))->nz;
1452:       PetscMalloc1(size,&nz);
1453:       MPI_Allgather(&nzlocal,1,MPIU_INT,nz,1,MPIU_INT,PetscObjectComm((PetscObject)mat));
1454:       for (i=0; i<(PetscInt)size; i++) {
1455:         nmax = PetscMax(nmax,nz[i]);
1456:         nmin = PetscMin(nmin,nz[i]);
1457:         navg += nz[i];
1458:       }
1459:       PetscFree(nz);
1460:       navg = navg/size;
1461:       PetscViewerASCIIPrintf(viewer,"Load Balance - Nonzeros: Min %D  avg %D  max %D\n",nmin,navg,nmax);
1462:       return(0);
1463:     }
1464:     PetscViewerGetFormat(viewer,&format);
1465:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1466:       MatInfo   info;
1467:       PetscBool inodes;

1469:       MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1470:       MatGetInfo(mat,MAT_LOCAL,&info);
1471:       MatInodeGetInodeSizes(aij->A,NULL,(PetscInt**)&inodes,NULL);
1472:       PetscViewerASCIIPushSynchronized(viewer);
1473:       if (!inodes) {
1474:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, not using I-node routines\n",
1475:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1476:       } else {
1477:         PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, using I-node routines\n",
1478:                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1479:       }
1480:       MatGetInfo(aij->A,MAT_LOCAL,&info);
1481:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1482:       MatGetInfo(aij->B,MAT_LOCAL,&info);
1483:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1484:       PetscViewerFlush(viewer);
1485:       PetscViewerASCIIPopSynchronized(viewer);
1486:       PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1487:       VecScatterView(aij->Mvctx,viewer);
1488:       return(0);
1489:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1490:       PetscInt inodecount,inodelimit,*inodes;
1491:       MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1492:       if (inodes) {
1493:         PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1494:       } else {
1495:         PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1496:       }
1497:       return(0);
1498:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1499:       return(0);
1500:     }
1501:   } else if (isbinary) {
1502:     if (size == 1) {
1503:       PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1504:       MatView(aij->A,viewer);
1505:     } else {
1506:       MatView_MPIAIJ_Binary(mat,viewer);
1507:     }
1508:     return(0);
1509:   } else if (iascii && size == 1) {
1510:     PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1511:     MatView(aij->A,viewer);
1512:     return(0);
1513:   } else if (isdraw) {
1514:     PetscDraw draw;
1515:     PetscBool isnull;
1516:     PetscViewerDrawGetDraw(viewer,0,&draw);
1517:     PetscDrawIsNull(draw,&isnull);
1518:     if (isnull) return(0);
1519:   }

1521:   { /* assemble the entire matrix onto first processor */
1522:     Mat A = NULL, Av;
1523:     IS  isrow,iscol;

1525:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1526:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1527:     MatCreateSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&A);
1528:     MatMPIAIJGetSeqAIJ(A,&Av,NULL,NULL);
1529: /*  The commented-out code below does the same thing using MatCreateSubMatrices() instead */
1530: /*
1531:     Mat *AA, A = NULL, Av;
1532:     IS  isrow,iscol;

1534:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1535:     ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1536:     MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA);
1537:     if (!rank) {
1538:        PetscObjectReference((PetscObject)AA[0]);
1539:        A    = AA[0];
1540:        Av   = AA[0];
1541:     }
1542:     MatDestroySubMatrices(1,&AA);
1543: */
1544:     ISDestroy(&iscol);
1545:     ISDestroy(&isrow);
1546:     /*
1547:        Every process has to participate in drawing the matrix, since the graphics waits are
1548:        synchronized across all processes that share the PetscDraw object
1549:     */
1550:     PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1551:     if (!rank) {
1552:       if (((PetscObject)mat)->name) {
1553:         PetscObjectSetName((PetscObject)Av,((PetscObject)mat)->name);
1554:       }
1555:       MatView_SeqAIJ(Av,sviewer);
1556:     }
1557:     PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1558:     PetscViewerFlush(viewer);
1559:     MatDestroy(&A);
1560:   }
1561:   return(0);
1562: }
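
A short usage sketch (not part of this file): pushing one of the ASCII info formats before MatView() selects the corresponding branches above; mat is assumed to be an assembled MATMPIAIJ matrix.

  PetscViewerPushFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);
  MatView(mat,PETSC_VIEWER_STDOUT_WORLD);
  PetscViewerPopFormat(PETSC_VIEWER_STDOUT_WORLD);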

1564: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1565: {
1567:   PetscBool      iascii,isdraw,issocket,isbinary;

1570:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1571:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1572:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1573:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1574:   if (iascii || isdraw || isbinary || issocket) {
1575:     MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1576:   }
1577:   return(0);
1578: }

1580: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1581: {
1582:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1584:   Vec            bb1 = 0;
1585:   PetscBool      hasop;

1588:   if (flag == SOR_APPLY_UPPER) {
1589:     (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1590:     return(0);
1591:   }

1593:   if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
1594:     VecDuplicate(bb,&bb1);
1595:   }

1597:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1598:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1599:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1600:       its--;
1601:     }

1603:     while (its--) {
1604:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1605:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1607:       /* update rhs: bb1 = bb - B*x */
1608:       VecScale(mat->lvec,-1.0);
1609:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1611:       /* local sweep */
1612:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1613:     }
1614:   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1615:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1616:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1617:       its--;
1618:     }
1619:     while (its--) {
1620:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1621:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1623:       /* update rhs: bb1 = bb - B*x */
1624:       VecScale(mat->lvec,-1.0);
1625:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1627:       /* local sweep */
1628:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1629:     }
1630:   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1631:     if (flag & SOR_ZERO_INITIAL_GUESS) {
1632:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1633:       its--;
1634:     }
1635:     while (its--) {
1636:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1637:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);

1639:       /* update rhs: bb1 = bb - B*x */
1640:       VecScale(mat->lvec,-1.0);
1641:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);

1643:       /* local sweep */
1644:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1645:     }
1646:   } else if (flag & SOR_EISENSTAT) {
1647:     Vec xx1;

1649:     VecDuplicate(bb,&xx1);
1650:     (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);

1652:     VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1653:     VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1654:     if (!mat->diag) {
1655:       MatCreateVecs(matin,&mat->diag,NULL);
1656:       MatGetDiagonal(matin,mat->diag);
1657:     }
1658:     MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1659:     if (hasop) {
1660:       MatMultDiagonalBlock(matin,xx,bb1);
1661:     } else {
1662:       VecPointwiseMult(bb1,mat->diag,xx);
1663:     }
1664:     VecAYPX(bb1,(omega-2.0)/omega,bb);

1666:     MatMultAdd(mat->B,mat->lvec,bb1,bb1);

1668:     /* local sweep */
1669:     (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1670:     VecAXPY(xx,1.0,xx1);
1671:     VecDestroy(&xx1);
1672:   } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");

1674:   VecDestroy(&bb1);

1676:   matin->factorerrortype = mat->A->factorerrortype;
1677:   return(0);
1678: }
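
MatSOR_MPIAIJ() is normally reached through the PCSOR preconditioner; as the final SETERRQ above indicates, only the local sweeps are supported in parallel. A minimal sketch, assuming mat, b, and x are an assembled MATMPIAIJ matrix and conforming vectors:

  KSP ksp;
  PC  pc;
  KSPCreate(PETSC_COMM_WORLD,&ksp);
  KSPSetOperators(ksp,mat,mat);
  KSPGetPC(ksp,&pc);
  PCSetType(pc,PCSOR);                              /* each application calls MatSOR() */
  PCSORSetSymmetric(pc,SOR_LOCAL_SYMMETRIC_SWEEP);  /* a sweep that is supported in parallel */
  KSPSolve(ksp,b,x);
  KSPDestroy(&ksp);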

1680: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1681: {
1682:   Mat            aA,aB,Aperm;
1683:   const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1684:   PetscScalar    *aa,*ba;
1685:   PetscInt       i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1686:   PetscSF        rowsf,sf;
1687:   IS             parcolp = NULL;
1688:   PetscBool      done;

1692:   MatGetLocalSize(A,&m,&n);
1693:   ISGetIndices(rowp,&rwant);
1694:   ISGetIndices(colp,&cwant);
1695:   PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);

1697:   /* Invert row permutation to find out where my rows should go */
1698:   PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1699:   PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1700:   PetscSFSetFromOptions(rowsf);
1701:   for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1702:   PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1703:   PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);

1705:   /* Invert column permutation to find out where my columns should go */
1706:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1707:   PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1708:   PetscSFSetFromOptions(sf);
1709:   for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1710:   PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1711:   PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1712:   PetscSFDestroy(&sf);

1714:   ISRestoreIndices(rowp,&rwant);
1715:   ISRestoreIndices(colp,&cwant);
1716:   MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);

1718:   /* Find out where my gcols should go */
1719:   MatGetSize(aB,NULL,&ng);
1720:   PetscMalloc1(ng,&gcdest);
1721:   PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1722:   PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1723:   PetscSFSetFromOptions(sf);
1724:   PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest);
1725:   PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest);
1726:   PetscSFDestroy(&sf);

1728:   PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1729:   MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1730:   MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1731:   for (i=0; i<m; i++) {
1732:     PetscInt row = rdest[i],rowner;
1733:     PetscLayoutFindOwner(A->rmap,row,&rowner);
1734:     for (j=ai[i]; j<ai[i+1]; j++) {
1735:       PetscInt cowner,col = cdest[aj[j]];
1736:       PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1737:       if (rowner == cowner) dnnz[i]++;
1738:       else onnz[i]++;
1739:     }
1740:     for (j=bi[i]; j<bi[i+1]; j++) {
1741:       PetscInt cowner,col = gcdest[bj[j]];
1742:       PetscLayoutFindOwner(A->cmap,col,&cowner);
1743:       if (rowner == cowner) dnnz[i]++;
1744:       else onnz[i]++;
1745:     }
1746:   }
1747:   PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz);
1748:   PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz);
1749:   PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz);
1750:   PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz);
1751:   PetscSFDestroy(&rowsf);

1753:   MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1754:   MatSeqAIJGetArray(aA,&aa);
1755:   MatSeqAIJGetArray(aB,&ba);
1756:   for (i=0; i<m; i++) {
1757:     PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1758:     PetscInt j0,rowlen;
1759:     rowlen = ai[i+1] - ai[i];
1760:     for (j0=j=0; j<rowlen; j0=j) { /* rowlen could be larger than the number of local rows m (the size of the scratch arrays), so insert in batches */
1761:       for ( ; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1762:       MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1763:     }
1764:     rowlen = bi[i+1] - bi[i];
1765:     for (j0=j=0; j<rowlen; j0=j) {
1766:       for ( ; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1767:       MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1768:     }
1769:   }
1770:   MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1771:   MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1772:   MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1773:   MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1774:   MatSeqAIJRestoreArray(aA,&aa);
1775:   MatSeqAIJRestoreArray(aB,&ba);
1776:   PetscFree4(dnnz,onnz,tdnnz,tonnz);
1777:   PetscFree3(work,rdest,cdest);
1778:   PetscFree(gcdest);
1779:   if (parcolp) {ISDestroy(&colp);}
1780:   *B = Aperm;
1781:   return(0);
1782: }
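
A hedged usage sketch for MatPermute() (not part of this file). It uses an identity permutation built from the ownership ranges just to show the calling sequence; a real reordering would come from MatGetOrdering() or application knowledge. A is assumed to be an assembled MATMPIAIJ matrix.

  IS       rowp,colp;
  Mat      Aperm;
  PetscInt rstart,rend,cstart,cend;
  MatGetOwnershipRange(A,&rstart,&rend);
  MatGetOwnershipRangeColumn(A,&cstart,&cend);
  ISCreateStride(PetscObjectComm((PetscObject)A),rend-rstart,rstart,1,&rowp);
  ISCreateStride(PetscObjectComm((PetscObject)A),cend-cstart,cstart,1,&colp);
  ISSetPermutation(rowp);
  ISSetPermutation(colp);
  MatPermute(A,rowp,colp,&Aperm);
  ISDestroy(&rowp);
  ISDestroy(&colp);
  MatDestroy(&Aperm);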

1784: PetscErrorCode  MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1785: {
1786:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1790:   MatGetSize(aij->B,NULL,nghosts);
1791:   if (ghosts) *ghosts = aij->garray;
1792:   return(0);
1793: }

1795: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1796: {
1797:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1798:   Mat            A    = mat->A,B = mat->B;
1800:   PetscLogDouble isend[5],irecv[5];

1803:   info->block_size = 1.0;
1804:   MatGetInfo(A,MAT_LOCAL,info);

1806:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1807:   isend[3] = info->memory;  isend[4] = info->mallocs;

1809:   MatGetInfo(B,MAT_LOCAL,info);

1811:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1812:   isend[3] += info->memory;  isend[4] += info->mallocs;
1813:   if (flag == MAT_LOCAL) {
1814:     info->nz_used      = isend[0];
1815:     info->nz_allocated = isend[1];
1816:     info->nz_unneeded  = isend[2];
1817:     info->memory       = isend[3];
1818:     info->mallocs      = isend[4];
1819:   } else if (flag == MAT_GLOBAL_MAX) {
1820:     MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_MAX,PetscObjectComm((PetscObject)matin));

1822:     info->nz_used      = irecv[0];
1823:     info->nz_allocated = irecv[1];
1824:     info->nz_unneeded  = irecv[2];
1825:     info->memory       = irecv[3];
1826:     info->mallocs      = irecv[4];
1827:   } else if (flag == MAT_GLOBAL_SUM) {
1828:     MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_SUM,PetscObjectComm((PetscObject)matin));

1830:     info->nz_used      = irecv[0];
1831:     info->nz_allocated = irecv[1];
1832:     info->nz_unneeded  = irecv[2];
1833:     info->memory       = irecv[3];
1834:     info->mallocs      = irecv[4];
1835:   }
1836:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1837:   info->fill_ratio_needed = 0;
1838:   info->factor_mallocs    = 0;
1839:   return(0);
1840: }
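
A small usage sketch (not part of this file): querying the information summed over all processes; mat is assumed to be an assembled MATMPIAIJ matrix.

  MatInfo info;
  MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
  PetscPrintf(PETSC_COMM_WORLD,"nz used %g, nz allocated %g, mallocs %g\n",
              info.nz_used,info.nz_allocated,info.mallocs);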

1842: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1843: {
1844:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

1848:   switch (op) {
1849:   case MAT_NEW_NONZERO_LOCATIONS:
1850:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1851:   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1852:   case MAT_KEEP_NONZERO_PATTERN:
1853:   case MAT_NEW_NONZERO_LOCATION_ERR:
1854:   case MAT_USE_INODES:
1855:   case MAT_IGNORE_ZERO_ENTRIES:
1856:     MatCheckPreallocated(A,1);
1857:     MatSetOption(a->A,op,flg);
1858:     MatSetOption(a->B,op,flg);
1859:     break;
1860:   case MAT_ROW_ORIENTED:
1861:     MatCheckPreallocated(A,1);
1862:     a->roworiented = flg;

1864:     MatSetOption(a->A,op,flg);
1865:     MatSetOption(a->B,op,flg);
1866:     break;
1867:   case MAT_NEW_DIAGONALS:
1868:   case MAT_SORTED_FULL:
1869:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1870:     break;
1871:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1872:     a->donotstash = flg;
1873:     break;
1874:   /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1875:   case MAT_SPD:
1876:   case MAT_SYMMETRIC:
1877:   case MAT_STRUCTURALLY_SYMMETRIC:
1878:   case MAT_HERMITIAN:
1879:   case MAT_SYMMETRY_ETERNAL:
1880:     break;
1881:   case MAT_SUBMAT_SINGLEIS:
1882:     A->submat_singleis = flg;
1883:     break;
1884:   case MAT_STRUCTURE_ONLY:
1885:     /* The option is handled directly by MatSetOption() */
1886:     break;
1887:   default:
1888:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1889:   }
1890:   return(0);
1891: }

1893: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1894: {
1895:   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1896:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1898:   PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1899:   PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1900:   PetscInt       *cmap,*idx_p;

1903:   if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1904:   mat->getrowactive = PETSC_TRUE;

1906:   if (!mat->rowvalues && (idx || v)) {
1907:     /*
1908:         allocate enough space to hold information from the longest row.
1909:     */
1910:     Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1911:     PetscInt   max = 1,tmp;
1912:     for (i=0; i<matin->rmap->n; i++) {
1913:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1914:       if (max < tmp) max = tmp;
1915:     }
1916:     PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1917:   }

1919:   if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1920:   lrow = row - rstart;

1922:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1923:   if (!v)   {pvA = 0; pvB = 0;}
1924:   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1925:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1926:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1927:   nztot = nzA + nzB;

1929:   cmap = mat->garray;
1930:   if (v  || idx) {
1931:     if (nztot) {
1932:       /* Sort by increasing column numbers, assuming A and B already sorted */
1933:       PetscInt imark = -1;
1934:       if (v) {
1935:         *v = v_p = mat->rowvalues;
1936:         for (i=0; i<nzB; i++) {
1937:           if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1938:           else break;
1939:         }
1940:         imark = i;
1941:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1942:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1943:       }
1944:       if (idx) {
1945:         *idx = idx_p = mat->rowindices;
1946:         if (imark > -1) {
1947:           for (i=0; i<imark; i++) {
1948:             idx_p[i] = cmap[cworkB[i]];
1949:           }
1950:         } else {
1951:           for (i=0; i<nzB; i++) {
1952:             if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1953:             else break;
1954:           }
1955:           imark = i;
1956:         }
1957:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
1958:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
1959:       }
1960:     } else {
1961:       if (idx) *idx = 0;
1962:       if (v)   *v   = 0;
1963:     }
1964:   }
1965:   *nz  = nztot;
1966:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1967:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1968:   return(0);
1969: }
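
A usage sketch (not part of this file): MatGetRow()/MatRestoreRow() may only be called for locally owned rows, as enforced above; mat is assumed to be an assembled MATMPIAIJ matrix.

  PetscInt          row,ncols,rstart,rend;
  const PetscInt    *cols;
  const PetscScalar *vals;
  MatGetOwnershipRange(mat,&rstart,&rend);
  for (row=rstart; row<rend; row++) {
    MatGetRow(mat,row,&ncols,&cols,&vals);
    /* cols[] are global column indices in increasing order, vals[] the matching entries */
    MatRestoreRow(mat,row,&ncols,&cols,&vals);
  }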

1971: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1972: {
1973:   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

1976:   if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1977:   aij->getrowactive = PETSC_FALSE;
1978:   return(0);
1979: }

1981: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1982: {
1983:   Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)mat->data;
1984:   Mat_SeqAIJ     *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1986:   PetscInt       i,j,cstart = mat->cmap->rstart;
1987:   PetscReal      sum = 0.0;
1988:   MatScalar      *v;

1991:   if (aij->size == 1) {
1992:      MatNorm(aij->A,type,norm);
1993:   } else {
1994:     if (type == NORM_FROBENIUS) {
1995:       v = amat->a;
1996:       for (i=0; i<amat->nz; i++) {
1997:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1998:       }
1999:       v = bmat->a;
2000:       for (i=0; i<bmat->nz; i++) {
2001:         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
2002:       }
2003:       MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
2004:       *norm = PetscSqrtReal(*norm);
2005:       PetscLogFlops(2*amat->nz+2*bmat->nz);
2006:     } else if (type == NORM_1) { /* max column norm */
2007:       PetscReal *tmp,*tmp2;
2008:       PetscInt  *jj,*garray = aij->garray;
2009:       PetscCalloc1(mat->cmap->N+1,&tmp);
2010:       PetscMalloc1(mat->cmap->N+1,&tmp2);
2011:       *norm = 0.0;
2012:       v     = amat->a; jj = amat->j;
2013:       for (j=0; j<amat->nz; j++) {
2014:         tmp[cstart + *jj++] += PetscAbsScalar(*v);  v++;
2015:       }
2016:       v = bmat->a; jj = bmat->j;
2017:       for (j=0; j<bmat->nz; j++) {
2018:         tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
2019:       }
2020:       MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
2021:       for (j=0; j<mat->cmap->N; j++) {
2022:         if (tmp2[j] > *norm) *norm = tmp2[j];
2023:       }
2024:       PetscFree(tmp);
2025:       PetscFree(tmp2);
2026:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
2027:     } else if (type == NORM_INFINITY) { /* max row norm */
2028:       PetscReal ntemp = 0.0;
2029:       for (j=0; j<aij->A->rmap->n; j++) {
2030:         v   = amat->a + amat->i[j];
2031:         sum = 0.0;
2032:         for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
2033:           sum += PetscAbsScalar(*v); v++;
2034:         }
2035:         v = bmat->a + bmat->i[j];
2036:         for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
2037:           sum += PetscAbsScalar(*v); v++;
2038:         }
2039:         if (sum > ntemp) ntemp = sum;
2040:       }
2041:       MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
2042:       PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
2043:     } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
2044:   }
2045:   return(0);
2046: }

2048: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
2049: {
2050:   Mat_MPIAIJ      *a    =(Mat_MPIAIJ*)A->data,*b;
2051:   Mat_SeqAIJ      *Aloc =(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data,*sub_B_diag;
2052:   PetscInt        M     = A->rmap->N,N=A->cmap->N,ma,na,mb,nb,row,*cols,*cols_tmp,*B_diag_ilen,i,ncol,A_diag_ncol;
2053:   const PetscInt  *ai,*aj,*bi,*bj,*B_diag_i;
2054:   PetscErrorCode  ierr;
2055:   Mat             B,A_diag,*B_diag;
2056:   const MatScalar *array;

2059:   ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
2060:   ai = Aloc->i; aj = Aloc->j;
2061:   bi = Bloc->i; bj = Bloc->j;
2062:   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
2063:     PetscInt             *d_nnz,*g_nnz,*o_nnz;
2064:     PetscSFNode          *oloc;
2065:     PETSC_UNUSED PetscSF sf;

2067:     PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
2068:     /* compute d_nnz for preallocation */
2069:     PetscArrayzero(d_nnz,na);
2070:     for (i=0; i<ai[ma]; i++) {
2071:       d_nnz[aj[i]]++;
2072:     }
2073:     /* compute local off-diagonal contributions */
2074:     PetscArrayzero(g_nnz,nb);
2075:     for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
2076:     /* map those to global */
2077:     PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
2078:     PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
2079:     PetscSFSetFromOptions(sf);
2080:     PetscArrayzero(o_nnz,na);
2081:     PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2082:     PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2083:     PetscSFDestroy(&sf);

2085:     MatCreate(PetscObjectComm((PetscObject)A),&B);
2086:     MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
2087:     MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
2088:     MatSetType(B,((PetscObject)A)->type_name);
2089:     MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
2090:     PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
2091:   } else {
2092:     B    = *matout;
2093:     MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
2094:   }

2096:   b           = (Mat_MPIAIJ*)B->data;
2097:   A_diag      = a->A;
2098:   B_diag      = &b->A;
2099:   sub_B_diag  = (Mat_SeqAIJ*)(*B_diag)->data;
2100:   A_diag_ncol = A_diag->cmap->N;
2101:   B_diag_ilen = sub_B_diag->ilen;
2102:   B_diag_i    = sub_B_diag->i;

2104:   /* Set ilen for diagonal of B */
2105:   for (i=0; i<A_diag_ncol; i++) {
2106:     B_diag_ilen[i] = B_diag_i[i+1] - B_diag_i[i];
2107:   }

2109:   /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
2110:      very quickly (without using MatSetValues), because all writes are local. */
2111:   MatTranspose(A_diag,MAT_REUSE_MATRIX,B_diag);

2113:   /* copy over the B part */
2114:   PetscMalloc1(bi[mb],&cols);
2115:   array = Bloc->a;
2116:   row   = A->rmap->rstart;
2117:   for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
2118:   cols_tmp = cols;
2119:   for (i=0; i<mb; i++) {
2120:     ncol = bi[i+1]-bi[i];
2121:     MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);
2122:     row++;
2123:     array += ncol; cols_tmp += ncol;
2124:   }
2125:   PetscFree(cols);

2127:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2128:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2129:   if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
2130:     *matout = B;
2131:   } else {
2132:     MatHeaderMerge(A,&B);
2133:   }
2134:   return(0);
2135: }
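
A usage sketch for MatTranspose() (not part of this file); A is assumed to be an assembled MATMPIAIJ matrix whose nonzero pattern does not change between calls.

  Mat At;
  MatTranspose(A,MAT_INITIAL_MATRIX,&At);   /* allocates At and fills it with the transpose of A */
  /* ... change the numerical values of A, keeping its pattern ... */
  MatTranspose(A,MAT_REUSE_MATRIX,&At);     /* reuses the existing structure of At */
  MatDestroy(&At);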

2137: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
2138: {
2139:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
2140:   Mat            a    = aij->A,b = aij->B;
2142:   PetscInt       s1,s2,s3;

2145:   MatGetLocalSize(mat,&s2,&s3);
2146:   if (rr) {
2147:     VecGetLocalSize(rr,&s1);
2148:     if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
2149:     /* Overlap communication with computation. */
2150:     VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2151:   }
2152:   if (ll) {
2153:     VecGetLocalSize(ll,&s1);
2154:     if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
2155:     (*b->ops->diagonalscale)(b,ll,0);
2156:   }
2157:   /* scale the diagonal block */
2158:   (*a->ops->diagonalscale)(a,ll,rr);

2160:   if (rr) {
2161:     /* Do a scatter end and then right scale the off-diagonal block */
2162:     VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2163:     (*b->ops->diagonalscale)(b,0,aij->lvec);
2164:   }
2165:   return(0);
2166: }

2168: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2169: {
2170:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2174:   MatSetUnfactored(a->A);
2175:   return(0);
2176: }

2178: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool  *flag)
2179: {
2180:   Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2181:   Mat            a,b,c,d;
2182:   PetscBool      flg;

2186:   a = matA->A; b = matA->B;
2187:   c = matB->A; d = matB->B;

2189:   MatEqual(a,c,&flg);
2190:   if (flg) {
2191:     MatEqual(b,d,&flg);
2192:   }
2193:   MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2194:   return(0);
2195: }

2197: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2198: {
2200:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2201:   Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;

2204:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2205:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2206:     /* Because of the column compression in the off-process part of the matrix a->B,
2207:        the number of columns in a->B and b->B may differ, so we cannot call MatCopy()
2208:        directly on the two parts. If needed, a copy more efficient than MatCopy_Basic()
2209:        could be provided by first uncompressing the a->B matrices and then copying
2210:        the submatrices. */
2211:     MatCopy_Basic(A,B,str);
2212:   } else {
2213:     MatCopy(a->A,b->A,str);
2214:     MatCopy(a->B,b->B,str);
2215:   }
2216:   PetscObjectStateIncrease((PetscObject)B);
2217:   return(0);
2218: }

2220: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2221: {

2225:   MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
2226:   return(0);
2227: }

2229: /*
2230:    Computes the number of nonzeros per row needed for preallocation when X and Y
2231:    have different nonzero structure.
2232: */
2233: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2234: {
2235:   PetscInt       i,j,k,nzx,nzy;

2238:   /* Set the number of nonzeros in the new matrix */
2239:   for (i=0; i<m; i++) {
2240:     const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2241:     nzx = xi[i+1] - xi[i];
2242:     nzy = yi[i+1] - yi[i];
2243:     nnz[i] = 0;
2244:     for (j=0,k=0; j<nzx; j++) {                   /* Point in X */
2245:       for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2246:       if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++;             /* Skip duplicate */
2247:       nnz[i]++;
2248:     }
2249:     for (; k<nzy; k++) nnz[i]++;
2250:   }
2251:   return(0);
2252: }

2254: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2255: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2256: {
2258:   PetscInt       m = Y->rmap->N;
2259:   Mat_SeqAIJ     *x = (Mat_SeqAIJ*)X->data;
2260:   Mat_SeqAIJ     *y = (Mat_SeqAIJ*)Y->data;

2263:   MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2264:   return(0);
2265: }

2267: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2268: {
2270:   Mat_MPIAIJ     *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;
2271:   PetscBLASInt   bnz,one=1;
2272:   Mat_SeqAIJ     *x,*y;

2275:   if (str == SAME_NONZERO_PATTERN) {
2276:     PetscScalar alpha = a;
2277:     x    = (Mat_SeqAIJ*)xx->A->data;
2278:     PetscBLASIntCast(x->nz,&bnz);
2279:     y    = (Mat_SeqAIJ*)yy->A->data;
2280:     PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2281:     x    = (Mat_SeqAIJ*)xx->B->data;
2282:     y    = (Mat_SeqAIJ*)yy->B->data;
2283:     PetscBLASIntCast(x->nz,&bnz);
2284:     PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2285:     PetscObjectStateIncrease((PetscObject)Y);
2286:     /* the MatAXPY_Basic* subroutines call MatAssemblyBegin/End(), so the matrix on the GPU
2287:        will be updated */
2288: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
2289:     if (Y->offloadmask != PETSC_OFFLOAD_UNALLOCATED) {
2290:       Y->offloadmask = PETSC_OFFLOAD_CPU;
2291:     }
2292: #endif
2293:   } else if (str == SUBSET_NONZERO_PATTERN) { /* the nonzero structure of X is a subset of Y's */
2294:     MatAXPY_Basic(Y,a,X,str);
2295:   } else {
2296:     Mat      B;
2297:     PetscInt *nnz_d,*nnz_o;
2298:     PetscMalloc1(yy->A->rmap->N,&nnz_d);
2299:     PetscMalloc1(yy->B->rmap->N,&nnz_o);
2300:     MatCreate(PetscObjectComm((PetscObject)Y),&B);
2301:     PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2302:     MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);
2303:     MatSetBlockSizesFromMats(B,Y,Y);
2304:     MatSetType(B,MATMPIAIJ);
2305:     MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2306:     MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2307:     MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2308:     MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2309:     MatHeaderReplace(Y,&B);
2310:     PetscFree(nnz_d);
2311:     PetscFree(nnz_o);
2312:   }
2313:   return(0);
2314: }
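
A usage sketch (not part of this file): Y <- a*X + Y, where the structure flag selects the branch above; X and Y are assumed to be assembled MATMPIAIJ matrices of compatible sizes.

  MatAXPY(Y,2.0,X,SAME_NONZERO_PATTERN);       /* fast path: two BLAS axpy calls on the local parts */
  MatAXPY(Y,2.0,X,DIFFERENT_NONZERO_PATTERN);  /* general path: Y is rebuilt with merged preallocation */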

2316: extern PetscErrorCode  MatConjugate_SeqAIJ(Mat);

2318: PetscErrorCode  MatConjugate_MPIAIJ(Mat mat)
2319: {
2320: #if defined(PETSC_USE_COMPLEX)
2322:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2325:   MatConjugate_SeqAIJ(aij->A);
2326:   MatConjugate_SeqAIJ(aij->B);
2327: #else
2329: #endif
2330:   return(0);
2331: }

2333: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2334: {
2335:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2339:   MatRealPart(a->A);
2340:   MatRealPart(a->B);
2341:   return(0);
2342: }

2344: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2345: {
2346:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2350:   MatImaginaryPart(a->A);
2351:   MatImaginaryPart(a->B);
2352:   return(0);
2353: }

2355: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2356: {
2357:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2359:   PetscInt       i,*idxb = 0;
2360:   PetscScalar    *va,*vb;
2361:   Vec            vtmp;

2364:   MatGetRowMaxAbs(a->A,v,idx);
2365:   VecGetArray(v,&va);
2366:   if (idx) {
2367:     for (i=0; i<A->rmap->n; i++) {
2368:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2369:     }
2370:   }

2372:   VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2373:   if (idx) {
2374:     PetscMalloc1(A->rmap->n,&idxb);
2375:   }
2376:   MatGetRowMaxAbs(a->B,vtmp,idxb);
2377:   VecGetArray(vtmp,&vb);

2379:   for (i=0; i<A->rmap->n; i++) {
2380:     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2381:       va[i] = vb[i];
2382:       if (idx) idx[i] = a->garray[idxb[i]];
2383:     }
2384:   }

2386:   VecRestoreArray(v,&va);
2387:   VecRestoreArray(vtmp,&vb);
2388:   PetscFree(idxb);
2389:   VecDestroy(&vtmp);
2390:   return(0);
2391: }

2393: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2394: {
2395:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2397:   PetscInt       i,*idxb = 0;
2398:   PetscScalar    *va,*vb;
2399:   Vec            vtmp;

2402:   MatGetRowMinAbs(a->A,v,idx);
2403:   VecGetArray(v,&va);
2404:   if (idx) {
2405:     for (i=0; i<A->rmap->n; i++) {
2406:       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2407:     }
2408:   }

2410:   VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2411:   if (idx) {
2412:     PetscMalloc1(A->rmap->n,&idxb);
2413:   }
2414:   MatGetRowMinAbs(a->B,vtmp,idxb);
2415:   VecGetArray(vtmp,&vb);

2417:   for (i=0; i<A->rmap->n; i++) {
2418:     if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2419:       va[i] = vb[i];
2420:       if (idx) idx[i] = a->garray[idxb[i]];
2421:     }
2422:   }

2424:   VecRestoreArray(v,&va);
2425:   VecRestoreArray(vtmp,&vb);
2426:   PetscFree(idxb);
2427:   VecDestroy(&vtmp);
2428:   return(0);
2429: }

2431: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2432: {
2433:   Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
2434:   PetscInt       n      = A->rmap->n;
2435:   PetscInt       cstart = A->cmap->rstart;
2436:   PetscInt       *cmap  = mat->garray;
2437:   PetscInt       *diagIdx, *offdiagIdx;
2438:   Vec            diagV, offdiagV;
2439:   PetscScalar    *a, *diagA, *offdiagA;
2440:   PetscInt       r;

2444:   PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2445:   VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2446:   VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2447:   MatGetRowMin(mat->A, diagV,    diagIdx);
2448:   MatGetRowMin(mat->B, offdiagV, offdiagIdx);
2449:   VecGetArray(v,        &a);
2450:   VecGetArray(diagV,    &diagA);
2451:   VecGetArray(offdiagV, &offdiagA);
2452:   for (r = 0; r < n; ++r) {
2453:     if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2454:       a[r]   = diagA[r];
2455:       idx[r] = cstart + diagIdx[r];
2456:     } else {
2457:       a[r]   = offdiagA[r];
2458:       idx[r] = cmap[offdiagIdx[r]];
2459:     }
2460:   }
2461:   VecRestoreArray(v,        &a);
2462:   VecRestoreArray(diagV,    &diagA);
2463:   VecRestoreArray(offdiagV, &offdiagA);
2464:   VecDestroy(&diagV);
2465:   VecDestroy(&offdiagV);
2466:   PetscFree2(diagIdx, offdiagIdx);
2467:   return(0);
2468: }

2470: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2471: {
2472:   Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
2473:   PetscInt       n      = A->rmap->n;
2474:   PetscInt       cstart = A->cmap->rstart;
2475:   PetscInt       *cmap  = mat->garray;
2476:   PetscInt       *diagIdx, *offdiagIdx;
2477:   Vec            diagV, offdiagV;
2478:   PetscScalar    *a, *diagA, *offdiagA;
2479:   PetscInt       r;

2483:   PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2484:   VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2485:   VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2486:   MatGetRowMax(mat->A, diagV,    diagIdx);
2487:   MatGetRowMax(mat->B, offdiagV, offdiagIdx);
2488:   VecGetArray(v,        &a);
2489:   VecGetArray(diagV,    &diagA);
2490:   VecGetArray(offdiagV, &offdiagA);
2491:   for (r = 0; r < n; ++r) {
2492:     if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2493:       a[r]   = diagA[r];
2494:       idx[r] = cstart + diagIdx[r];
2495:     } else {
2496:       a[r]   = offdiagA[r];
2497:       idx[r] = cmap[offdiagIdx[r]];
2498:     }
2499:   }
2500:   VecRestoreArray(v,        &a);
2501:   VecRestoreArray(diagV,    &diagA);
2502:   VecRestoreArray(offdiagV, &offdiagA);
2503:   VecDestroy(&diagV);
2504:   VecDestroy(&offdiagV);
2505:   PetscFree2(diagIdx, offdiagIdx);
2506:   return(0);
2507: }

2509: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2510: {
2512:   Mat            *dummy;

2515:   MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2516:   *newmat = *dummy;
2517:   PetscFree(dummy);
2518:   return(0);
2519: }

2521: PetscErrorCode  MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2522: {
2523:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*) A->data;

2527:   MatInvertBlockDiagonal(a->A,values);
2528:   A->factorerrortype = a->A->factorerrortype;
2529:   return(0);
2530: }

2532: static PetscErrorCode  MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2533: {
2535:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)x->data;

2538:   if (!x->assembled && !x->preallocated) SETERRQ(PetscObjectComm((PetscObject)x), PETSC_ERR_ARG_WRONGSTATE, "MatSetRandom on an unassembled and unpreallocated MATMPIAIJ is not allowed");
2539:   MatSetRandom(aij->A,rctx);
2540:   if (x->assembled) {
2541:     MatSetRandom(aij->B,rctx);
2542:   } else {
2543:     MatSetRandomSkipColumnRange_SeqAIJ_Private(aij->B,x->cmap->rstart,x->cmap->rend,rctx);
2544:   }
2545:   MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2546:   MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2547:   return(0);
2548: }
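
A usage sketch (not part of this file): filling a preallocated or assembled MATMPIAIJ matrix A with random entries.

  PetscRandom rctx;
  PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
  PetscRandomSetFromOptions(rctx);
  MatSetRandom(A,rctx);
  PetscRandomDestroy(&rctx);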

2550: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2551: {
2553:   if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2554:   else A->ops->increaseoverlap    = MatIncreaseOverlap_MPIAIJ;
2555:   return(0);
2556: }

2558: /*@
2559:    MatMPIAIJSetUseScalableIncreaseOverlap - Determine if the matrix uses a scalable algorithm to compute the overlap

2561:    Collective on Mat

2563:    Input Parameters:
2564: +    A - the matrix
2565: -    sc - PETSC_TRUE indicates use of the scalable algorithm (the default is to not use it)

2567:  Level: advanced

2569: @*/
2570: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2571: {
2572:   PetscErrorCode       ierr;

2575:   PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));
2576:   return(0);
2577: }
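
A usage sketch (not part of this file); A is assumed to be a MATMPIAIJ matrix.

  MatMPIAIJSetUseScalableIncreaseOverlap(A,PETSC_TRUE);
  /* equivalently, from the options database: -mat_increase_overlap_scalable */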

2579: PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2580: {
2581:   PetscErrorCode       ierr;
2582:   PetscBool            sc = PETSC_FALSE,flg;

2585:   PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");
2586:   if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2587:   PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);
2588:   if (flg) {
2589:     MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);
2590:   }
2591:   PetscOptionsTail();
2592:   return(0);
2593: }

2595: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2596: {
2598:   Mat_MPIAIJ     *maij = (Mat_MPIAIJ*)Y->data;
2599:   Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)maij->A->data;

2602:   if (!Y->preallocated) {
2603:     MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2604:   } else if (!aij->nz) {
2605:     PetscInt nonew = aij->nonew;
2606:     MatSeqAIJSetPreallocation(maij->A,1,NULL);
2607:     aij->nonew = nonew;
2608:   }
2609:   MatShift_Basic(Y,a);
2610:   return(0);
2611: }

2613: PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool  *missing,PetscInt *d)
2614: {
2615:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2619:   if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2620:   MatMissingDiagonal(a->A,missing,d);
2621:   if (d) {
2622:     PetscInt rstart;
2623:     MatGetOwnershipRange(A,&rstart,NULL);
2624:     *d += rstart;

2626:   }
2627:   return(0);
2628: }

2630: PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *diag)
2631: {
2632:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

2636:   MatInvertVariableBlockDiagonal(a->A,nblocks,bsizes,diag);
2637:   return(0);
2638: }

2640: /* -------------------------------------------------------------------*/
2641: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2642:                                        MatGetRow_MPIAIJ,
2643:                                        MatRestoreRow_MPIAIJ,
2644:                                        MatMult_MPIAIJ,
2645:                                 /* 4*/ MatMultAdd_MPIAIJ,
2646:                                        MatMultTranspose_MPIAIJ,
2647:                                        MatMultTransposeAdd_MPIAIJ,
2648:                                        0,
2649:                                        0,
2650:                                        0,
2651:                                 /*10*/ 0,
2652:                                        0,
2653:                                        0,
2654:                                        MatSOR_MPIAIJ,
2655:                                        MatTranspose_MPIAIJ,
2656:                                 /*15*/ MatGetInfo_MPIAIJ,
2657:                                        MatEqual_MPIAIJ,
2658:                                        MatGetDiagonal_MPIAIJ,
2659:                                        MatDiagonalScale_MPIAIJ,
2660:                                        MatNorm_MPIAIJ,
2661:                                 /*20*/ MatAssemblyBegin_MPIAIJ,
2662:                                        MatAssemblyEnd_MPIAIJ,
2663:                                        MatSetOption_MPIAIJ,
2664:                                        MatZeroEntries_MPIAIJ,
2665:                                 /*24*/ MatZeroRows_MPIAIJ,
2666:                                        0,
2667:                                        0,
2668:                                        0,
2669:                                        0,
2670:                                 /*29*/ MatSetUp_MPIAIJ,
2671:                                        0,
2672:                                        0,
2673:                                        MatGetDiagonalBlock_MPIAIJ,
2674:                                        0,
2675:                                 /*34*/ MatDuplicate_MPIAIJ,
2676:                                        0,
2677:                                        0,
2678:                                        0,
2679:                                        0,
2680:                                 /*39*/ MatAXPY_MPIAIJ,
2681:                                        MatCreateSubMatrices_MPIAIJ,
2682:                                        MatIncreaseOverlap_MPIAIJ,
2683:                                        MatGetValues_MPIAIJ,
2684:                                        MatCopy_MPIAIJ,
2685:                                 /*44*/ MatGetRowMax_MPIAIJ,
2686:                                        MatScale_MPIAIJ,
2687:                                        MatShift_MPIAIJ,
2688:                                        MatDiagonalSet_MPIAIJ,
2689:                                        MatZeroRowsColumns_MPIAIJ,
2690:                                 /*49*/ MatSetRandom_MPIAIJ,
2691:                                        0,
2692:                                        0,
2693:                                        0,
2694:                                        0,
2695:                                 /*54*/ MatFDColoringCreate_MPIXAIJ,
2696:                                        0,
2697:                                        MatSetUnfactored_MPIAIJ,
2698:                                        MatPermute_MPIAIJ,
2699:                                        0,
2700:                                 /*59*/ MatCreateSubMatrix_MPIAIJ,
2701:                                        MatDestroy_MPIAIJ,
2702:                                        MatView_MPIAIJ,
2703:                                        0,
2704:                                        MatMatMatMult_MPIAIJ_MPIAIJ_MPIAIJ,
2705:                                 /*64*/ MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ,
2706:                                        MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2707:                                        0,
2708:                                        0,
2709:                                        0,
2710:                                 /*69*/ MatGetRowMaxAbs_MPIAIJ,
2711:                                        MatGetRowMinAbs_MPIAIJ,
2712:                                        0,
2713:                                        0,
2714:                                        0,
2715:                                        0,
2716:                                 /*75*/ MatFDColoringApply_AIJ,
2717:                                        MatSetFromOptions_MPIAIJ,
2718:                                        0,
2719:                                        0,
2720:                                        MatFindZeroDiagonals_MPIAIJ,
2721:                                 /*80*/ 0,
2722:                                        0,
2723:                                        0,
2724:                                 /*83*/ MatLoad_MPIAIJ,
2725:                                        MatIsSymmetric_MPIAIJ,
2726:                                        0,
2727:                                        0,
2728:                                        0,
2729:                                        0,
2730:                                 /*89*/ MatMatMult_MPIAIJ_MPIAIJ,
2731:                                        MatMatMultSymbolic_MPIAIJ_MPIAIJ,
2732:                                        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2733:                                        MatPtAP_MPIAIJ_MPIAIJ,
2734:                                        MatPtAPSymbolic_MPIAIJ_MPIAIJ,
2735:                                 /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2736:                                        0,
2737:                                        0,
2738:                                        0,
2739:                                        MatPinToCPU_MPIAIJ,
2740:                                 /*99*/ 0,
2741:                                        0,
2742:                                        0,
2743:                                        MatConjugate_MPIAIJ,
2744:                                        0,
2745:                                 /*104*/MatSetValuesRow_MPIAIJ,
2746:                                        MatRealPart_MPIAIJ,
2747:                                        MatImaginaryPart_MPIAIJ,
2748:                                        0,
2749:                                        0,
2750:                                 /*109*/0,
2751:                                        0,
2752:                                        MatGetRowMin_MPIAIJ,
2753:                                        0,
2754:                                        MatMissingDiagonal_MPIAIJ,
2755:                                 /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2756:                                        0,
2757:                                        MatGetGhosts_MPIAIJ,
2758:                                        0,
2759:                                        0,
2760:                                 /*119*/0,
2761:                                        0,
2762:                                        0,
2763:                                        0,
2764:                                        MatGetMultiProcBlock_MPIAIJ,
2765:                                 /*124*/MatFindNonzeroRows_MPIAIJ,
2766:                                        MatGetColumnNorms_MPIAIJ,
2767:                                        MatInvertBlockDiagonal_MPIAIJ,
2768:                                        MatInvertVariableBlockDiagonal_MPIAIJ,
2769:                                        MatCreateSubMatricesMPI_MPIAIJ,
2770:                                 /*129*/0,
2771:                                        MatTransposeMatMult_MPIAIJ_MPIAIJ,
2772:                                        MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ,
2773:                                        MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2774:                                        0,
2775:                                 /*134*/0,
2776:                                        0,
2777:                                        MatRARt_MPIAIJ_MPIAIJ,
2778:                                        0,
2779:                                        0,
2780:                                 /*139*/MatSetBlockSizes_MPIAIJ,
2781:                                        0,
2782:                                        0,
2783:                                        MatFDColoringSetUp_MPIXAIJ,
2784:                                        MatFindOffBlockDiagonalEntries_MPIAIJ,
2785:                                 /*144*/MatCreateMPIMatConcatenateSeqMat_MPIAIJ
2786: };

2788: /* ----------------------------------------------------------------------------------------*/

2790: PetscErrorCode  MatStoreValues_MPIAIJ(Mat mat)
2791: {
2792:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2796:   MatStoreValues(aij->A);
2797:   MatStoreValues(aij->B);
2798:   return(0);
2799: }

2801: PetscErrorCode  MatRetrieveValues_MPIAIJ(Mat mat)
2802: {
2803:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;

2807:   MatRetrieveValues(aij->A);
2808:   MatRetrieveValues(aij->B);
2809:   return(0);
2810: }

2812: PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2813: {
2814:   Mat_MPIAIJ     *b;
2816:   PetscMPIInt    size;

2819:   PetscLayoutSetUp(B->rmap);
2820:   PetscLayoutSetUp(B->cmap);
2821:   b = (Mat_MPIAIJ*)B->data;

2823: #if defined(PETSC_USE_CTABLE)
2824:   PetscTableDestroy(&b->colmap);
2825: #else
2826:   PetscFree(b->colmap);
2827: #endif
2828:   PetscFree(b->garray);
2829:   VecDestroy(&b->lvec);
2830:   VecScatterDestroy(&b->Mvctx);

2832:   /* Because b->B may have been resized we simply destroy it and create a new one each time */
2833:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
2834:   MatDestroy(&b->B);
2835:   MatCreate(PETSC_COMM_SELF,&b->B);
2836:   MatSetSizes(b->B,B->rmap->n,size > 1 ? B->cmap->N : 0,B->rmap->n,size > 1 ? B->cmap->N : 0);
2837:   MatSetBlockSizesFromMats(b->B,B,B);
2838:   MatSetType(b->B,MATSEQAIJ);
2839:   PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);

2841:   if (!B->preallocated) {
2842:     MatCreate(PETSC_COMM_SELF,&b->A);
2843:     MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2844:     MatSetBlockSizesFromMats(b->A,B,B);
2845:     MatSetType(b->A,MATSEQAIJ);
2846:     PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2847:   }

2849:   MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2850:   MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2851:   B->preallocated  = PETSC_TRUE;
2852:   B->was_assembled = PETSC_FALSE;
2853:   B->assembled     = PETSC_FALSE;
2854:   return(0);
2855: }

2857: PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2858: {
2859:   Mat_MPIAIJ     *b;

2864:   PetscLayoutSetUp(B->rmap);
2865:   PetscLayoutSetUp(B->cmap);
2866:   b = (Mat_MPIAIJ*)B->data;

2868: #if defined(PETSC_USE_CTABLE)
2869:   PetscTableDestroy(&b->colmap);
2870: #else
2871:   PetscFree(b->colmap);
2872: #endif
2873:   PetscFree(b->garray);
2874:   VecDestroy(&b->lvec);
2875:   VecScatterDestroy(&b->Mvctx);

2877:   MatResetPreallocation(b->A);
2878:   MatResetPreallocation(b->B);
2879:   B->preallocated  = PETSC_TRUE;
2880:   B->was_assembled = PETSC_FALSE;
2881:   B->assembled = PETSC_FALSE;
2882:   return(0);
2883: }

2885: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2886: {
2887:   Mat            mat;
2888:   Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;

2892:   *newmat = 0;
2893:   MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2894:   MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2895:   MatSetBlockSizesFromMats(mat,matin,matin);
2896:   MatSetType(mat,((PetscObject)matin)->type_name);
2897:   a       = (Mat_MPIAIJ*)mat->data;

2899:   mat->factortype   = matin->factortype;
2900:   mat->assembled    = PETSC_TRUE;
2901:   mat->insertmode   = NOT_SET_VALUES;
2902:   mat->preallocated = PETSC_TRUE;

2904:   a->size         = oldmat->size;
2905:   a->rank         = oldmat->rank;
2906:   a->donotstash   = oldmat->donotstash;
2907:   a->roworiented  = oldmat->roworiented;
2908:   a->rowindices   = 0;
2909:   a->rowvalues    = 0;
2910:   a->getrowactive = PETSC_FALSE;

2912:   PetscLayoutReference(matin->rmap,&mat->rmap);
2913:   PetscLayoutReference(matin->cmap,&mat->cmap);

2915:   if (oldmat->colmap) {
2916: #if defined(PETSC_USE_CTABLE)
2917:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2918: #else
2919:     PetscMalloc1(mat->cmap->N,&a->colmap);
2920:     PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2921:     PetscArraycpy(a->colmap,oldmat->colmap,mat->cmap->N);
2922: #endif
2923:   } else a->colmap = 0;
2924:   if (oldmat->garray) {
2925:     PetscInt len;
2926:     len  = oldmat->B->cmap->n;
2927:     PetscMalloc1(len+1,&a->garray);
2928:     PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2929:     if (len) { PetscArraycpy(a->garray,oldmat->garray,len); }
2930:   } else a->garray = 0;

2932:   /* It may happen that MatDuplicate() is called with a non-assembled matrix;
2933:      in fact, MatDuplicate() only requires the matrix to be preallocated.
2934:      This may happen inside a DMCreateMatrix_Shell */
2935:   if (oldmat->lvec) {
2936:     VecDuplicate(oldmat->lvec,&a->lvec);
2937:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2938:   }
2939:   if (oldmat->Mvctx) {
2940:     VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2941:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);
2942:   }
2943:   if (oldmat->Mvctx_mpi1) {
2944:     VecScatterCopy(oldmat->Mvctx_mpi1,&a->Mvctx_mpi1);
2945:     PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx_mpi1);
2946:   }

2948:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2949:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2950:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2951:   PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2952:   PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2953:   *newmat = mat;
2954:   return(0);
2955: }

2957: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2958: {
2959:   PetscBool      isbinary, ishdf5;

2965:   /* force binary viewer to load .info file if it has not yet done so */
2966:   PetscViewerSetUp(viewer);
2967:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
2968:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERHDF5,  &ishdf5);
2969:   if (isbinary) {
2970:     MatLoad_MPIAIJ_Binary(newMat,viewer);
2971:   } else if (ishdf5) {
2972: #if defined(PETSC_HAVE_HDF5)
2973:     MatLoad_AIJ_HDF5(newMat,viewer);
2974: #else
2975:     SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
2976: #endif
2977:   } else {
2978:     SETERRQ2(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"Viewer type %s not yet supported for reading %s matrices",((PetscObject)viewer)->type_name,((PetscObject)newMat)->type_name);
2979:   }
2980:   return(0);
2981: }

2983: PetscErrorCode MatLoad_MPIAIJ_Binary(Mat newMat, PetscViewer viewer)
2984: {
2985:   PetscScalar    *vals,*svals;
2986:   MPI_Comm       comm;
2988:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
2989:   PetscInt       i,nz,j,rstart,rend,mmax,maxnz = 0;
2990:   PetscInt       header[4],*rowlengths = 0,M,N,m,*cols;
2991:   PetscInt       *ourlens = NULL,*procsnz = NULL,*offlens = NULL,jj,*mycols,*smycols;
2992:   PetscInt       cend,cstart,n,*rowners;
2993:   int            fd;
2994:   PetscInt       bs = newMat->rmap->bs;

2997:   PetscObjectGetComm((PetscObject)viewer,&comm);
2998:   MPI_Comm_size(comm,&size);
2999:   MPI_Comm_rank(comm,&rank);
3000:   PetscViewerBinaryGetDescriptor(viewer,&fd);
3001:   if (!rank) {
3002:     PetscBinaryRead(fd,(char*)header,4,NULL,PETSC_INT);
3003:     if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
3004:     if (header[3] < 0) SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk,cannot load as MATMPIAIJ");
3005:   }

3007:   PetscOptionsBegin(comm,NULL,"Options for loading MATMPIAIJ matrix","Mat");
3008:   PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,NULL);
3009:   PetscOptionsEnd();
3010:   if (bs < 0) bs = 1;

3012:   MPI_Bcast(header+1,3,MPIU_INT,0,comm);
3013:   M    = header[1]; N = header[2];

3015:   /* If global sizes are set, check if they are consistent with that given in the file */
3016:   if (newMat->rmap->N >= 0 && newMat->rmap->N != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows:Matrix in file has (%D) and input matrix has (%D)",newMat->rmap->N,M);
3017:   if (newMat->cmap->N >=0 && newMat->cmap->N != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of cols:Matrix in file has (%D) and input matrix has (%D)",newMat->cmap->N,N);

3019:   /* determine ownership of all (block) rows */
3020:   if (M%bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows (%D) and block size (%D)",M,bs);
3021:   if (newMat->rmap->n < 0) m = bs*((M/bs)/size + (((M/bs) % size) > rank));    /* PETSC_DECIDE */
3022:   else m = newMat->rmap->n; /* Set by user */

3024:   PetscMalloc1(size+1,&rowners);
3025:   MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);

3027:   /* First process needs enough room for process with most rows */
3028:   if (!rank) {
3029:     mmax = rowners[1];
3030:     for (i=2; i<=size; i++) {
3031:       mmax = PetscMax(mmax, rowners[i]);
3032:     }
3033:   } else mmax = -1;             /* unused, but compilers complain */

3035:   rowners[0] = 0;
3036:   for (i=2; i<=size; i++) {
3037:     rowners[i] += rowners[i-1];
3038:   }
3039:   rstart = rowners[rank];
3040:   rend   = rowners[rank+1];

3042:   /* distribute row lengths to all processors */
3043:   PetscMalloc2(m,&ourlens,m,&offlens);
3044:   if (!rank) {
3045:     PetscBinaryRead(fd,ourlens,m,NULL,PETSC_INT);
3046:     PetscMalloc1(mmax,&rowlengths);
3047:     PetscCalloc1(size,&procsnz);
3048:     for (j=0; j<m; j++) {
3049:       procsnz[0] += ourlens[j];
3050:     }
3051:     for (i=1; i<size; i++) {
3052:       PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],NULL,PETSC_INT);
3053:       /* calculate the number of nonzeros on each processor */
3054:       for (j=0; j<rowners[i+1]-rowners[i]; j++) {
3055:         procsnz[i] += rowlengths[j];
3056:       }
3057:       MPIULong_Send(rowlengths,rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
3058:     }
3059:     PetscFree(rowlengths);
3060:   } else {
3061:     MPIULong_Recv(ourlens,m,MPIU_INT,0,tag,comm);
3062:   }

3064:   if (!rank) {
3065:     /* determine max buffer needed and allocate it */
3066:     maxnz = 0;
3067:     for (i=0; i<size; i++) {
3068:       maxnz = PetscMax(maxnz,procsnz[i]);
3069:     }
3070:     PetscMalloc1(maxnz,&cols);

3072:     /* read in my part of the matrix column indices  */
3073:     nz   = procsnz[0];
3074:     PetscMalloc1(nz,&mycols);
3075:     PetscBinaryRead(fd,mycols,nz,NULL,PETSC_INT);

3077:     /* read in everyone else's column indices and ship them off */
3078:     for (i=1; i<size; i++) {
3079:       nz   = procsnz[i];
3080:       PetscBinaryRead(fd,cols,nz,NULL,PETSC_INT);
3081:       MPIULong_Send(cols,nz,MPIU_INT,i,tag,comm);
3082:     }
3083:     PetscFree(cols);
3084:   } else {
3085:     /* determine buffer space needed for message */
3086:     nz = 0;
3087:     for (i=0; i<m; i++) {
3088:       nz += ourlens[i];
3089:     }
3090:     PetscMalloc1(nz,&mycols);

3092:     /* receive message of column indices */
3093:     MPIULong_Recv(mycols,nz,MPIU_INT,0,tag,comm);
3094:   }

3096:   /* determine column ownership if matrix is not square */
3097:   if (N != M) {
3098:     if (newMat->cmap->n < 0) n = N/size + ((N % size) > rank);
3099:     else n = newMat->cmap->n;
3100:     MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);
3101:     cstart = cend - n;
3102:   } else {
3103:     cstart = rstart;
3104:     cend   = rend;
3105:     n      = cend - cstart;
3106:   }

3108:   /* loop over local rows, determining number of off diagonal entries */
3109:   PetscArrayzero(offlens,m);
3110:   jj   = 0;
3111:   for (i=0; i<m; i++) {
3112:     for (j=0; j<ourlens[i]; j++) {
3113:       if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
3114:       jj++;
3115:     }
3116:   }

3118:   for (i=0; i<m; i++) {
3119:     ourlens[i] -= offlens[i];
3120:   }
3121:   MatSetSizes(newMat,m,n,M,N);

3123:   if (bs > 1) {MatSetBlockSize(newMat,bs);}

3125:   MatMPIAIJSetPreallocation(newMat,0,ourlens,0,offlens);

3127:   for (i=0; i<m; i++) {
3128:     ourlens[i] += offlens[i];
3129:   }

3131:   if (!rank) {
3132:     PetscMalloc1(maxnz+1,&vals);

3134:     /* read in my part of the matrix numerical values  */
3135:     nz   = procsnz[0];
3136:     PetscBinaryRead(fd,vals,nz,NULL,PETSC_SCALAR);

3138:     /* insert into matrix */
3139:     jj      = rstart;
3140:     smycols = mycols;
3141:     svals   = vals;
3142:     for (i=0; i<m; i++) {
3143:       MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
3144:       smycols += ourlens[i];
3145:       svals   += ourlens[i];
3146:       jj++;
3147:     }

3149:     /* read in the other processors' values and ship them out */
3150:     for (i=1; i<size; i++) {
3151:       nz   = procsnz[i];
3152:       PetscBinaryRead(fd,vals,nz,NULL,PETSC_SCALAR);
3153:       MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newMat)->tag,comm);
3154:     }
3155:     PetscFree(procsnz);
3156:   } else {
3157:     /* receive numeric values */
3158:     PetscMalloc1(nz+1,&vals);

3160:     /* receive message of values */
3161:     MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newMat)->tag,comm);

3163:     /* insert into matrix */
3164:     jj      = rstart;
3165:     smycols = mycols;
3166:     svals   = vals;
3167:     for (i=0; i<m; i++) {
3168:       MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
3169:       smycols += ourlens[i];
3170:       svals   += ourlens[i];
3171:       jj++;
3172:     }
3173:   }
3174:   PetscFree2(ourlens,offlens);
3175:   PetscFree(vals);
3176:   PetscFree(mycols);
3177:   PetscFree(rowners);
3178:   MatAssemblyBegin(newMat,MAT_FINAL_ASSEMBLY);
3179:   MatAssemblyEnd(newMat,MAT_FINAL_ASSEMBLY);
3180:   return(0);
3181: }

3183: /* Not scalable because of ISAllGather() unless getting all columns. */
3184: PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
3185: {
3187:   IS             iscol_local;
3188:   PetscBool      isstride;
3189:   PetscMPIInt    lisstride=0,gisstride;

3192:   /* check if we are grabbing all columns */
3193:   PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);

3195:   if (isstride) {
3196:     PetscInt  start,len,mstart,mlen;
3197:     ISStrideGetInfo(iscol,&start,NULL);
3198:     ISGetLocalSize(iscol,&len);
3199:     MatGetOwnershipRangeColumn(mat,&mstart,&mlen);
3200:     if (mstart == start && mlen-mstart == len) lisstride = 1;
3201:   }

3203:   MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
3204:   if (gisstride) {
3205:     PetscInt N;
3206:     MatGetSize(mat,NULL,&N);
3207:     ISCreateStride(PetscObjectComm((PetscObject)mat),N,0,1,&iscol_local);
3208:     ISSetIdentity(iscol_local);
3209:     PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");
3210:   } else {
3211:     PetscInt cbs;
3212:     ISGetBlockSize(iscol,&cbs);
3213:     ISAllGather(iscol,&iscol_local);
3214:     ISSetBlockSize(iscol_local,cbs);
3215:   }

3217:   *isseq = iscol_local;
3218:   return(0);
3219: }

3221: /*
3222:  Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and an iscol_local of global size
3223:  (see MatCreateSubMatrix_MPIAIJ_nonscalable)

3225:  Input Parameters:
3226:    mat - matrix
3227:    isrow - parallel row index set; its local indices are a subset of the local rows of mat,
3228:            i.e., mat->rstart <= isrow[i] < mat->rend
3229:    iscol - parallel column index set; its local indices are a subset of local columns of mat,
3230:            i.e., mat->cstart <= iscol[i] < mat->cend
3231:  Output Parameters:
3232:    isrow_d,iscol_d - sequential row and column index sets for retrieving mat->A
3233:    iscol_o - sequential column index set for retrieving mat->B
3234:    garray - column map; garray[i] indicates global location of iscol_o[i] in iscol
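
 Illustrative sketch (hypothetical values, not taken from the source): if the globally concatenated
 iscol is {0,3,7} and on this process mat->B holds global columns 3 and 7, then iscol_o selects the
 two corresponding local columns of mat->B and garray = {1,2}, since iscol_o[0] corresponds to
 iscol[1]=3 and iscol_o[1] corresponds to iscol[2]=7.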
3235:  */
3236: PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat,IS isrow,IS iscol,IS *isrow_d,IS *iscol_d,IS *iscol_o,const PetscInt *garray[])
3237: {
3239:   Vec            x,cmap;
3240:   const PetscInt *is_idx;
3241:   PetscScalar    *xarray,*cmaparray;
3242:   PetscInt       ncols,isstart,*idx,m,rstart,*cmap1,count;
3243:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3244:   Mat            B=a->B;
3245:   Vec            lvec=a->lvec,lcmap;
3246:   PetscInt       i,cstart,cend,Bn=B->cmap->N;
3247:   MPI_Comm       comm;
3248:   VecScatter     Mvctx=a->Mvctx;

3251:   PetscObjectGetComm((PetscObject)mat,&comm);
3252:   ISGetLocalSize(iscol,&ncols);

3254:   /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3255:   MatCreateVecs(mat,&x,NULL);
3256:   VecSet(x,-1.0);
3257:   VecDuplicate(x,&cmap);
3258:   VecSet(cmap,-1.0);

3260:   /* Get start indices */
3261:   MPI_Scan(&ncols,&isstart,1,MPIU_INT,MPI_SUM,comm);
3262:   isstart -= ncols;
3263:   MatGetOwnershipRangeColumn(mat,&cstart,&cend);

3265:   ISGetIndices(iscol,&is_idx);
3266:   VecGetArray(x,&xarray);
3267:   VecGetArray(cmap,&cmaparray);
3268:   PetscMalloc1(ncols,&idx);
3269:   for (i=0; i<ncols; i++) {
3270:     xarray[is_idx[i]-cstart]    = (PetscScalar)is_idx[i];
3271:     cmaparray[is_idx[i]-cstart] = i + isstart;      /* global index of iscol[i] */
3272:     idx[i]                      = is_idx[i]-cstart; /* local index of iscol[i]  */
3273:   }
3274:   VecRestoreArray(x,&xarray);
3275:   VecRestoreArray(cmap,&cmaparray);
3276:   ISRestoreIndices(iscol,&is_idx);

3278:   /* Get iscol_d */
3279:   ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,iscol_d);
3280:   ISGetBlockSize(iscol,&i);
3281:   ISSetBlockSize(*iscol_d,i);

3283:   /* Get isrow_d */
3284:   ISGetLocalSize(isrow,&m);
3285:   rstart = mat->rmap->rstart;
3286:   PetscMalloc1(m,&idx);
3287:   ISGetIndices(isrow,&is_idx);
3288:   for (i=0; i<m; i++) idx[i] = is_idx[i]-rstart;
3289:   ISRestoreIndices(isrow,&is_idx);

3291:   ISCreateGeneral(PETSC_COMM_SELF,m,idx,PETSC_OWN_POINTER,isrow_d);
3292:   ISGetBlockSize(isrow,&i);
3293:   ISSetBlockSize(*isrow_d,i);

3295:   /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3296:   VecScatterBegin(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3297:   VecScatterEnd(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);

3299:   VecDuplicate(lvec,&lcmap);

3301:   VecScatterBegin(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3302:   VecScatterEnd(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);

3304:   /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3305:   /* off-process column indices */
3306:   count = 0;
3307:   PetscMalloc1(Bn,&idx);
3308:   PetscMalloc1(Bn,&cmap1);

3310:   VecGetArray(lvec,&xarray);
3311:   VecGetArray(lcmap,&cmaparray);
3312:   for (i=0; i<Bn; i++) {
3313:     if (PetscRealPart(xarray[i]) > -1.0) {
3314:       idx[count]     = i;                   /* local column index in off-diagonal part B */
3315:       cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]);  /* column index in submat */
3316:       count++;
3317:     }
3318:   }
3319:   VecRestoreArray(lvec,&xarray);
3320:   VecRestoreArray(lcmap,&cmaparray);

3322:   ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,iscol_o);
3323:   /* cannot ensure iscol_o has same blocksize as iscol! */

3325:   PetscFree(idx);
3326:   *garray = cmap1;

3328:   VecDestroy(&x);
3329:   VecDestroy(&cmap);
3330:   VecDestroy(&lcmap);
3331:   return(0);
3332: }

3334: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3335: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *submat)
3336: {
3338:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)mat->data,*asub;
3339:   Mat            M = NULL;
3340:   MPI_Comm       comm;
3341:   IS             iscol_d,isrow_d,iscol_o;
3342:   Mat            Asub = NULL,Bsub = NULL;
3343:   PetscInt       n;

3346:   PetscObjectGetComm((PetscObject)mat,&comm);

3348:   if (call == MAT_REUSE_MATRIX) {
3349:     /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3350:     PetscObjectQuery((PetscObject)*submat,"isrow_d",(PetscObject*)&isrow_d);
3351:     if (!isrow_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow_d passed in was not used before, cannot reuse");

3353:     PetscObjectQuery((PetscObject)*submat,"iscol_d",(PetscObject*)&iscol_d);
3354:     if (!iscol_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_d passed in was not used before, cannot reuse");

3356:     PetscObjectQuery((PetscObject)*submat,"iscol_o",(PetscObject*)&iscol_o);
3357:     if (!iscol_o) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_o passed in was not used before, cannot reuse");

3359:     /* Update diagonal and off-diagonal portions of submat */
3360:     asub = (Mat_MPIAIJ*)(*submat)->data;
3361:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->A);
3362:     ISGetLocalSize(iscol_o,&n);
3363:     if (n) {
3364:       MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->B);
3365:     }
3366:     MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3367:     MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);

3369:   } else { /* call == MAT_INITIAL_MATRIX */
3370:     const PetscInt *garray;
3371:     PetscInt        BsubN;

3373:     /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3374:     ISGetSeqIS_SameColDist_Private(mat,isrow,iscol,&isrow_d,&iscol_d,&iscol_o,&garray);

3376:     /* Create local submatrices Asub and Bsub */
3377:     MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Asub);
3378:     MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Bsub);

3380:     /* Create submatrix M */
3381:     MatCreateMPIAIJWithSeqAIJ(comm,Asub,Bsub,garray,&M);

3383:     /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3384:     asub = (Mat_MPIAIJ*)M->data;

3386:     ISGetLocalSize(iscol_o,&BsubN);
3387:     n = asub->B->cmap->N;
3388:     if (BsubN > n) {
3389:       /* This case can be tested using ~petsc/src/tao/bound/examples/tutorials/runplate2_3 */
3390:       const PetscInt *idx;
3391:       PetscInt       i,j,*idx_new,*subgarray = asub->garray;
3392:       PetscInfo2(M,"submatrix Bn %D != BsubN %D, update iscol_o\n",n,BsubN);

3394:       PetscMalloc1(n,&idx_new);
3395:       j = 0;
3396:       ISGetIndices(iscol_o,&idx);
3397:       for (i=0; i<n; i++) {
3398:         if (j >= BsubN) break;
3399:         while (subgarray[i] > garray[j]) j++;

3401:         if (subgarray[i] == garray[j]) {
3402:           idx_new[i] = idx[j++];
3403:         } else SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"subgarray[%D]=%D cannot be smaller than garray[%D]=%D",i,subgarray[i],j,garray[j]);
3404:       }
3405:       ISRestoreIndices(iscol_o,&idx);

3407:       ISDestroy(&iscol_o);
3408:       ISCreateGeneral(PETSC_COMM_SELF,n,idx_new,PETSC_OWN_POINTER,&iscol_o);

3410:     } else if (BsubN < n) {
3411:       SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Columns of Bsub (%D) cannot be smaller than B's (%D)",BsubN,asub->B->cmap->N);
3412:     }

3414:     PetscFree(garray);
3415:     *submat = M;

3417:     /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3418:     PetscObjectCompose((PetscObject)M,"isrow_d",(PetscObject)isrow_d);
3419:     ISDestroy(&isrow_d);

3421:     PetscObjectCompose((PetscObject)M,"iscol_d",(PetscObject)iscol_d);
3422:     ISDestroy(&iscol_d);

3424:     PetscObjectCompose((PetscObject)M,"iscol_o",(PetscObject)iscol_o);
3425:     ISDestroy(&iscol_o);
3426:   }
3427:   return(0);
3428: }

3430: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3431: {
3433:   IS             iscol_local=NULL,isrow_d;
3434:   PetscInt       csize;
3435:   PetscInt       n,i,j,start,end;
3436:   PetscBool      sameRowDist=PETSC_FALSE,sameDist[2],tsameDist[2];
3437:   MPI_Comm       comm;

3440:   /* If isrow has same processor distribution as mat,
3441:      call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3442:   if (call == MAT_REUSE_MATRIX) {
3443:     PetscObjectQuery((PetscObject)*newmat,"isrow_d",(PetscObject*)&isrow_d);
3444:     if (isrow_d) {
3445:       sameRowDist  = PETSC_TRUE;
3446:       tsameDist[1] = PETSC_TRUE; /* sameColDist */
3447:     } else {
3448:       PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);
3449:       if (iscol_local) {
3450:         sameRowDist  = PETSC_TRUE;
3451:         tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3452:       }
3453:     }
3454:   } else {
3455:     /* Check if isrow has same processor distribution as mat */
3456:     sameDist[0] = PETSC_FALSE;
3457:     ISGetLocalSize(isrow,&n);
3458:     if (!n) {
3459:       sameDist[0] = PETSC_TRUE;
3460:     } else {
3461:       ISGetMinMax(isrow,&i,&j);
3462:       MatGetOwnershipRange(mat,&start,&end);
3463:       if (i >= start && j < end) {
3464:         sameDist[0] = PETSC_TRUE;
3465:       }
3466:     }

3468:     /* Check if iscol has same processor distribution as mat */
3469:     sameDist[1] = PETSC_FALSE;
3470:     ISGetLocalSize(iscol,&n);
3471:     if (!n) {
3472:       sameDist[1] = PETSC_TRUE;
3473:     } else {
3474:       ISGetMinMax(iscol,&i,&j);
3475:       MatGetOwnershipRangeColumn(mat,&start,&end);
3476:       if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3477:     }

3479:     PetscObjectGetComm((PetscObject)mat,&comm);
3480:     MPIU_Allreduce(&sameDist,&tsameDist,2,MPIU_BOOL,MPI_LAND,comm);
3481:     sameRowDist = tsameDist[0];
3482:   }

3484:   if (sameRowDist) {
3485:     if (tsameDist[1]) { /* sameRowDist & sameColDist */
3486:       /* isrow and iscol have same processor distribution as mat */
3487:       MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat,isrow,iscol,call,newmat);
3488:       return(0);
3489:     } else { /* sameRowDist */
3490:       /* isrow has same processor distribution as mat */
3491:       if (call == MAT_INITIAL_MATRIX) {
3492:         PetscBool sorted;
3493:         ISGetSeqIS_Private(mat,iscol,&iscol_local);
3494:         ISGetLocalSize(iscol_local,&n); /* local size of iscol_local = global columns of newmat */
3495:         ISGetSize(iscol,&i);
3496:         if (n != i) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"n %d != size of iscol %d",n,i);

3498:         ISSorted(iscol_local,&sorted);
3499:         if (sorted) {
3500:           /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
3501:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,iscol_local,MAT_INITIAL_MATRIX,newmat);
3502:           return(0);
3503:         }
3504:       } else { /* call == MAT_REUSE_MATRIX */
3505:         IS    iscol_sub;
3506:         PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3507:         if (iscol_sub) {
3508:           MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,NULL,call,newmat);
3509:           return(0);
3510:         }
3511:       }
3512:     }
3513:   }

3515:   /* General case: iscol -> iscol_local which has global size of iscol */
3516:   if (call == MAT_REUSE_MATRIX) {
3517:     PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3518:     if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3519:   } else {
3520:     if (!iscol_local) {
3521:       ISGetSeqIS_Private(mat,iscol,&iscol_local);
3522:     }
3523:   }

3525:   ISGetLocalSize(iscol,&csize);
3526:   MatCreateSubMatrix_MPIAIJ_nonscalable(mat,isrow,iscol_local,csize,call,newmat);

3528:   if (call == MAT_INITIAL_MATRIX) {
3529:     PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3530:     ISDestroy(&iscol_local);
3531:   }
3532:   return(0);
3533: }

3535: /*@C
3536:      MatCreateMPIAIJWithSeqAIJ - creates an MPIAIJ matrix using SeqAIJ matrices that contain the "diagonal"
3537:          and "off-diagonal" part of the matrix in CSR format.

3539:    Collective

3541:    Input Parameters:
3542: +  comm - MPI communicator
3543: .  A - "diagonal" portion of matrix
3544: .  B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3545: -  garray - global index of B columns

3547:    Output Parameter:
3548: .   mat - the matrix, with input A as its local diagonal matrix
3549:    Level: advanced

3551:    Notes:
3552:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3553:        A becomes part of output mat, B is destroyed by this routine. The user cannot use A and B anymore.
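
       A minimal calling sketch (illustrative only; Aloc, Bloc, and garray are assumed to have been
       built by the caller, with Aloc and Bloc being assembled SEQAIJ matrices of compatible sizes):
.vb
       Mat C;
       MatCreateMPIAIJWithSeqAIJ(PETSC_COMM_WORLD,Aloc,Bloc,garray,&C);
       /* Aloc is now owned by C and Bloc has been destroyed; neither may be used on its own afterwards */
.ve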

3555: .seealso: MatCreateMPIAIJWithSplitArrays()
3556: @*/
3557: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm,Mat A,Mat B,const PetscInt garray[],Mat *mat)
3558: {
3560:   Mat_MPIAIJ     *maij;
3561:   Mat_SeqAIJ     *b=(Mat_SeqAIJ*)B->data,*bnew;
3562:   PetscInt       *oi=b->i,*oj=b->j,i,nz,col;
3563:   PetscScalar    *oa=b->a;
3564:   Mat            Bnew;
3565:   PetscInt       m,n,N;

3568:   MatCreate(comm,mat);
3569:   MatGetSize(A,&m,&n);
3570:   if (m != B->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Am %D != Bm %D",m,B->rmap->N);
3571:   if (A->rmap->bs != B->rmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A row bs %D != B row bs %D",A->rmap->bs,B->rmap->bs);
3572:   /* remove check below; when B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be the same as A's */
3573:   /* if (A->cmap->bs != B->cmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %D != B column bs %D",A->cmap->bs,B->cmap->bs); */

3575:   /* Get global columns of mat */
3576:   MPIU_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,comm);

3578:   MatSetSizes(*mat,m,n,PETSC_DECIDE,N);
3579:   MatSetType(*mat,MATMPIAIJ);
3580:   MatSetBlockSizes(*mat,A->rmap->bs,A->cmap->bs);
3581:   maij = (Mat_MPIAIJ*)(*mat)->data;

3583:   (*mat)->preallocated = PETSC_TRUE;

3585:   PetscLayoutSetUp((*mat)->rmap);
3586:   PetscLayoutSetUp((*mat)->cmap);

3588:   /* Set A as diagonal portion of *mat */
3589:   maij->A = A;

3591:   nz = oi[m];
3592:   for (i=0; i<nz; i++) {
3593:     col   = oj[i];
3594:     oj[i] = garray[col];
3595:   }

3597:    /* Set Bnew as off-diagonal portion of *mat */
3598:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,N,oi,oj,oa,&Bnew);
3599:   bnew        = (Mat_SeqAIJ*)Bnew->data;
3600:   bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3601:   maij->B     = Bnew;

3603:   if (B->rmap->N != Bnew->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"BN %D != BnewN %D",B->rmap->N,Bnew->rmap->N);

3605:   b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3606:   b->free_a       = PETSC_FALSE;
3607:   b->free_ij      = PETSC_FALSE;
3608:   MatDestroy(&B);

3610:   bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3611:   bnew->free_a       = PETSC_TRUE;
3612:   bnew->free_ij      = PETSC_TRUE;

3614:   /* condense columns of maij->B */
3615:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
3616:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
3617:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
3618:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
3619:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3620:   return(0);
3621: }

3623: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);

3625: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat,IS isrow,IS iscol,IS iscol_local,MatReuse call,Mat *newmat)
3626: {
3628:   PetscInt       i,m,n,rstart,row,rend,nz,j,bs,cbs;
3629:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3630:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3631:   Mat            M,Msub,B=a->B;
3632:   MatScalar      *aa;
3633:   Mat_SeqAIJ     *aij;
3634:   PetscInt       *garray = a->garray,*colsub,Ncols;
3635:   PetscInt       count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3636:   IS             iscol_sub,iscmap;
3637:   const PetscInt *is_idx,*cmap;
3638:   PetscBool      allcolumns=PETSC_FALSE;
3639:   MPI_Comm       comm;

3642:   PetscObjectGetComm((PetscObject)mat,&comm);

3644:   if (call == MAT_REUSE_MATRIX) {
3645:     PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3646:     if (!iscol_sub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"SubIScol passed in was not used before, cannot reuse");
3647:     ISGetLocalSize(iscol_sub,&count);

3649:     PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);
3650:     if (!iscmap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Subcmap passed in was not used before, cannot reuse");

3652:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);
3653:     if (!Msub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");

3655:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);

3657:   } else { /* call == MAT_INITIAL_MATRIX */
3658:     PetscBool flg;

3660:     ISGetLocalSize(iscol,&n);
3661:     ISGetSize(iscol,&Ncols);

3663:     /* (1) iscol -> nonscalable iscol_local */
3664:     /* Check for special case: each processor gets entire matrix columns */
3665:     ISIdentity(iscol_local,&flg);
3666:     if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3667:     if (allcolumns) {
3668:       iscol_sub = iscol_local;
3669:       PetscObjectReference((PetscObject)iscol_local);
3670:       ISCreateStride(PETSC_COMM_SELF,n,0,1,&iscmap);

3672:     } else {
3673:       /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted; it can have duplicate indices */
3674:       PetscInt *idx,*cmap1,k;
3675:       PetscMalloc1(Ncols,&idx);
3676:       PetscMalloc1(Ncols,&cmap1);
3677:       ISGetIndices(iscol_local,&is_idx);
3678:       count = 0;
3679:       k     = 0;
3680:       for (i=0; i<Ncols; i++) {
3681:         j = is_idx[i];
3682:         if (j >= cstart && j < cend) {
3683:           /* diagonal part of mat */
3684:           idx[count]     = j;
3685:           cmap1[count++] = i; /* column index in submat */
3686:         } else if (Bn) {
3687:           /* off-diagonal part of mat */
3688:           if (j == garray[k]) {
3689:             idx[count]     = j;
3690:             cmap1[count++] = i;  /* column index in submat */
3691:           } else if (j > garray[k]) {
3692:             while (j > garray[k] && k < Bn-1) k++;
3693:             if (j == garray[k]) {
3694:               idx[count]     = j;
3695:               cmap1[count++] = i; /* column index in submat */
3696:             }
3697:           }
3698:         }
3699:       }
3700:       ISRestoreIndices(iscol_local,&is_idx);

3702:       ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_OWN_POINTER,&iscol_sub);
3703:       ISGetBlockSize(iscol,&cbs);
3704:       ISSetBlockSize(iscol_sub,cbs);

3706:       ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local),count,cmap1,PETSC_OWN_POINTER,&iscmap);
3707:     }

3709:     /* (3) Create sequential Msub */
3710:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,allcolumns,&Msub);
3711:   }

3713:   ISGetLocalSize(iscol_sub,&count);
3714:   aij  = (Mat_SeqAIJ*)(Msub)->data;
3715:   ii   = aij->i;
3716:   ISGetIndices(iscmap,&cmap);

3718:   /*
3719:       m - number of local rows
3720:       Ncols - number of columns (same on all processors)
3721:       rstart - first row in new global matrix generated
3722:   */
3723:   MatGetSize(Msub,&m,NULL);

3725:   if (call == MAT_INITIAL_MATRIX) {
3726:     /* (4) Create parallel newmat */
3727:     PetscMPIInt    rank,size;
3728:     PetscInt       csize;

3730:     MPI_Comm_size(comm,&size);
3731:     MPI_Comm_rank(comm,&rank);

3733:     /*
3734:         Determine the number of non-zeros in the diagonal and off-diagonal
3735:         portions of the matrix in order to do correct preallocation
3736:     */

3738:     /* first get start and end of "diagonal" columns */
3739:     ISGetLocalSize(iscol,&csize);
3740:     if (csize == PETSC_DECIDE) {
3741:       ISGetSize(isrow,&mglobal);
3742:       if (mglobal == Ncols) { /* square matrix */
3743:         nlocal = m;
3744:       } else {
3745:         nlocal = Ncols/size + ((Ncols % size) > rank);
3746:       }
3747:     } else {
3748:       nlocal = csize;
3749:     }
3750:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3751:     rstart = rend - nlocal;
3752:     if (rank == size - 1 && rend != Ncols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,Ncols);

3754:     /* next, compute all the lengths */
3755:     jj    = aij->j;
3756:     PetscMalloc1(2*m+1,&dlens);
3757:     olens = dlens + m;
3758:     for (i=0; i<m; i++) {
3759:       jend = ii[i+1] - ii[i];
3760:       olen = 0;
3761:       dlen = 0;
3762:       for (j=0; j<jend; j++) {
3763:         if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3764:         else dlen++;
3765:         jj++;
3766:       }
3767:       olens[i] = olen;
3768:       dlens[i] = dlen;
3769:     }

3771:     ISGetBlockSize(isrow,&bs);
3772:     ISGetBlockSize(iscol,&cbs);

3774:     MatCreate(comm,&M);
3775:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,Ncols);
3776:     MatSetBlockSizes(M,bs,cbs);
3777:     MatSetType(M,((PetscObject)mat)->type_name);
3778:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3779:     PetscFree(dlens);

3781:   } else { /* call == MAT_REUSE_MATRIX */
3782:     M    = *newmat;
3783:     MatGetLocalSize(M,&i,NULL);
3784:     if (i != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3785:     MatZeroEntries(M);
3786:     /*
3787:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3788:        rather than the slower MatSetValues().
3789:     */
3790:     M->was_assembled = PETSC_TRUE;
3791:     M->assembled     = PETSC_FALSE;
3792:   }

3794:   /* (5) Set values of Msub to *newmat */
3795:   PetscMalloc1(count,&colsub);
3796:   MatGetOwnershipRange(M,&rstart,NULL);

3798:   jj   = aij->j;
3799:   aa   = aij->a;
3800:   for (i=0; i<m; i++) {
3801:     row = rstart + i;
3802:     nz  = ii[i+1] - ii[i];
3803:     for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3804:     MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);
3805:     jj += nz; aa += nz;
3806:   }
3807:   ISRestoreIndices(iscmap,&cmap);

3809:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3810:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);

3812:   PetscFree(colsub);

3814:   /* save Msub, iscol_sub and iscmap used in processor for next request */
3815:   if (call ==  MAT_INITIAL_MATRIX) {
3816:     *newmat = M;
3817:     PetscObjectCompose((PetscObject)(*newmat),"SubMatrix",(PetscObject)Msub);
3818:     MatDestroy(&Msub);

3820:     PetscObjectCompose((PetscObject)(*newmat),"SubIScol",(PetscObject)iscol_sub);
3821:     ISDestroy(&iscol_sub);

3823:     PetscObjectCompose((PetscObject)(*newmat),"Subcmap",(PetscObject)iscmap);
3824:     ISDestroy(&iscmap);

3826:     if (iscol_local) {
3827:       PetscObjectCompose((PetscObject)(*newmat),"ISAllGather",(PetscObject)iscol_local);
3828:       ISDestroy(&iscol_local);
3829:     }
3830:   }
3831:   return(0);
3832: }

3834: /*
3835:     Not great since it makes two copies of the submatrix: first a local SeqAIJ
3836:   on each process, and then the final result formed by concatenating the local matrices.
3837:   Writing it directly would be much like MatCreateSubMatrices_MPIAIJ()

3839:   Note: This requires a sequential iscol with all indices.
3840: */
3841: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3842: {
3844:   PetscMPIInt    rank,size;
3845:   PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3846:   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3847:   Mat            M,Mreuse;
3848:   MatScalar      *aa,*vwork;
3849:   MPI_Comm       comm;
3850:   Mat_SeqAIJ     *aij;
3851:   PetscBool      colflag,allcolumns=PETSC_FALSE;

3854:   PetscObjectGetComm((PetscObject)mat,&comm);
3855:   MPI_Comm_rank(comm,&rank);
3856:   MPI_Comm_size(comm,&size);

3858:   /* Check for special case: each processor gets entire matrix columns */
3859:   ISIdentity(iscol,&colflag);
3860:   ISGetLocalSize(iscol,&n);
3861:   if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;

3863:   if (call ==  MAT_REUSE_MATRIX) {
3864:     PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3865:     if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3866:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,allcolumns,&Mreuse);
3867:   } else {
3868:     MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,allcolumns,&Mreuse);
3869:   }

3871:   /*
3872:       m - number of local rows
3873:       n - number of columns (same on all processors)
3874:       rstart - first row in new global matrix generated
3875:   */
3876:   MatGetSize(Mreuse,&m,&n);
3877:   MatGetBlockSizes(Mreuse,&bs,&cbs);
3878:   if (call == MAT_INITIAL_MATRIX) {
3879:     aij = (Mat_SeqAIJ*)(Mreuse)->data;
3880:     ii  = aij->i;
3881:     jj  = aij->j;

3883:     /*
3884:         Determine the number of non-zeros in the diagonal and off-diagonal
3885:         portions of the matrix in order to do correct preallocation
3886:     */

3888:     /* first get start and end of "diagonal" columns */
3889:     if (csize == PETSC_DECIDE) {
3890:       ISGetSize(isrow,&mglobal);
3891:       if (mglobal == n) { /* square matrix */
3892:         nlocal = m;
3893:       } else {
3894:         nlocal = n/size + ((n % size) > rank);
3895:       }
3896:     } else {
3897:       nlocal = csize;
3898:     }
3899:     MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3900:     rstart = rend - nlocal;
3901:     if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);

3903:     /* next, compute all the lengths */
3904:     PetscMalloc1(2*m+1,&dlens);
3905:     olens = dlens + m;
3906:     for (i=0; i<m; i++) {
3907:       jend = ii[i+1] - ii[i];
3908:       olen = 0;
3909:       dlen = 0;
3910:       for (j=0; j<jend; j++) {
3911:         if (*jj < rstart || *jj >= rend) olen++;
3912:         else dlen++;
3913:         jj++;
3914:       }
3915:       olens[i] = olen;
3916:       dlens[i] = dlen;
3917:     }
3918:     MatCreate(comm,&M);
3919:     MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3920:     MatSetBlockSizes(M,bs,cbs);
3921:     MatSetType(M,((PetscObject)mat)->type_name);
3922:     MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3923:     PetscFree(dlens);
3924:   } else {
3925:     PetscInt ml,nl;

3927:     M    = *newmat;
3928:     MatGetLocalSize(M,&ml,&nl);
3929:     if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3930:     MatZeroEntries(M);
3931:     /*
3932:          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3933:        rather than the slower MatSetValues().
3934:     */
3935:     M->was_assembled = PETSC_TRUE;
3936:     M->assembled     = PETSC_FALSE;
3937:   }
3938:   MatGetOwnershipRange(M,&rstart,&rend);
3939:   aij  = (Mat_SeqAIJ*)(Mreuse)->data;
3940:   ii   = aij->i;
3941:   jj   = aij->j;
3942:   aa   = aij->a;
3943:   for (i=0; i<m; i++) {
3944:     row   = rstart + i;
3945:     nz    = ii[i+1] - ii[i];
3946:     cwork = jj;     jj += nz;
3947:     vwork = aa;     aa += nz;
3948:     MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3949:   }

3951:   MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3952:   MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3953:   *newmat = M;

3955:   /* save submatrix used in processor for next request */
3956:   if (call ==  MAT_INITIAL_MATRIX) {
3957:     PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3958:     MatDestroy(&Mreuse);
3959:   }
3960:   return(0);
3961: }

3963: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3964: {
3965:   PetscInt       m,cstart, cend,j,nnz,i,d;
3966:   PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3967:   const PetscInt *JJ;
3969:   PetscBool      nooffprocentries;

3972:   if (Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0, it is %D",Ii[0]);

3974:   PetscLayoutSetUp(B->rmap);
3975:   PetscLayoutSetUp(B->cmap);
3976:   m      = B->rmap->n;
3977:   cstart = B->cmap->rstart;
3978:   cend   = B->cmap->rend;
3979:   rstart = B->rmap->rstart;

3981:   PetscCalloc2(m,&d_nnz,m,&o_nnz);

3983: #if defined(PETSC_USE_DEBUG)
3984:   for (i=0; i<m; i++) {
3985:     nnz = Ii[i+1]- Ii[i];
3986:     JJ  = J + Ii[i];
3987:     if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative %D number of columns",i,nnz);
3988:     if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3989:     if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3990:   }
3991: #endif

3993:   for (i=0; i<m; i++) {
3994:     nnz     = Ii[i+1]- Ii[i];
3995:     JJ      = J + Ii[i];
3996:     nnz_max = PetscMax(nnz_max,nnz);
3997:     d       = 0;
3998:     for (j=0; j<nnz; j++) {
3999:       if (cstart <= JJ[j] && JJ[j] < cend) d++;
4000:     }
4001:     d_nnz[i] = d;
4002:     o_nnz[i] = nnz - d;
4003:   }
4004:   MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
4005:   PetscFree2(d_nnz,o_nnz);

4007:   for (i=0; i<m; i++) {
4008:     ii   = i + rstart;
4009:     MatSetValues_MPIAIJ(B,1,&ii,Ii[i+1] - Ii[i],J+Ii[i], v ? v + Ii[i] : NULL,INSERT_VALUES);
4010:   }
4011:   nooffprocentries    = B->nooffprocentries;
4012:   B->nooffprocentries = PETSC_TRUE;
4013:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4014:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
4015:   B->nooffprocentries = nooffprocentries;

4017:   MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
4018:   return(0);
4019: }

4021: /*@
4022:    MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
4023:    (the default parallel PETSc format).

4025:    Collective

4027:    Input Parameters:
4028: +  B - the matrix
4029: .  i - the indices into j for the start of each local row (starts with zero)
4030: .  j - the column indices for each local row (starts with zero)
4031: -  v - optional values in the matrix

4033:    Level: developer

4035:    Notes:
4036:        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
4037:      thus you CANNOT change the matrix entries by changing the values of v[] after you have
4038:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

4040:        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.

4042:        The format used for the sparse matrix input is equivalent to a
4043:     row-major ordering, i.e., for the following matrix, the input data expected is
4044:     as shown

4046: $        1 0 0
4047: $        2 0 3     P0
4048: $       -------
4049: $        4 5 6     P1
4050: $
4051: $     Process0 [P0]: rows_owned=[0,1]
4052: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
4053: $        j =  {0,0,2}  [size = 3]
4054: $        v =  {1,2,3}  [size = 3]
4055: $
4056: $     Process1 [P1]: rows_owned=[2]
4057: $        i =  {0,3}    [size = nrow+1  = 1+1]
4058: $        j =  {0,1,2}  [size = 3]
4059: $        v =  {4,5,6}  [size = 3]
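
       A sketch of the corresponding call on process 0 (array names i0, j0, v0 are illustrative and
       assume B has already been created with its local sizes and type MATMPIAIJ set; process 1 makes
       the matching call with its own arrays):
$        PetscInt    i0[] = {0,1,3};
$        PetscInt    j0[] = {0,0,2};
$        PetscScalar v0[] = {1,2,3};
$        MatMPIAIJSetPreallocationCSR(B,i0,j0,v0);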

4061: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
4062:           MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
4063: @*/
4064: PetscErrorCode  MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
4065: {

4069:   PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
4070:   return(0);
4071: }

4073: /*@C
4074:    MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
4075:    (the default parallel PETSc format).  For good matrix assembly performance
4076:    the user should preallocate the matrix storage by setting the parameters
4077:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
4078:    performance can be increased by more than a factor of 50.

4080:    Collective

4082:    Input Parameters:
4083: +  B - the matrix
4084: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4085:            (same value is used for all local rows)
4086: .  d_nnz - array containing the number of nonzeros in the various rows of the
4087:            DIAGONAL portion of the local submatrix (possibly different for each row)
4088:            or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
4089:            The size of this array is equal to the number of local rows, i.e 'm'.
4090:            For matrices that will be factored, you must leave room for (and set)
4091:            the diagonal entry even if it is zero.
4092: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4093:            submatrix (same value is used for all local rows).
4094: -  o_nnz - array containing the number of nonzeros in the various rows of the
4095:            OFF-DIAGONAL portion of the local submatrix (possibly different for
4096:            each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
4097:            structure. The size of this array is equal to the number
4098:            of local rows, i.e 'm'.

4100:    If the *_nnz parameter is given then the *_nz parameter is ignored

4102:    The AIJ format (also called the Yale sparse matrix format or
4103:    compressed row storage (CSR)), is fully compatible with standard Fortran 77
4104:    storage.  The stored row and column indices begin with zero.
4105:    See Users-Manual: ch_mat for details.

4107:    The parallel matrix is partitioned such that the first m0 rows belong to
4108:    process 0, the next m1 rows belong to process 1, the next m2 rows belong
4109:    to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.

4111:    The DIAGONAL portion of the local submatrix of a processor can be defined
4112:    as the submatrix which is obtained by extracting the part corresponding to
4113:    the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
4114:    first row that belongs to the processor, r2 is the last row belonging to
4115:    this processor, and c1-c2 is the range of indices of the local part of a
4116:    vector suitable for applying the matrix to.  This is an mxn matrix.  In the
4117:    common case of a square matrix, the row and column ranges are the same and
4118:    the DIAGONAL part is also square. The remaining portion of the local
4119:    submatrix (mxN) constitutes the OFF-DIAGONAL portion.

4121:    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.

4123:    You can call MatGetInfo() to get information on how effective the preallocation was;
4124:    for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
4125:    You can also run with the option -info and look for messages with the string
4126:    malloc in them to see if additional memory allocation was needed.

4128:    Example usage:

4130:    Consider the following 8x8 matrix with 34 non-zero values, that is
4131:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4132:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4133:    as follows:

4135: .vb
4136:             1  2  0  |  0  3  0  |  0  4
4137:     Proc0   0  5  6  |  7  0  0  |  8  0
4138:             9  0 10  | 11  0  0  | 12  0
4139:     -------------------------------------
4140:            13  0 14  | 15 16 17  |  0  0
4141:     Proc1   0 18  0  | 19 20 21  |  0  0
4142:             0  0  0  | 22 23  0  | 24  0
4143:     -------------------------------------
4144:     Proc2  25 26 27  |  0  0 28  | 29  0
4145:            30  0  0  | 31 32 33  |  0 34
4146: .ve

4148:    This can be represented as a collection of submatrices as:

4150: .vb
4151:       A B C
4152:       D E F
4153:       G H I
4154: .ve

4156:    Where the submatrices A,B,C are owned by proc0, D,E,F are
4157:    owned by proc1, G,H,I are owned by proc2.

4159:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4160:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4161:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4163:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4164:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4165:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4166:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4167:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4168:    matrix, and [DF] as another SeqAIJ matrix.

4170:    When d_nz, o_nz parameters are specified, d_nz storage elements are
4171:    allocated for every row of the local diagonal submatrix, and o_nz
4172:    storage locations are allocated for every row of the OFF-DIAGONAL submat.
4173:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per local
4174:    row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4175:    In this case, the values of d_nz,o_nz are:
4176: .vb
4177:      proc0 : d_nz = 2, o_nz = 2
4178:      proc1 : d_nz = 3, o_nz = 2
4179:      proc2 : d_nz = 1, o_nz = 4
4180: .ve
4181:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4182:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4183:    for proc2, i.e. we are using 12+15+10=37 storage locations to store
4184:    34 values.

4186:    When d_nnz, o_nnz parameters are specified, the storage is specified
4187:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4188:    In the above case the values for d_nnz,o_nnz are:
4189: .vb
4190:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4191:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4192:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4193: .ve
4194:    Here the space allocated is the sum of all the above values, i.e. 34, and
4195:    hence the preallocation is perfect.
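
   As a sketch, the matching call on proc0 for the d_nnz/o_nnz values above could look like
   (array names are illustrative):
.vb
     PetscInt d_nnz[] = {2,2,2}, o_nnz[] = {2,2,2};
     MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
.ve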

4197:    Level: intermediate

4199: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
4200:           MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
4201: @*/
4202: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
4203: {

4209:   PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
4210:   return(0);
4211: }

4213: /*@
4214:      MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain the local rows
4215:          in standard CSR format.

4217:    Collective

4219:    Input Parameters:
4220: +  comm - MPI communicator
4221: .  m - number of local rows (Cannot be PETSC_DECIDE)
4222: .  n - This value should be the same as the local size used in creating the
4223:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
4224:        calculated if N is given). For square matrices n is almost always m.
4225: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4226: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4227: .   i - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
4228: .   j - column indices
4229: -   a - matrix values

4231:    Output Parameter:
4232: .   mat - the matrix

4234:    Level: intermediate

4236:    Notes:
4237:        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
4238:      thus you CANNOT change the matrix entries by changing the values of a[] after you have
4239:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

4241:        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.

4243:        The format used for the sparse matrix input is equivalent to a
4244:     row-major ordering, i.e., for the following matrix, the input data expected is
4245:     as shown

4247:        Once you have created the matrix you can update it with new numerical values using MatUpdateMPIAIJWithArrays().

4249: $        1 0 0
4250: $        2 0 3     P0
4251: $       -------
4252: $        4 5 6     P1
4253: $
4254: $     Process0 [P0]: rows_owned=[0,1]
4255: $        i =  {0,1,3}  [size = nrow+1  = 2+1]
4256: $        j =  {0,0,2}  [size = 3]
4257: $        v =  {1,2,3}  [size = 3]
4258: $
4259: $     Process1 [P1]: rows_owned=[2]
4260: $        i =  {0,3}    [size = nrow+1  = 1+1]
4261: $        j =  {0,1,2}  [size = 3]
4262: $        v =  {4,5,6}  [size = 3]
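
       A sketch of the call on process 0 for the data above (variable names are illustrative; process 1
       makes the matching call with its own arrays):
$        Mat         A;
$        PetscInt    i0[] = {0,1,3}, j0[] = {0,0,2};
$        PetscScalar v0[] = {1,2,3};
$        MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,2,PETSC_DECIDE,3,3,i0,j0,v0,&A);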

4264: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4265:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4266: @*/
4267: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4268: {

4272:   if (i && i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4273:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4274:   MatCreate(comm,mat);
4275:   MatSetSizes(*mat,m,n,M,N);
4276:   /* MatSetBlockSizes(M,bs,cbs); */
4277:   MatSetType(*mat,MATMPIAIJ);
4278:   MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
4279:   return(0);
4280: }
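
/*
   Minimal usage sketch (not part of the PETSc library) of the manual page above: it builds the 3x3
   example matrix from the documentation on exactly two MPI ranks, with rank 0 owning rows 0-1 and
   rank 1 owning row 2. The function name exampleCreateFromCSR, the two-rank assumption, and the
   omission of error checking are illustrative only.
*/
static PetscErrorCode exampleCreateFromCSR(MPI_Comm comm,Mat *mat)
{
  PetscMPIInt       rank;
  const PetscInt    i0[] = {0,1,3},j0[] = {0,0,2};
  const PetscScalar v0[] = {1,2,3};
  const PetscInt    i1[] = {0,3},j1[] = {0,1,2};
  const PetscScalar v1[] = {4,5,6};

  MPI_Comm_rank(comm,&rank);
  if (!rank) {      /* process 0 owns rows 0 and 1 */
    MatCreateMPIAIJWithArrays(comm,2,PETSC_DECIDE,3,3,i0,j0,v0,mat);
  } else {          /* process 1 owns row 2 */
    MatCreateMPIAIJWithArrays(comm,1,PETSC_DECIDE,3,3,i1,j1,v1,mat);
  }
  return 0;
}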

4282: /*@
4283:      MatUpdateMPIAIJWithArrays - updates an MPI AIJ matrix using arrays that contain the local rows in
4284:          standard CSR format. Only the numerical values are updated; the other arrays must be identical.

4286:    Collective

4288:    Input Parameters:
4289: +  mat - the matrix
4290: .  m - number of local rows (Cannot be PETSC_DECIDE)
4291: .  n - This value should be the same as the local size used in creating the
4292:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
4293:        calculated if N is given). For square matrices n is almost always m.
4294: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4295: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4296: .  Ii - row indices; that is Ii[0] = 0, Ii[row] = Ii[row-1] + number of elements in that row of the matrix
4297: .  J - column indices
4298: -  v - matrix values

4300:    Level: intermediate

4302: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4303:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4304: @*/
4305: PetscErrorCode MatUpdateMPIAIJWithArrays(Mat mat,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
4306: {
4308:   PetscInt       cstart,nnz,i,j;
4309:   PetscInt       *ld;
4310:   PetscBool      nooffprocentries;
4311:   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*)mat->data;
4312:   Mat_SeqAIJ     *Ad  = (Mat_SeqAIJ*)Aij->A->data, *Ao  = (Mat_SeqAIJ*)Aij->B->data;
4313:   PetscScalar    *ad = Ad->a, *ao = Ao->a;
4314:   const PetscInt *Adi = Ad->i;
4315:   PetscInt       ldi,Iii,md;

4318:   if (Ii[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4319:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4320:   if (m != mat->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of rows cannot change from call to MatUpdateMPIAIJWithArrays()");
4321:   if (n != mat->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of columns cannot change from call to MatUpdateMPIAIJWithArrays()");

4323:   cstart = mat->cmap->rstart;
4324:   if (!Aij->ld) {
4325:     /* count number of entries below block diagonal */
4326:     PetscCalloc1(m,&ld);
4327:     Aij->ld = ld;
4328:     for (i=0; i<m; i++) {
4329:       nnz  = Ii[i+1]- Ii[i];
4330:       j     = 0;
4331:       while  (J[j] < cstart && j < nnz) {j++;}
4332:       J    += nnz;
4333:       ld[i] = j;
4334:     }
4335:   } else {
4336:     ld = Aij->ld;
4337:   }

4339:   for (i=0; i<m; i++) {
4340:     nnz  = Ii[i+1]- Ii[i];
4341:     Iii  = Ii[i];
4342:     ldi  = ld[i];
4343:     md   = Adi[i+1]-Adi[i];
4344:     PetscArraycpy(ao,v + Iii,ldi);
4345:     PetscArraycpy(ad,v + Iii + ldi,md);
4346:     PetscArraycpy(ao + ldi,v + Iii + ldi + md,nnz - ldi - md);
4347:     ad  += md;
4348:     ao  += nnz - md;
4349:   }
4350:   nooffprocentries      = mat->nooffprocentries;
4351:   mat->nooffprocentries = PETSC_TRUE;
4352:   PetscObjectStateIncrease((PetscObject)Aij->A);
4353:   PetscObjectStateIncrease((PetscObject)Aij->B);
4354:   PetscObjectStateIncrease((PetscObject)mat);
4355:   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
4356:   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
4357:   mat->nooffprocentries = nooffprocentries;
4358:   return(0);
4359: }
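
/*
   Minimal usage sketch (not part of the PETSc library): once a matrix has been created with
   MatCreateMPIAIJWithArrays(), its numerical values can be refreshed in place as long as the i and j
   arrays (and hence the nonzero pattern) are unchanged. The function name exampleUpdateValues and the
   assumption that newv holds the new values in the original local CSR ordering are illustrative only;
   error checking is omitted.
*/
static PetscErrorCode exampleUpdateValues(Mat mat,PetscInt m,PetscInt n,const PetscInt i[],const PetscInt j[],const PetscScalar newv[])
{
  /* same m, n, i and j that were used at creation time; only the values change */
  return MatUpdateMPIAIJWithArrays(mat,m,n,PETSC_DETERMINE,PETSC_DETERMINE,i,j,newv);
}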

4361: /*@C
4362:    MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
4363:    (the default parallel PETSc format).  For good matrix assembly performance
4364:    the user should preallocate the matrix storage by setting the parameters
4365:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
4366:    performance can be increased by more than a factor of 50.

4368:    Collective

4370:    Input Parameters:
4371: +  comm - MPI communicator
4372: .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
4373:            This value should be the same as the local size used in creating the
4374:            y vector for the matrix-vector product y = Ax.
4375: .  n - This value should be the same as the local size used in creating the
4376:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
4377:        calculated if N is given). For square matrices n is almost always m.
4378: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4379: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4380: .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4381:            (same value is used for all local rows)
4382: .  d_nnz - array containing the number of nonzeros in the various rows of the
4383:            DIAGONAL portion of the local submatrix (possibly different for each row)
4384:            or NULL, if d_nz is used to specify the nonzero structure.
4385:            The size of this array is equal to the number of local rows, i.e 'm'.
4386: .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4387:            submatrix (same value is used for all local rows).
4388: -  o_nnz - array containing the number of nonzeros in the various rows of the
4389:            OFF-DIAGONAL portion of the local submatrix (possibly different for
4390:            each row) or NULL, if o_nz is used to specify the nonzero
4391:            structure. The size of this array is equal to the number
4392:            of local rows, i.e 'm'.

4394:    Output Parameter:
4395: .  A - the matrix

4397:    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
4398:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
4399:    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

4401:    Notes:
4402:    If the *_nnz parameter is given then the *_nz parameter is ignored

4404:    m,n,M,N parameters specify the size of the matrix, and its partitioning across
4405:    processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
4406:    storage requirements for this matrix.

4408:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
4409:    processor then it must be used on all processors that share the object for
4410:    that argument.

4412:    The user MUST specify either the local or global matrix dimensions
4413:    (possibly both).

4415:    The parallel matrix is partitioned across processors such that the
4416:    first m0 rows belong to process 0, the next m1 rows belong to
4417:    process 1, the next m2 rows belong to process 2, etc., where
4418:    m0,m1,m2,.. are the input parameter 'm'; i.e., each processor stores
4419:    values corresponding to an [m x N] submatrix.

4421:    The columns are logically partitioned with the n0 columns belonging
4422:    to 0th partition, the next n1 columns belonging to the next
4423:    partition etc.. where n0,n1,n2... are the input parameter 'n'.

4425:    The DIAGONAL portion of the local submatrix on any given processor
4426:    is the submatrix corresponding to the rows m and columns n owned by
4427:    that processor; i.e., the diagonal matrix on process 0 is [m0 x n0],
4428:    the diagonal matrix on process 1 is [m1 x n1], etc. The remaining
4429:    portion of the local submatrix, [m x (N-n)], constitutes the
4430:    OFF-DIAGONAL portion. The example below better illustrates this
4431:    concept.

4433:    For a square global matrix we define each processor's diagonal portion
4434:    to be its local rows and the corresponding columns (a square submatrix);
4435:    each processor's off-diagonal portion encompasses the remainder of the
4436:    local matrix (a rectangular submatrix).

4438:    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.

4440:    When calling this routine with a single process communicator, a matrix of
4441:    type SEQAIJ is returned.  If a matrix of type MPIAIJ is desired for this
4442:    type of communicator, use the construction mechanism
4443: .vb
4444:      MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
4445: .ve

4452:    By default, this format uses inodes (identical nodes) when possible.
4453:    We search for consecutive rows with the same nonzero structure, thereby
4454:    reusing matrix information to achieve increased efficiency.

4456:    Options Database Keys:
4457: +  -mat_no_inode  - Do not use inodes
4458: -  -mat_inode_limit <limit> - Sets inode limit (max limit=5)



4462:    Example usage:

4464:    Consider the following 8x8 matrix with 34 non-zero values, that is
4465:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4466:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4467:    as follows

4469: .vb
4470:             1  2  0  |  0  3  0  |  0  4
4471:     Proc0   0  5  6  |  7  0  0  |  8  0
4472:             9  0 10  | 11  0  0  | 12  0
4473:     -------------------------------------
4474:            13  0 14  | 15 16 17  |  0  0
4475:     Proc1   0 18  0  | 19 20 21  |  0  0
4476:             0  0  0  | 22 23  0  | 24  0
4477:     -------------------------------------
4478:     Proc2  25 26 27  |  0  0 28  | 29  0
4479:            30  0  0  | 31 32 33  |  0 34
4480: .ve

4482:    This can be represented as a collection of submatrices as

4484: .vb
4485:       A B C
4486:       D E F
4487:       G H I
4488: .ve

4490:    Where the submatrices A,B,C are owned by proc0, D,E,F are
4491:    owned by proc1, G,H,I are owned by proc2.

4493:    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4494:    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4495:    The 'M','N' parameters are 8,8, and have the same values on all procs.

4497:    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4498:    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4499:    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4500:    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4501:    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4502:    matrix, and [DF] as another SeqAIJ matrix.

4504:    When d_nz, o_nz parameters are specified, d_nz storage elements are
4505:    allocated for every row of the local diagonal submatrix, and o_nz
4506:    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4507:    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per
4508:    local row in the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
4509:    In this case, the values of d_nz,o_nz are
4510: .vb
4511:      proc0 : dnz = 2, o_nz = 2
4512:      proc1 : dnz = 3, o_nz = 2
4513:      proc2 : dnz = 1, o_nz = 4
4514: .ve
4515:    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4516:    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4517:    for proc2; i.e., we are using 12+15+10=37 storage locations to store
4518:    34 values.

4520:    When d_nnz, o_nnz parameters are specified, the storage is specified
4521:    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4522:    In the above case the values for d_nnz,o_nnz are
4523: .vb
4524:      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4525:      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4526:      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4527: .ve
4528:    Here the space allocated is the sum of all the above values, i.e., 34, and
4529:    hence pre-allocation is perfect.

4531:    Level: intermediate

4533: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4534:           MATMPIAIJ, MatCreateMPIAIJWithArrays()
4535: @*/
4536: PetscErrorCode  MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4537: {
4539:   PetscMPIInt    size;

4542:   MatCreate(comm,A);
4543:   MatSetSizes(*A,m,n,M,N);
4544:   MPI_Comm_size(comm,&size);
4545:   if (size > 1) {
4546:     MatSetType(*A,MATMPIAIJ);
4547:     MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
4548:   } else {
4549:     MatSetType(*A,MATSEQAIJ);
4550:     MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
4551:   }
4552:   return(0);
4553: }
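
/*
   Minimal usage sketch (not part of the PETSc library) of the preallocation discussed in the manual
   page above. The function name exampleCreateAIJ and the nonzero counts (at most 3 per row in the
   diagonal block and 2 per row in the off-diagonal block) are illustrative assumptions only, and
   error checking is omitted.
*/
static PetscErrorCode exampleCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,Mat *A)
{
  /* The convenience constructor documented above ... */
  MatCreateAIJ(comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,3,NULL,2,NULL,A);
  MatDestroy(A);

  /* ... or, equivalently, the recommended MatCreate()/MatSetType()/MatXXXXSetPreallocation() paradigm */
  MatCreate(comm,A);
  MatSetSizes(*A,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
  MatSetType(*A,MATAIJ);
  MatSeqAIJSetPreallocation(*A,3,NULL);        /* used when the communicator has a single process */
  MatMPIAIJSetPreallocation(*A,3,NULL,2,NULL); /* used when it has more than one process          */
  return 0;
}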

4555: /*@C
4556:   MatMPIAIJGetSeqAIJ - Returns the local piece of this distributed matrix
4557:   
4558:   Not collective
4559:   
4560:   Input Parameter:
4561: . A - The MPIAIJ matrix

4563:   Output Parameters:
4564: + Ad - The local diagonal block as a SeqAIJ matrix
4565: . Ao - The local off-diagonal block as a SeqAIJ matrix
4566: - colmap - An array mapping local column numbers of Ao to global column numbers of the parallel matrix

4568:   Note: The rows in Ad and Ao are in [0, Nr), where Nr is the number of local rows on this process. The columns
4569:   in Ad are in [0, Nc) where Nc is the number of local columns. The columns in Ao are in [0, Nco), where Nco is
4570:   the number of nonzero columns in the local off-diagonal piece of the matrix A. The array colmap maps these
4571:   local column numbers to global column numbers in the original matrix.

4573:   Level: intermediate

4575: .seealso: MatMPIAIJGetLocalMat(), MatMPIAIJGetLocalMatCondensed(), MatCreateAIJ(), MATMPIAIJ, MATSEQAIJ
4576: @*/
4577: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4578: {
4579:   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
4580:   PetscBool      flg;

4584:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&flg);
4585:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input");
4586:   if (Ad)     *Ad     = a->A;
4587:   if (Ao)     *Ao     = a->B;
4588:   if (colmap) *colmap = a->garray;
4589:   return(0);
4590: }
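
/*
   Minimal usage sketch (not part of the PETSc library): access the two sequential blocks of an MPIAIJ
   matrix and translate a local column index of the off-diagonal block back to global numbering through
   colmap, as described in the manual page above. The function name exampleInspectBlocks is illustrative
   only and error checking is omitted.
*/
static PetscErrorCode exampleInspectBlocks(Mat A)
{
  Mat            Ad,Ao;
  const PetscInt *colmap;
  PetscInt       nr,nc,nco;

  MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&colmap);
  MatGetSize(Ad,&nr,&nc);   /* local rows by local columns             */
  MatGetSize(Ao,&nr,&nco);  /* local rows by number of nonzero columns */
  if (nco) {
    PetscPrintf(PETSC_COMM_SELF,"first off-diagonal column is global column %D\n",colmap[0]);
  }
  return 0;
}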

4592: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4593: {
4595:   PetscInt       m,N,i,rstart,nnz,Ii;
4596:   PetscInt       *indx;
4597:   PetscScalar    *values;

4600:   MatGetSize(inmat,&m,&N);
4601:   if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4602:     PetscInt       *dnz,*onz,sum,bs,cbs;

4604:     if (n == PETSC_DECIDE) {
4605:       PetscSplitOwnership(comm,&n,&N);
4606:     }
4607:     /* Check sum(n) = N */
4608:     MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
4609:     if (sum != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns %D != global columns %D",sum,N);

4611:     MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
4612:     rstart -= m;

4614:     MatPreallocateInitialize(comm,m,n,dnz,onz);
4615:     for (i=0; i<m; i++) {
4616:       MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4617:       MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
4618:       MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4619:     }

4621:     MatCreate(comm,outmat);
4622:     MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4623:     MatGetBlockSizes(inmat,&bs,&cbs);
4624:     MatSetBlockSizes(*outmat,bs,cbs);
4625:     MatSetType(*outmat,MATAIJ);
4626:     MatSeqAIJSetPreallocation(*outmat,0,dnz);
4627:     MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
4628:     MatPreallocateFinalize(dnz,onz);
4629:   }

4631:   /* numeric phase */
4632:   MatGetOwnershipRange(*outmat,&rstart,NULL);
4633:   for (i=0; i<m; i++) {
4634:     MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4635:     Ii   = i + rstart;
4636:     MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4637:     MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4638:   }
4639:   MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4640:   MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4641:   return(0);
4642: }

4644: PetscErrorCode MatFileSplit(Mat A,char *outfile)
4645: {
4646:   PetscErrorCode    ierr;
4647:   PetscMPIInt       rank;
4648:   PetscInt          m,N,i,rstart,nnz;
4649:   size_t            len;
4650:   const PetscInt    *indx;
4651:   PetscViewer       out;
4652:   char              *name;
4653:   Mat               B;
4654:   const PetscScalar *values;

4657:   MatGetLocalSize(A,&m,0);
4658:   MatGetSize(A,0,&N);
4659:   /* Should this be the type of the diagonal block of A? */
4660:   MatCreate(PETSC_COMM_SELF,&B);
4661:   MatSetSizes(B,m,N,m,N);
4662:   MatSetBlockSizesFromMats(B,A,A);
4663:   MatSetType(B,MATSEQAIJ);
4664:   MatSeqAIJSetPreallocation(B,0,NULL);
4665:   MatGetOwnershipRange(A,&rstart,0);
4666:   for (i=0; i<m; i++) {
4667:     MatGetRow(A,i+rstart,&nnz,&indx,&values);
4668:     MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
4669:     MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
4670:   }
4671:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4672:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

4674:   MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
4675:   PetscStrlen(outfile,&len);
4676:   PetscMalloc1(len+5,&name);
4677:   sprintf(name,"%s.%d",outfile,rank);
4678:   PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
4679:   PetscFree(name);
4680:   MatView(B,out);
4681:   PetscViewerDestroy(&out);
4682:   MatDestroy(&B);
4683:   return(0);
4684: }

4686: PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
4687: {
4688:   PetscErrorCode      ierr;
4689:   Mat_Merge_SeqsToMPI *merge;
4690:   PetscContainer      container;

4693:   PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject*)&container);
4694:   if (container) {
4695:     PetscContainerGetPointer(container,(void**)&merge);
4696:     PetscFree(merge->id_r);
4697:     PetscFree(merge->len_s);
4698:     PetscFree(merge->len_r);
4699:     PetscFree(merge->bi);
4700:     PetscFree(merge->bj);
4701:     PetscFree(merge->buf_ri[0]);
4702:     PetscFree(merge->buf_ri);
4703:     PetscFree(merge->buf_rj[0]);
4704:     PetscFree(merge->buf_rj);
4705:     PetscFree(merge->coi);
4706:     PetscFree(merge->coj);
4707:     PetscFree(merge->owners_co);
4708:     PetscLayoutDestroy(&merge->rowmap);
4709:     PetscFree(merge);
4710:     PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);
4711:   }
4712:   MatDestroy_MPIAIJ(A);
4713:   return(0);
4714: }

4716:  #include <../src/mat/utils/freespace.h>
4717:  #include <petscbt.h>

4719: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4720: {
4721:   PetscErrorCode      ierr;
4722:   MPI_Comm            comm;
4723:   Mat_SeqAIJ          *a  =(Mat_SeqAIJ*)seqmat->data;
4724:   PetscMPIInt         size,rank,taga,*len_s;
4725:   PetscInt            N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4726:   PetscInt            proc,m;
4727:   PetscInt            **buf_ri,**buf_rj;
4728:   PetscInt            k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4729:   PetscInt            nrows,**buf_ri_k,**nextrow,**nextai;
4730:   MPI_Request         *s_waits,*r_waits;
4731:   MPI_Status          *status;
4732:   MatScalar           *aa=a->a;
4733:   MatScalar           **abuf_r,*ba_i;
4734:   Mat_Merge_SeqsToMPI *merge;
4735:   PetscContainer      container;

4738:   PetscObjectGetComm((PetscObject)mpimat,&comm);
4739:   PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);

4741:   MPI_Comm_size(comm,&size);
4742:   MPI_Comm_rank(comm,&rank);

4744:   PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4745:   PetscContainerGetPointer(container,(void**)&merge);

4747:   bi     = merge->bi;
4748:   bj     = merge->bj;
4749:   buf_ri = merge->buf_ri;
4750:   buf_rj = merge->buf_rj;

4752:   PetscMalloc1(size,&status);
4753:   owners = merge->rowmap->range;
4754:   len_s  = merge->len_s;

4756:   /* send and recv matrix values */
4757:   /*-----------------------------*/
4758:   PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4759:   PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);

4761:   PetscMalloc1(merge->nsend+1,&s_waits);
4762:   for (proc=0,k=0; proc<size; proc++) {
4763:     if (!len_s[proc]) continue;
4764:     i    = owners[proc];
4765:     MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4766:     k++;
4767:   }

4769:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4770:   if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4771:   PetscFree(status);

4773:   PetscFree(s_waits);
4774:   PetscFree(r_waits);

4776:   /* insert mat values of mpimat */
4777:   /*----------------------------*/
4778:   PetscMalloc1(N,&ba_i);
4779:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4781:   for (k=0; k<merge->nrecv; k++) {
4782:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4783:     nrows       = *(buf_ri_k[k]);
4784:     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
4785:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4786:   }

4788:   /* set values of ba */
4789:   m = merge->rowmap->n;
4790:   for (i=0; i<m; i++) {
4791:     arow = owners[rank] + i;
4792:     bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
4793:     bnzi = bi[i+1] - bi[i];
4794:     PetscArrayzero(ba_i,bnzi);

4796:     /* add local non-zero vals of this proc's seqmat into ba */
4797:     anzi   = ai[arow+1] - ai[arow];
4798:     aj     = a->j + ai[arow];
4799:     aa     = a->a + ai[arow];
4800:     nextaj = 0;
4801:     for (j=0; nextaj<anzi; j++) {
4802:       if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4803:         ba_i[j] += aa[nextaj++];
4804:       }
4805:     }

4807:     /* add received vals into ba */
4808:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4809:       /* i-th row */
4810:       if (i == *nextrow[k]) {
4811:         anzi   = *(nextai[k]+1) - *nextai[k];
4812:         aj     = buf_rj[k] + *(nextai[k]);
4813:         aa     = abuf_r[k] + *(nextai[k]);
4814:         nextaj = 0;
4815:         for (j=0; nextaj<anzi; j++) {
4816:           if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4817:             ba_i[j] += aa[nextaj++];
4818:           }
4819:         }
4820:         nextrow[k]++; nextai[k]++;
4821:       }
4822:     }
4823:     MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4824:   }
4825:   MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4826:   MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);

4828:   PetscFree(abuf_r[0]);
4829:   PetscFree(abuf_r);
4830:   PetscFree(ba_i);
4831:   PetscFree3(buf_ri_k,nextrow,nextai);
4832:   PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4833:   return(0);
4834: }

4836: PetscErrorCode  MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4837: {
4838:   PetscErrorCode      ierr;
4839:   Mat                 B_mpi;
4840:   Mat_SeqAIJ          *a=(Mat_SeqAIJ*)seqmat->data;
4841:   PetscMPIInt         size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4842:   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
4843:   PetscInt            M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4844:   PetscInt            len,proc,*dnz,*onz,bs,cbs;
4845:   PetscInt            k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4846:   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4847:   MPI_Request         *si_waits,*sj_waits,*ri_waits,*rj_waits;
4848:   MPI_Status          *status;
4849:   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
4850:   PetscBT             lnkbt;
4851:   Mat_Merge_SeqsToMPI *merge;
4852:   PetscContainer      container;

4855:   PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);

4857:   /* make sure it is a PETSc comm */
4858:   PetscCommDuplicate(comm,&comm,NULL);
4859:   MPI_Comm_size(comm,&size);
4860:   MPI_Comm_rank(comm,&rank);

4862:   PetscNew(&merge);
4863:   PetscMalloc1(size,&status);

4865:   /* determine row ownership */
4866:   /*---------------------------------------------------------*/
4867:   PetscLayoutCreate(comm,&merge->rowmap);
4868:   PetscLayoutSetLocalSize(merge->rowmap,m);
4869:   PetscLayoutSetSize(merge->rowmap,M);
4870:   PetscLayoutSetBlockSize(merge->rowmap,1);
4871:   PetscLayoutSetUp(merge->rowmap);
4872:   PetscMalloc1(size,&len_si);
4873:   PetscMalloc1(size,&merge->len_s);

4875:   m      = merge->rowmap->n;
4876:   owners = merge->rowmap->range;

4878:   /* determine the number of messages to send, their lengths */
4879:   /*---------------------------------------------------------*/
4880:   len_s = merge->len_s;

4882:   len          = 0; /* length of buf_si[] */
4883:   merge->nsend = 0;
4884:   for (proc=0; proc<size; proc++) {
4885:     len_si[proc] = 0;
4886:     if (proc == rank) {
4887:       len_s[proc] = 0;
4888:     } else {
4889:       len_si[proc] = owners[proc+1] - owners[proc] + 1;
4890:       len_s[proc]  = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4891:     }
4892:     if (len_s[proc]) {
4893:       merge->nsend++;
4894:       nrows = 0;
4895:       for (i=owners[proc]; i<owners[proc+1]; i++) {
4896:         if (ai[i+1] > ai[i]) nrows++;
4897:       }
4898:       len_si[proc] = 2*(nrows+1);
4899:       len         += len_si[proc];
4900:     }
4901:   }

4903:   /* determine the number and length of messages to receive for ij-structure */
4904:   /*-------------------------------------------------------------------------*/
4905:   PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4906:   PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);

4908:   /* post the Irecv of j-structure */
4909:   /*-------------------------------*/
4910:   PetscCommGetNewTag(comm,&tagj);
4911:   PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);

4913:   /* post the Isend of j-structure */
4914:   /*--------------------------------*/
4915:   PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);

4917:   for (proc=0, k=0; proc<size; proc++) {
4918:     if (!len_s[proc]) continue;
4919:     i    = owners[proc];
4920:     MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4921:     k++;
4922:   }

4924:   /* receives and sends of j-structure are complete */
4925:   /*------------------------------------------------*/
4926:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4927:   if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}

4929:   /* send and recv i-structure */
4930:   /*---------------------------*/
4931:   PetscCommGetNewTag(comm,&tagi);
4932:   PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);

4934:   PetscMalloc1(len+1,&buf_s);
4935:   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
4936:   for (proc=0,k=0; proc<size; proc++) {
4937:     if (!len_s[proc]) continue;
4938:     /* form outgoing message for i-structure:
4939:          buf_si[0]:                 nrows to be sent
4940:                [1:nrows]:           row index (global)
4941:                [nrows+1:2*nrows+1]: i-structure index
4942:     */
4943:     /*-------------------------------------------*/
4944:     nrows       = len_si[proc]/2 - 1;
4945:     buf_si_i    = buf_si + nrows+1;
4946:     buf_si[0]   = nrows;
4947:     buf_si_i[0] = 0;
4948:     nrows       = 0;
4949:     for (i=owners[proc]; i<owners[proc+1]; i++) {
4950:       anzi = ai[i+1] - ai[i];
4951:       if (anzi) {
4952:         buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4953:         buf_si[nrows+1]   = i-owners[proc]; /* local row index */
4954:         nrows++;
4955:       }
4956:     }
4957:     MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4958:     k++;
4959:     buf_si += len_si[proc];
4960:   }

4962:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4963:   if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}

4965:   PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4966:   for (i=0; i<merge->nrecv; i++) {
4967:     PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4968:   }

4970:   PetscFree(len_si);
4971:   PetscFree(len_ri);
4972:   PetscFree(rj_waits);
4973:   PetscFree2(si_waits,sj_waits);
4974:   PetscFree(ri_waits);
4975:   PetscFree(buf_s);
4976:   PetscFree(status);

4978:   /* compute a local seq matrix in each processor */
4979:   /*----------------------------------------------*/
4980:   /* allocate bi array and free space for accumulating nonzero column info */
4981:   PetscMalloc1(m+1,&bi);
4982:   bi[0] = 0;

4984:   /* create and initialize a linked list */
4985:   nlnk = N+1;
4986:   PetscLLCreate(N,N,nlnk,lnk,lnkbt);

4988:   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4989:   len  = ai[owners[rank+1]] - ai[owners[rank]];
4990:   PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);

4992:   current_space = free_space;

4994:   /* determine symbolic info for each local row */
4995:   PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);

4997:   for (k=0; k<merge->nrecv; k++) {
4998:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4999:     nrows       = *buf_ri_k[k];
5000:     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
5001:     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
5002:   }

5004:   MatPreallocateInitialize(comm,m,n,dnz,onz);
5005:   len  = 0;
5006:   for (i=0; i<m; i++) {
5007:     bnzi = 0;
5008:     /* add local non-zero cols of this proc's seqmat into lnk */
5009:     arow  = owners[rank] + i;
5010:     anzi  = ai[arow+1] - ai[arow];
5011:     aj    = a->j + ai[arow];
5012:     PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
5013:     bnzi += nlnk;
5014:     /* add received col data into lnk */
5015:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
5016:       if (i == *nextrow[k]) { /* i-th row */
5017:         anzi  = *(nextai[k]+1) - *nextai[k];
5018:         aj    = buf_rj[k] + *nextai[k];
5019:         PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
5020:         bnzi += nlnk;
5021:         nextrow[k]++; nextai[k]++;
5022:       }
5023:     }
5024:     if (len < bnzi) len = bnzi;  /* =max(bnzi) */

5026:     /* if free space is not available, make more free space */
5027:     if (current_space->local_remaining<bnzi) {
5028:       PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);
5029:       nspacedouble++;
5030:     }
5031:     /* copy data into free space, then initialize lnk */
5032:     PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
5033:     MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);

5035:     current_space->array           += bnzi;
5036:     current_space->local_used      += bnzi;
5037:     current_space->local_remaining -= bnzi;

5039:     bi[i+1] = bi[i] + bnzi;
5040:   }

5042:   PetscFree3(buf_ri_k,nextrow,nextai);

5044:   PetscMalloc1(bi[m]+1,&bj);
5045:   PetscFreeSpaceContiguous(&free_space,bj);
5046:   PetscLLDestroy(lnk,lnkbt);

5048:   /* create symbolic parallel matrix B_mpi */
5049:   /*---------------------------------------*/
5050:   MatGetBlockSizes(seqmat,&bs,&cbs);
5051:   MatCreate(comm,&B_mpi);
5052:   if (n==PETSC_DECIDE) {
5053:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
5054:   } else {
5055:     MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
5056:   }
5057:   MatSetBlockSizes(B_mpi,bs,cbs);
5058:   MatSetType(B_mpi,MATMPIAIJ);
5059:   MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
5060:   MatPreallocateFinalize(dnz,onz);
5061:   MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);

5063:   /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
5064:   B_mpi->assembled    = PETSC_FALSE;
5065:   B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
5066:   merge->bi           = bi;
5067:   merge->bj           = bj;
5068:   merge->buf_ri       = buf_ri;
5069:   merge->buf_rj       = buf_rj;
5070:   merge->coi          = NULL;
5071:   merge->coj          = NULL;
5072:   merge->owners_co    = NULL;

5074:   PetscCommDestroy(&comm);

5076:   /* attach the supporting struct to B_mpi for reuse */
5077:   PetscContainerCreate(PETSC_COMM_SELF,&container);
5078:   PetscContainerSetPointer(container,merge);
5079:   PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
5080:   PetscContainerDestroy(&container);
5081:   *mpimat = B_mpi;

5083:   PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
5084:   return(0);
5085: }

5087: /*@C
5088:       MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
5089:                  matrices from each processor

5091:     Collective

5093:    Input Parameters:
5094: +    comm - the communicator the parallel matrix will live on
5095: .    seqmat - the input sequential matrix on each process
5096: .    m - number of local rows (or PETSC_DECIDE)
5097: .    n - number of local columns (or PETSC_DECIDE)
5098: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5100:    Output Parameter:
5101: .    mpimat - the parallel matrix generated

5103:     Level: advanced

5105:    Notes:
5106:      The dimensions of the sequential matrix on each processor MUST be the same.
5107:      The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
5108:      destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
5109: @*/
5110: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
5111: {
5113:   PetscMPIInt    size;

5116:   MPI_Comm_size(comm,&size);
5117:   if (size == 1) {
5118:     PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
5119:     if (scall == MAT_INITIAL_MATRIX) {
5120:       MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
5121:     } else {
5122:       MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
5123:     }
5124:     PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
5125:     return(0);
5126:   }
5127:   PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
5128:   if (scall == MAT_INITIAL_MATRIX) {
5129:     MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
5130:   }
5131:   MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
5132:   PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
5133:   return(0);
5134: }
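
/*
   Minimal usage sketch (not part of the PETSc library): every process contributes a small sequential
   matrix of identical dimensions, and the contributions are summed into one parallel matrix as
   described in the manual page above. The function name exampleSumSeqAIJ and the 2x2 data are
   illustrative only; error checking is omitted.
*/
static PetscErrorCode exampleSumSeqAIJ(MPI_Comm comm,Mat *mpimat)
{
  Mat         seqmat;
  PetscInt    row = 0,col = 1;
  PetscScalar one = 1.0;

  MatCreateSeqAIJ(PETSC_COMM_SELF,2,2,1,NULL,&seqmat);
  MatSetValues(seqmat,1,&row,1,&col,&one,ADD_VALUES);
  MatAssemblyBegin(seqmat,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(seqmat,MAT_FINAL_ASSEMBLY);
  MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,mpimat);
  /* seqmat is not destroyed here; see the Notes in the manual page above */
  return 0;
}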

5136: /*@
5137:      MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
5138:           mlocal rows and n columns, where mlocal is the row count obtained with MatGetLocalSize() and n is the global column count obtained
5139:           with MatGetSize().

5141:     Not Collective

5143:    Input Parameters:
5144: +    A - the matrix
5145: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5147:    Output Parameter:
5148: .    A_loc - the local sequential matrix generated

5150:     Level: developer

5152: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed()

5154: @*/
5155: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
5156: {
5158:   Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
5159:   Mat_SeqAIJ     *mat,*a,*b;
5160:   PetscInt       *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
5161:   MatScalar      *aa,*ba,*cam;
5162:   PetscScalar    *ca;
5163:   PetscInt       am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
5164:   PetscInt       *ci,*cj,col,ncols_d,ncols_o,jo;
5165:   PetscBool      match;
5166:   MPI_Comm       comm;
5167:   PetscMPIInt    size;

5170:   PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&match);
5171:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5172:   PetscObjectGetComm((PetscObject)A,&comm);
5173:   MPI_Comm_size(comm,&size);
5174:   if (size == 1 && scall == MAT_REUSE_MATRIX) return(0);

5176:   PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
5177:   a = (Mat_SeqAIJ*)(mpimat->A)->data;
5178:   b = (Mat_SeqAIJ*)(mpimat->B)->data;
5179:   ai = a->i; aj = a->j; bi = b->i; bj = b->j;
5180:   aa = a->a; ba = b->a;
5181:   if (scall == MAT_INITIAL_MATRIX) {
5182:     if (size == 1) {
5183:       MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ai,aj,aa,A_loc);
5184:       return(0);
5185:     }

5187:     PetscMalloc1(1+am,&ci);
5188:     ci[0] = 0;
5189:     for (i=0; i<am; i++) {
5190:       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
5191:     }
5192:     PetscMalloc1(1+ci[am],&cj);
5193:     PetscMalloc1(1+ci[am],&ca);
5194:     k    = 0;
5195:     for (i=0; i<am; i++) {
5196:       ncols_o = bi[i+1] - bi[i];
5197:       ncols_d = ai[i+1] - ai[i];
5198:       /* off-diagonal portion of A */
5199:       for (jo=0; jo<ncols_o; jo++) {
5200:         col = cmap[*bj];
5201:         if (col >= cstart) break;
5202:         cj[k]   = col; bj++;
5203:         ca[k++] = *ba++;
5204:       }
5205:       /* diagonal portion of A */
5206:       for (j=0; j<ncols_d; j++) {
5207:         cj[k]   = cstart + *aj++;
5208:         ca[k++] = *aa++;
5209:       }
5210:       /* off-diagonal portion of A */
5211:       for (j=jo; j<ncols_o; j++) {
5212:         cj[k]   = cmap[*bj++];
5213:         ca[k++] = *ba++;
5214:       }
5215:     }
5216:     /* put together the new matrix */
5217:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
5218:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5219:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5220:     mat          = (Mat_SeqAIJ*)(*A_loc)->data;
5221:     mat->free_a  = PETSC_TRUE;
5222:     mat->free_ij = PETSC_TRUE;
5223:     mat->nonew   = 0;
5224:   } else if (scall == MAT_REUSE_MATRIX) {
5225:     mat=(Mat_SeqAIJ*)(*A_loc)->data;
5226:     ci = mat->i; cj = mat->j; cam = mat->a;
5227:     for (i=0; i<am; i++) {
5228:       /* off-diagonal portion of A */
5229:       ncols_o = bi[i+1] - bi[i];
5230:       for (jo=0; jo<ncols_o; jo++) {
5231:         col = cmap[*bj];
5232:         if (col >= cstart) break;
5233:         *cam++ = *ba++; bj++;
5234:       }
5235:       /* diagonal portion of A */
5236:       ncols_d = ai[i+1] - ai[i];
5237:       for (j=0; j<ncols_d; j++) *cam++ = *aa++;
5238:       /* off-diagonal portion of A */
5239:       for (j=jo; j<ncols_o; j++) {
5240:         *cam++ = *ba++; bj++;
5241:       }
5242:     }
5243:   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5244:   PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5245:   return(0);
5246: }
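
/*
   Minimal usage sketch (not part of the PETSc library): gather the local rows of a parallel AIJ matrix
   into one sequential matrix and refresh it later, after the numerical values of A have changed but its
   nonzero pattern has not. The function name exampleLocalMat is illustrative only; error checking is
   omitted.
*/
static PetscErrorCode exampleLocalMat(Mat A)
{
  Mat A_loc;

  MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc); /* allocates A_loc            */
  /* ... the values of A change, its nonzero pattern does not ... */
  MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);   /* refills the existing A_loc */
  MatDestroy(&A_loc);
  return 0;
}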

5248: /*@C
5249:      MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns

5251:     Not Collective

5253:    Input Parameters:
5254: +    A - the matrix
5255: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5256: -    row, col - index sets of rows and columns to extract (or NULL)

5258:    Output Parameter:
5259: .    A_loc - the local sequential matrix generated

5261:     Level: developer

5263: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()

5265: @*/
5266: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
5267: {
5268:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5270:   PetscInt       i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
5271:   IS             isrowa,iscola;
5272:   Mat            *aloc;
5273:   PetscBool      match;

5276:   PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
5277:   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5278:   PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
5279:   if (!row) {
5280:     start = A->rmap->rstart; end = A->rmap->rend;
5281:     ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
5282:   } else {
5283:     isrowa = *row;
5284:   }
5285:   if (!col) {
5286:     start = A->cmap->rstart;
5287:     cmap  = a->garray;
5288:     nzA   = a->A->cmap->n;
5289:     nzB   = a->B->cmap->n;
5290:     PetscMalloc1(nzA+nzB, &idx);
5291:     ncols = 0;
5292:     for (i=0; i<nzB; i++) {
5293:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5294:       else break;
5295:     }
5296:     imark = i;
5297:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;
5298:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
5299:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
5300:   } else {
5301:     iscola = *col;
5302:   }
5303:   if (scall != MAT_INITIAL_MATRIX) {
5304:     PetscMalloc1(1,&aloc);
5305:     aloc[0] = *A_loc;
5306:   }
5307:   MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
5308:   if (!col) { /* attach global id of condensed columns */
5309:     PetscObjectCompose((PetscObject)aloc[0],"_petsc_GetLocalMatCondensed_iscol",(PetscObject)iscola);
5310:   }
5311:   *A_loc = aloc[0];
5312:   PetscFree(aloc);
5313:   if (!row) {
5314:     ISDestroy(&isrowa);
5315:   }
5316:   if (!col) {
5317:     ISDestroy(&iscola);
5318:   }
5319:   PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
5320:   return(0);
5321: }
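
/*
   Minimal usage sketch (not part of the PETSc library): extract the local rows and only the nonzero
   columns of a parallel AIJ matrix; passing NULL for the row and column index sets lets the routine
   build them itself, as described in the manual page above. The function name
   exampleLocalMatCondensed is illustrative only; error checking is omitted.
*/
static PetscErrorCode exampleLocalMatCondensed(Mat A)
{
  Mat A_loc;

  MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_loc);
  MatDestroy(&A_loc);
  return 0;
}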

5323: /*
5324:  * Destroy a mat that may be composed with PetscSF communication objects.
5325:  * The SF objects were created in MatCreateSeqSubMatrixWithRows_Private.
5326:  * */
5327: PetscErrorCode MatDestroy_SeqAIJ_PetscSF(Mat mat)
5328: {
5329:   PetscSF          sf,osf;
5330:   IS               map;
5331:   PetscErrorCode   ierr;

5334:   PetscObjectQuery((PetscObject)mat,"diagsf",(PetscObject*)&sf);
5335:   PetscObjectQuery((PetscObject)mat,"offdiagsf",(PetscObject*)&osf);
5336:   PetscSFDestroy(&sf);
5337:   PetscSFDestroy(&osf);
5338:   PetscObjectQuery((PetscObject)mat,"aoffdiagtopothmapping",(PetscObject*)&map);
5339:   ISDestroy(&map);
5340:   MatDestroy_SeqAIJ(mat);
5341:   return(0);
5342: }

5344: /*
5345:  * Create a sequential AIJ matrix based on row indices; the whole row (all of its columns) is extracted once a row is matched.
5346:  * Rows may be local or remote. The routine is designed to be scalable in memory so that nothing is based
5347:  * on a global size.
5348:  * */
5349: PetscErrorCode MatCreateSeqSubMatrixWithRows_Private(Mat P,IS rows,Mat *P_oth)
5350: {
5351:   Mat_MPIAIJ               *p=(Mat_MPIAIJ*)P->data;
5352:   Mat_SeqAIJ               *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data,*p_oth;
5353:   PetscInt                 plocalsize,nrows,*ilocal,*oilocal,i,owner,lidx,*nrcols,*nlcols,ncol;
5354:   PetscSFNode              *iremote,*oiremote;
5355:   const PetscInt           *lrowindices;
5356:   PetscErrorCode           ierr;
5357:   PetscSF                  sf,osf;
5358:   PetscInt                 pcstart,*roffsets,*loffsets,*pnnz,j;
5359:   PetscInt                 ontotalcols,dntotalcols,ntotalcols,nout;
5360:   MPI_Comm                 comm;
5361:   ISLocalToGlobalMapping   mapping;

5364:   PetscObjectGetComm((PetscObject)P,&comm);
5365:   /* plocalsize is the number of roots
5366:    * nrows is the number of leaves
5367:    * */
5368:   MatGetLocalSize(P,&plocalsize,NULL);
5369:   ISGetLocalSize(rows,&nrows);
5370:   PetscCalloc1(nrows,&iremote);
5371:   ISGetIndices(rows,&lrowindices);
5372:   for (i=0;i<nrows;i++) {
5373:     /* Find a remote index and an owner for a row
5374:      * The row could be local or remote
5375:      * */
5376:     owner = 0;
5377:     lidx  = 0;
5378:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,&lidx);
5379:     iremote[i].index = lidx;
5380:     iremote[i].rank  = owner;
5381:   }
5382:   /* Create SF to communicate how many nonzero columns for each row */
5383:   PetscSFCreate(comm,&sf);
5384:    * SF will figure out the number of nonzero columns for each row, and their
5385:    * offsets
5386:    * */
5387:   PetscSFSetGraph(sf,plocalsize,nrows,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5388:   PetscSFSetFromOptions(sf);
5389:   PetscSFSetUp(sf);

5391:   PetscCalloc1(2*(plocalsize+1),&roffsets);
5392:   PetscCalloc1(2*plocalsize,&nrcols);
5393:   PetscCalloc1(nrows,&pnnz);
5394:   roffsets[0] = 0;
5395:   roffsets[1] = 0;
5396:   for (i=0;i<plocalsize;i++) {
5397:     /* diag */
5398:     nrcols[i*2+0] = pd->i[i+1] - pd->i[i];
5399:     /* off diag */
5400:     nrcols[i*2+1] = po->i[i+1] - po->i[i];
5401:     /* compute offsets so that we know the relative location of each row's data */
5402:     roffsets[(i+1)*2+0] = roffsets[i*2+0] + nrcols[i*2+0];
5403:     roffsets[(i+1)*2+1] = roffsets[i*2+1] + nrcols[i*2+1];
5404:   }
5405:   PetscCalloc1(2*nrows,&nlcols);
5406:   PetscCalloc1(2*nrows,&loffsets);
5407:   /* 'r' means root, and 'l' means leaf */
5408:   PetscSFBcastBegin(sf,MPIU_2INT,nrcols,nlcols);
5409:   PetscSFBcastBegin(sf,MPIU_2INT,roffsets,loffsets);
5410:   PetscSFBcastEnd(sf,MPIU_2INT,nrcols,nlcols);
5411:   PetscSFBcastEnd(sf,MPIU_2INT,roffsets,loffsets);
5412:   PetscSFDestroy(&sf);
5413:   PetscFree(roffsets);
5414:   PetscFree(nrcols);
5415:   dntotalcols = 0;
5416:   ontotalcols = 0;
5417:   ncol = 0;
5418:   for (i=0;i<nrows;i++) {
5419:     pnnz[i] = nlcols[i*2+0] + nlcols[i*2+1];
5420:     ncol = PetscMax(pnnz[i],ncol);
5421:     /* diag */
5422:     dntotalcols += nlcols[i*2+0];
5423:     /* off diag */
5424:     ontotalcols += nlcols[i*2+1];
5425:   }
5426:   /* We do not need to figure out the exact number of columns
5427:    * since all the calculations will be done by going through the raw data
5428:    * */
5429:   MatCreateSeqAIJ(PETSC_COMM_SELF,nrows,ncol,0,pnnz,P_oth);
5430:   MatSetUp(*P_oth);
5431:   PetscFree(pnnz);
5432:   p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5433:   /* diag */
5434:   PetscCalloc1(dntotalcols,&iremote);
5435:   /* off diag */
5436:   PetscCalloc1(ontotalcols,&oiremote);
5437:   /* diag */
5438:   PetscCalloc1(dntotalcols,&ilocal);
5439:   /* off diag */
5440:   PetscCalloc1(ontotalcols,&oilocal);
5441:   dntotalcols = 0;
5442:   ontotalcols = 0;
5443:   ntotalcols  = 0;
5444:   for (i=0;i<nrows;i++) {
5445:     owner = 0;
5446:     PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,NULL);
5447:     /* Set iremote for diag matrix */
5448:     for (j=0;j<nlcols[i*2+0];j++) {
5449:       iremote[dntotalcols].index   = loffsets[i*2+0] + j;
5450:       iremote[dntotalcols].rank    = owner;
5451:       /* P_oth is SeqAIJ so ilocal needs to point to the first part of memory */
5452:       ilocal[dntotalcols++]        = ntotalcols++;
5453:     }
5454:     /* off diag */
5455:     for (j=0;j<nlcols[i*2+1];j++) {
5456:       oiremote[ontotalcols].index   = loffsets[i*2+1] + j;
5457:       oiremote[ontotalcols].rank    = owner;
5458:       oilocal[ontotalcols++]        = ntotalcols++;
5459:     }
5460:   }
5461:   ISRestoreIndices(rows,&lrowindices);
5462:   PetscFree(loffsets);
5463:   PetscFree(nlcols);
5464:   PetscSFCreate(comm,&sf);
5465:   /* P serves as roots and P_oth is leaves
5466:    * Diag matrix
5467:    * */
5468:   PetscSFSetGraph(sf,pd->i[plocalsize],dntotalcols,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5469:   PetscSFSetFromOptions(sf);
5470:   PetscSFSetUp(sf);

5472:   PetscSFCreate(comm,&osf);
5473:   /* Off diag */
5474:   PetscSFSetGraph(osf,po->i[plocalsize],ontotalcols,oilocal,PETSC_OWN_POINTER,oiremote,PETSC_OWN_POINTER);
5475:   PetscSFSetFromOptions(osf);
5476:   PetscSFSetUp(osf);
5477:   /* We operate on the matrix internal data for saving memory */
5478:   PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a);
5479:   PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a);
5480:   MatGetOwnershipRangeColumn(P,&pcstart,NULL);
5481:   /* Convert to global indices for diag matrix */
5482:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] += pcstart;
5483:   PetscSFBcastBegin(sf,MPIU_INT,pd->j,p_oth->j);
5484:   /* We want P_oth to store global indices */
5485:   ISLocalToGlobalMappingCreate(comm,1,p->B->cmap->n,p->garray,PETSC_COPY_VALUES,&mapping);
5486:   /* Use memory scalable approach */
5487:   ISLocalToGlobalMappingSetType(mapping,ISLOCALTOGLOBALMAPPINGHASH);
5488:   ISLocalToGlobalMappingApply(mapping,po->i[plocalsize],po->j,po->j);
5489:   PetscSFBcastBegin(osf,MPIU_INT,po->j,p_oth->j);
5490:   PetscSFBcastEnd(sf,MPIU_INT,pd->j,p_oth->j);
5491:   /* Convert back to local indices */
5492:   for (i=0;i<pd->i[plocalsize];i++) pd->j[i] -= pcstart;
5493:   PetscSFBcastEnd(osf,MPIU_INT,po->j,p_oth->j);
5494:   nout = 0;
5495:   ISGlobalToLocalMappingApply(mapping,IS_GTOLM_DROP,po->i[plocalsize],po->j,&nout,po->j);
5496:   if (nout != po->i[plocalsize]) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP,"n %D does not equal to nout %D \n",po->i[plocalsize],nout);
5497:   ISLocalToGlobalMappingDestroy(&mapping);
5498:   /* Exchange values */
5499:   PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a);
5500:   PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a);
5501:   /* Stop PETSc from shrinking memory */
5502:   for (i=0;i<nrows;i++) p_oth->ilen[i] = p_oth->imax[i];
5503:   MatAssemblyBegin(*P_oth,MAT_FINAL_ASSEMBLY);
5504:   MatAssemblyEnd(*P_oth,MAT_FINAL_ASSEMBLY);
5505:   /* Attach PetscSF objects to P_oth so that we can reuse it later */
5506:   PetscObjectCompose((PetscObject)*P_oth,"diagsf",(PetscObject)sf);
5507:   PetscObjectCompose((PetscObject)*P_oth,"offdiagsf",(PetscObject)osf);
5508:   /* The new MatDestroy takes care of the PetscSF objects as well */
5509:   (*P_oth)->ops->destroy = MatDestroy_SeqAIJ_PetscSF;
5510:   return(0);
5511: }

5513: /*
5514:  * Creates a SeqAIJ matrix by taking rows of B that equal to nonzero columns of local A
5515:  * This supports MPIAIJ and MAIJ
5516:  * */
5517: PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat A,Mat P,PetscInt dof,MatReuse reuse,Mat *P_oth)
5518: {
5519:   Mat_MPIAIJ            *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
5520:   Mat_SeqAIJ            *p_oth;
5521:   Mat_SeqAIJ            *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
5522:   IS                    rows,map;
5523:   PetscHMapI            hamp;
5524:   PetscInt              i,htsize,*rowindices,off,*mapping,key,count;
5525:   MPI_Comm              comm;
5526:   PetscSF               sf,osf;
5527:   PetscBool             has;
5528:   PetscErrorCode        ierr;

5531:   PetscObjectGetComm((PetscObject)A,&comm);
5532:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,P,0,0);
5533:   /* If it is the first time, create an index set of off-diag nonzero columns of A,
5534:    *  and then create a submatrix (that often is an overlapping matrix)
5535:    * */
5536:   if (reuse==MAT_INITIAL_MATRIX) {
5537:     /* Use a hash table to figure out unique keys */
5538:     PetscHMapICreate(&hamp);
5539:     PetscHMapIResize(hamp,a->B->cmap->n);
5540:     PetscCalloc1(a->B->cmap->n,&mapping);
5541:     count = 0;
5542:     /* Assume that a->garray is sorted, otherwise the following does not make sense */
5543:     for (i=0;i<a->B->cmap->n;i++) {
5544:       key  = a->garray[i]/dof;
5545:       PetscHMapIHas(hamp,key,&has);
5546:       if (!has) {
5547:         mapping[i] = count;
5548:         PetscHMapISet(hamp,key,count++);
5549:       } else {
5550:         /* Current 'i' maps to the same key as the previous step */
5551:         mapping[i] = count-1;
5552:       }
5553:     }
5554:     ISCreateGeneral(comm,a->B->cmap->n,mapping,PETSC_OWN_POINTER,&map);
5555:     PetscHMapIGetSize(hamp,&htsize);
5556:     if (htsize!=count) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP," Size of hash map %D is inconsistent with count %D \n",htsize,count);
5557:     PetscCalloc1(htsize,&rowindices);
5558:     off = 0;
5559:     PetscHMapIGetKeys(hamp,&off,rowindices);
5560:     PetscHMapIDestroy(&hamp);
5561:     PetscSortInt(htsize,rowindices);
5562:     ISCreateGeneral(comm,htsize,rowindices,PETSC_OWN_POINTER,&rows);
5563:     /* In case the matrix was already created but the user wants to recreate it */
5564:     MatDestroy(P_oth);
5565:     MatCreateSeqSubMatrixWithRows_Private(P,rows,P_oth);
5566:     PetscObjectCompose((PetscObject)*P_oth,"aoffdiagtopothmapping",(PetscObject)map);
5567:     ISDestroy(&rows);
5568:   } else if (reuse==MAT_REUSE_MATRIX) {
5569:     /* If matrix was already created, we simply update values using SF objects
5570:      * that were attached to the matrix earlier.
5571:      *  */
5572:     PetscObjectQuery((PetscObject)*P_oth,"diagsf",(PetscObject*)&sf);
5573:     PetscObjectQuery((PetscObject)*P_oth,"offdiagsf",(PetscObject*)&osf);
5574:     if (!sf || !osf) {
5575:       SETERRQ(comm,PETSC_ERR_ARG_NULL,"Matrix is not initialized yet \n");
5576:     }
5577:     p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5578:     /* Update values in place */
5579:     PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a);
5580:     PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a);
5581:     PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a);
5582:     PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a);
5583:   } else {
5584:     SETERRQ(comm,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown reuse type \n");
5585:   }
5586:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,P,0,0);
5587:   return(0);
5588: }

5590: /*@C
5591:     MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of local A

5593:     Collective on Mat

5595:    Input Parameters:
5596: +    A,B - the matrices in mpiaij format
5597: .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5598: -    rowb, colb - index sets of rows and columns of B to extract (or NULL)

5600:    Output Parameter:
5601: +    rowb, colb - index sets of rows and columns of B to extract
5602: -    B_seq - the sequential matrix generated

5604:     Level: developer

5606: @*/
5607: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
5608: {
5609:   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
5611:   PetscInt       *idx,i,start,ncols,nzA,nzB,*cmap,imark;
5612:   IS             isrowb,iscolb;
5613:   Mat            *bseq=NULL;

5616:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5617:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5618:   }
5619:   PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);

5621:   if (scall == MAT_INITIAL_MATRIX) {
5622:     start = A->cmap->rstart;
5623:     cmap  = a->garray;
5624:     nzA   = a->A->cmap->n;
5625:     nzB   = a->B->cmap->n;
5626:     PetscMalloc1(nzA+nzB, &idx);
5627:     ncols = 0;
5628:     for (i=0; i<nzB; i++) {  /* row < local row index */
5629:       if (cmap[i] < start) idx[ncols++] = cmap[i];
5630:       else break;
5631:     }
5632:     imark = i;
5633:     for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
5634:     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5635:     ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
5636:     ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
5637:   } else {
5638:     if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5639:     isrowb  = *rowb; iscolb = *colb;
5640:     PetscMalloc1(1,&bseq);
5641:     bseq[0] = *B_seq;
5642:   }
5643:   MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
5644:   *B_seq = bseq[0];
5645:   PetscFree(bseq);
5646:   if (!rowb) {
5647:     ISDestroy(&isrowb);
5648:   } else {
5649:     *rowb = isrowb;
5650:   }
5651:   if (!colb) {
5652:     ISDestroy(&iscolb);
5653:   } else {
5654:     *colb = iscolb;
5655:   }
5656:   PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
5657:   return(0);
5658: }

5660: /*
5661:     MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
5662:     of the OFF-DIAGONAL portion of local A

5664:     Collective on Mat

5666:    Input Parameters:
5667: +    A,B - the matrices in mpiaij format
5668: -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

5670:    Output Parameter:
5671: +    startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5672: .    startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5673: .    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5674: -    B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N

5676:     Developer Notes: This directly accesses information inside the VecScatter associated with the matrix-vector product
5677:      for this matrix. This is not desirable.

5679:     Level: developer

5681: */
5682: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
5683: {
5684:   PetscErrorCode         ierr;
5685:   Mat_MPIAIJ             *a=(Mat_MPIAIJ*)A->data;
5686:   Mat_SeqAIJ             *b_oth;
5687:   VecScatter             ctx;
5688:   MPI_Comm               comm;
5689:   const PetscMPIInt      *rprocs,*sprocs;
5690:   const PetscInt         *srow,*rstarts,*sstarts;
5691:   PetscInt               *rowlen,*bufj,*bufJ,ncols = 0,aBn=a->B->cmap->n,row,*b_othi,*b_othj,*rvalues=NULL,*svalues=NULL,*cols,sbs,rbs;
5692:   PetscInt               i,j,k=0,l,ll,nrecvs,nsends,nrows,*rstartsj = 0,*sstartsj,len;
5693:   PetscScalar            *b_otha,*bufa,*bufA,*vals = NULL;
5694:   MPI_Request            *rwaits = NULL,*swaits = NULL;
5695:   MPI_Status             rstatus;
5696:   PetscMPIInt            jj,size,tag,rank,nsends_mpi,nrecvs_mpi;

5699:   PetscObjectGetComm((PetscObject)A,&comm);
5700:   MPI_Comm_size(comm,&size);

5702:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5703:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5704:   }
5705:   PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
5706:   MPI_Comm_rank(comm,&rank);

5708:   if (size == 1) {
5709:     startsj_s = NULL;
5710:     bufa_ptr  = NULL;
5711:     *B_oth    = NULL;
5712:     return(0);
5713:   }

5715:   ctx = a->Mvctx;
5716:   tag = ((PetscObject)ctx)->tag;

5718:   if (ctx->inuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Scatter ctx already in use");
5719:   VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&srow,&sprocs,&sbs);
5720:   /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5721:   VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL/*indices not needed*/,&rprocs,&rbs);
5722:   PetscMPIIntCast(nsends,&nsends_mpi);
5723:   PetscMPIIntCast(nrecvs,&nrecvs_mpi);
5724:   PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);

5726:   if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5727:   if (scall == MAT_INITIAL_MATRIX) {
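    /* For MAT_INITIAL_MATRIX the needed rows of B are gathered in three sweeps that share the same
       send/receive pattern obtained from the VecScatter: first the row lengths (i-array), from which
       b_othi and the message layouts sstartsj/rstartsj are built, then the column indices (j-array),
       and finally the numerical values (a-array) further below. */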
5728:     /* i-array */
5729:     /*---------*/
5730:     /*  post receives */
5731:     if (nrecvs) {PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);} /* rstarts can be NULL when nrecvs=0 */
5732:     for (i=0; i<nrecvs; i++) {
5733:       rowlen = rvalues + rstarts[i]*rbs;
5734:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
5735:       MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5736:     }

5738:     /* pack the outgoing message */
5739:     PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);

5741:     sstartsj[0] = 0;
5742:     rstartsj[0] = 0;
5743:     len         = 0; /* total length of j or a array to be sent */
5744:     if (nsends) {
5745:       k    = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */
5746:       PetscMalloc1(sbs*(sstarts[nsends]-sstarts[0]),&svalues);
5747:     }
5748:     for (i=0; i<nsends; i++) {
5749:       rowlen = svalues + (sstarts[i]-sstarts[0])*sbs;
5750:       nrows  = sstarts[i+1]-sstarts[i]; /* num of block rows */
5751:       for (j=0; j<nrows; j++) {
5752:         row = srow[k] + B->rmap->range[rank]; /* global row idx */
5753:         for (l=0; l<sbs; l++) {
5754:           MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL); /* rowlength */

5756:           rowlen[j*sbs+l] = ncols;

5758:           len += ncols;
5759:           MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);
5760:         }
5761:         k++;
5762:       }
5763:       MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);

5765:       sstartsj[i+1] = len;  /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5766:     }
5767:     /* recvs and sends of i-array are completed */
5768:     i = nrecvs;
5769:     while (i--) {
5770:       MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5771:     }
5772:     if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5773:     PetscFree(svalues);

5775:     /* allocate buffers for sending j and a arrays */
5776:     PetscMalloc1(len+1,&bufj);
5777:     PetscMalloc1(len+1,&bufa);

5779:     /* create i-array of B_oth */
5780:     PetscMalloc1(aBn+2,&b_othi);

5782:     b_othi[0] = 0;
5783:     len       = 0; /* total length of j or a array to be received */
5784:     k         = 0;
5785:     for (i=0; i<nrecvs; i++) {
5786:       rowlen = rvalues + (rstarts[i]-rstarts[0])*rbs;
5787:       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of rows to be received */
5788:       for (j=0; j<nrows; j++) {
5789:         b_othi[k+1] = b_othi[k] + rowlen[j];
5790:         PetscIntSumError(rowlen[j],len,&len);
5791:         k++;
5792:       }
5793:       rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5794:     }
5795:     PetscFree(rvalues);

5797:     /* allocate space for the j and a arrays of B_oth */
5798:     PetscMalloc1(b_othi[aBn]+1,&b_othj);
5799:     PetscMalloc1(b_othi[aBn]+1,&b_otha);

5801:     /* j-array */
5802:     /*---------*/
5803:     /*  post receives of j-array */
5804:     for (i=0; i<nrecvs; i++) {
5805:       nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5806:       MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5807:     }

5809:     /* pack the outgoing message j-array */
5810:     if (nsends) k = sstarts[0];
5811:     for (i=0; i<nsends; i++) {
5812:       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5813:       bufJ  = bufj+sstartsj[i];
5814:       for (j=0; j<nrows; j++) {
5815:         row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5816:         for (ll=0; ll<sbs; ll++) {
5817:           MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5818:           for (l=0; l<ncols; l++) {
5819:             *bufJ++ = cols[l];
5820:           }
5821:           MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5822:         }
5823:       }
5824:       MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
5825:     }

5827:     /* recvs and sends of j-array are completed */
5828:     i = nrecvs;
5829:     while (i--) {
5830:       MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5831:     }
5832:     if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5833:   } else if (scall == MAT_REUSE_MATRIX) {
5834:     sstartsj = *startsj_s;
5835:     rstartsj = *startsj_r;
5836:     bufa     = *bufa_ptr;
5837:     b_oth    = (Mat_SeqAIJ*)(*B_oth)->data;
5838:     b_otha   = b_oth->a;
5839:   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Unsupported MatReuse value");

5841:   /* a-array */
5842:   /*---------*/
5843:   /*  post receives of a-array */
5844:   for (i=0; i<nrecvs; i++) {
5845:     nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5846:     MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
5847:   }

5849:   /* pack the outgoing message a-array */
5850:   if (nsends) k = sstarts[0];
5851:   for (i=0; i<nsends; i++) {
5852:     nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5853:     bufA  = bufa+sstartsj[i];
5854:     for (j=0; j<nrows; j++) {
5855:       row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5856:       for (ll=0; ll<sbs; ll++) {
5857:         MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5858:         for (l=0; l<ncols; l++) {
5859:           *bufA++ = vals[l];
5860:         }
5861:         MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5862:       }
5863:     }
5864:     MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
5865:   }
5866:   /* recvs and sends of a-array are completed */
5867:   i = nrecvs;
5868:   while (i--) {
5869:     MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5870:   }
5871:   if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5872:   PetscFree2(rwaits,swaits);

5874:   if (scall == MAT_INITIAL_MATRIX) {
5875:     /* put together the new matrix */
5876:     MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);

5878:     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5879:     /* Since these are PETSc arrays, change flags to free them as necessary. */
5880:     b_oth          = (Mat_SeqAIJ*)(*B_oth)->data;
5881:     b_oth->free_a  = PETSC_TRUE;
5882:     b_oth->free_ij = PETSC_TRUE;
5883:     b_oth->nonew   = 0;

5885:     PetscFree(bufj);
5886:     if (!startsj_s || !bufa_ptr) {
5887:       PetscFree2(sstartsj,rstartsj);
5888:       PetscFree(bufa_ptr);
5889:     } else {
5890:       *startsj_s = sstartsj;
5891:       *startsj_r = rstartsj;
5892:       *bufa_ptr  = bufa;
5893:     }
5894:   }

5896:   VecScatterRestoreRemote_Private(ctx,PETSC_TRUE,&nsends,&sstarts,&srow,&sprocs,&sbs);
5897:   VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE,&nrecvs,&rstarts,NULL,&rprocs,&rbs);
5898:   PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
5899:   return(0);
5900: }
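
/*
   A minimal calling sketch for MatGetBrowsOfAoCols_MPIAIJ(), assuming A and B are MPIAIJ with
   compatible layouts: the arrays produced by the MAT_INITIAL_MATRIX call are handed back on later
   calls so that only the numerical values are re-communicated; the cleanup mirrors the allocations
   made above (PetscMalloc2 for the starts arrays, PetscMalloc1 for bufa).

     PetscInt  *startsj_s = NULL,*startsj_r = NULL;
     MatScalar *bufa      = NULL;
     Mat       B_oth      = NULL;

     MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_INITIAL_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);
     ....                                        B changes values but not structure
     MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_REUSE_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);

     PetscFree2(startsj_s,startsj_r);
     PetscFree(bufa);
     MatDestroy(&B_oth);
*/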

5902: /*@C
5903:   MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.

5905:   Not Collective

5907:   Input Parameter:
5908: . A - The matrix in mpiaij format

5910:   Output Parameters:
5911: + lvec - The local vector holding off-process values from the argument to a matrix-vector product
5912: . colmap - A map from global column index to local index into lvec
5913: - multScatter - A scatter from the argument of a matrix-vector product to lvec

5915:   Level: developer

5917: @*/
5918: #if defined(PETSC_USE_CTABLE)
5919: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
5920: #else
5921: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
5922: #endif
5923: {
5924:   Mat_MPIAIJ *a;

5931:   a = (Mat_MPIAIJ*) A->data;
5932:   if (lvec) *lvec = a->lvec;
5933:   if (colmap) *colmap = a->colmap;
5934:   if (multScatter) *multScatter = a->Mvctx;
5935:   return(0);
5936: }
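
/*
   A short access sketch for MatGetCommunicationStructs(); judging from the checks above, any output
   argument that is not needed may be passed as NULL.

     Vec        lvec;
     VecScatter sct;

     MatGetCommunicationStructs(A,&lvec,NULL,&sct);
*/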

5938: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
5939: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
5940: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat,MatType,MatReuse,Mat*);
5941: #if defined(PETSC_HAVE_MKL_SPARSE)
5942: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat,MatType,MatReuse,Mat*);
5943: #endif
5944: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
5945: #if defined(PETSC_HAVE_ELEMENTAL)
5946: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
5947: #endif
5948: #if defined(PETSC_HAVE_HYPRE)
5949: PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
5950: PETSC_INTERN PetscErrorCode MatMatMatMult_Transpose_AIJ_AIJ(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
5951: #endif
5952: PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat,MatType,MatReuse,Mat*);
5953: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat,MatType,MatReuse,Mat*);
5954: PETSC_INTERN PetscErrorCode MatPtAP_IS_XAIJ(Mat,Mat,MatReuse,PetscReal,Mat*);

5956: /*
5957:     Computes C = A*B as (B'*A')' since computing A*B directly is untenable

5959:                n                       p                          p
5960:         (              )       (              )         (                  )
5961:       m (      A       )  *  n (       B      )   =   m (         C        )
5962:         (              )       (              )         (                  )

5964: */
5965: PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
5966: {
5968:   Mat            At,Bt,Ct;

5971:   MatTranspose(A,MAT_INITIAL_MATRIX,&At);
5972:   MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
5973:   MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);
5974:   MatDestroy(&At);
5975:   MatDestroy(&Bt);
5976:   MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
5977:   MatDestroy(&Ct);
5978:   return(0);
5979: }

5981: PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
5982: {
5984:   PetscInt       m=A->rmap->n,n=B->cmap->n;
5985:   Mat            Cmat;

5988:   if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %D != B->rmap->n %D",A->cmap->n,B->rmap->n);
5989:   MatCreate(PetscObjectComm((PetscObject)A),&Cmat);
5990:   MatSetSizes(Cmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
5991:   MatSetBlockSizesFromMats(Cmat,A,B);
5992:   MatSetType(Cmat,MATMPIDENSE);
5993:   MatMPIDenseSetPreallocation(Cmat,NULL);
5994:   MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);
5995:   MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);

5997:   Cmat->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;

5999:   *C = Cmat;
6000:   return(0);
6001: }

6003: /* ----------------------------------------------------------------*/
6004: PETSC_INTERN PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
6005: {

6009:   if (scall == MAT_INITIAL_MATRIX) {
6010:     PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
6011:     MatMatMultSymbolic_MPIDense_MPIAIJ(A,B,fill,C);
6012:     PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
6013:   }
6014:   PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
6015:   MatMatMultNumeric_MPIDense_MPIAIJ(A,B,*C);
6016:   PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
6017:   return(0);
6018: }
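
/*
   A small usage sketch, assuming A is MATMPIDENSE and B is MATMPIAIJ with A->cmap->n == B->rmap->n:
   the symbolic phase above runs only for MAT_INITIAL_MATRIX, after which the numeric phase can be
   repeated with MAT_REUSE_MATRIX as the values of A or B change.

     Mat C;

     MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
     ....                                        update values of A or B, same sizes
     MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
     MatDestroy(&C);
*/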

6020: /*MC
6021:    MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.

6023:    Options Database Keys:
6024: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()

6026:    Level: beginner

6028:    Notes:
6029:     MatSetValues() may be called for this matrix type with a NULL argument for the numerical values;
6030:     in this case the values associated with the rows and columns one passes in are set to zero
6031:     in the matrix

6033:     MatSetOption(A,MAT_STRUCTURE_ONLY,PETSC_TRUE) may be called for this matrix type. In this case no
6034:     space is allocated for the nonzero entries and any values passed with MatSetValues() are ignored (a brief sketch follows this block)

6036: .seealso: MatCreateAIJ()
6037: M*/
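
/*
   A brief sketch of the Notes above, assuming A is a preallocated MATMPIAIJ matrix and rows/cols hold
   valid global indices: passing NULL for the values inserts explicit zeros at those locations, and
   with MAT_STRUCTURE_ONLY set no space is allocated for the values and entries passed with
   MatSetValues() are ignored.

     MatSetValues(A,nrows,rows,ncols,cols,NULL,INSERT_VALUES);     explicit zeros at the given locations
     MatSetOption(A,MAT_STRUCTURE_ONLY,PETSC_TRUE);                only the nonzero pattern is recorded
*/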

6039: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
6040: {
6041:   Mat_MPIAIJ     *b;
6043:   PetscMPIInt    size;

6046:   MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);

6048:   PetscNewLog(B,&b);
6049:   B->data       = (void*)b;
6050:   PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
6051:   B->assembled  = PETSC_FALSE;
6052:   B->insertmode = NOT_SET_VALUES;
6053:   b->size       = size;

6055:   MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);

6057:   /* build cache for off array entries formed */
6058:   MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);

6060:   b->donotstash  = PETSC_FALSE;
6061:   b->colmap      = 0;
6062:   b->garray      = 0;
6063:   b->roworiented = PETSC_TRUE;

6065:   /* stuff used for matrix vector multiply */
6066:   b->lvec  = NULL;
6067:   b->Mvctx = NULL;

6069:   /* stuff for MatGetRow() */
6070:   b->rowindices   = 0;
6071:   b->rowvalues    = 0;
6072:   b->getrowactive = PETSC_FALSE;

6074:   /* flexible pointer used in CUSP/CUSPARSE classes */
6075:   b->spptr = NULL;

6077:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);
6078:   PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);
6079:   PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);
6080:   PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);
6081:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);
6082:   PetscObjectComposeFunction((PetscObject)B,"MatResetPreallocation_C",MatResetPreallocation_MPIAIJ);
6083:   PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);
6084:   PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);
6085:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);
6086:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijsell_C",MatConvert_MPIAIJ_MPIAIJSELL);
6087: #if defined(PETSC_HAVE_MKL_SPARSE)
6088:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijmkl_C",MatConvert_MPIAIJ_MPIAIJMKL);
6089: #endif
6090:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);
6091:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);
6092: #if defined(PETSC_HAVE_ELEMENTAL)
6093:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);
6094: #endif
6095: #if defined(PETSC_HAVE_HYPRE)
6096:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);
6097: #endif
6098:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_XAIJ_IS);
6099:   PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisell_C",MatConvert_MPIAIJ_MPISELL);
6100:   PetscObjectComposeFunction((PetscObject)B,"MatMatMult_mpidense_mpiaij_C",MatMatMult_MPIDense_MPIAIJ);
6101:   PetscObjectComposeFunction((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",MatMatMultSymbolic_MPIDense_MPIAIJ);
6102:   PetscObjectComposeFunction((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",MatMatMultNumeric_MPIDense_MPIAIJ);
6103: #if defined(PETSC_HAVE_HYPRE)
6104:   PetscObjectComposeFunction((PetscObject)B,"MatMatMatMult_transpose_mpiaij_mpiaij_C",MatMatMatMult_Transpose_AIJ_AIJ);
6105: #endif
6106:   PetscObjectComposeFunction((PetscObject)B,"MatPtAP_is_mpiaij_C",MatPtAP_IS_XAIJ);
6107:   PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
6108:   return(0);
6109: }

6111: /*@C
6112:      MatCreateMPIAIJWithSplitArrays - creates a MPI AIJ matrix using arrays that contain the "diagonal"
6113:          and "off-diagonal" part of the matrix in CSR format.

6115:    Collective

6117:    Input Parameters:
6118: +  comm - MPI communicator
6119: .  m - number of local rows (Cannot be PETSC_DECIDE)
6120: .  n - This value should be the same as the local size used in creating the
6121:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
6122:        calculated if N is given). For square matrices n is almost always m.
6123: .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
6124: .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
6125: .   i - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
6126: .   j - column indices
6127: .   a - matrix values
6128: .   oi - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix
6129: .   oj - column indices
6130: -   oa - matrix values

6132:    Output Parameter:
6133: .   mat - the matrix

6135:    Level: advanced

6137:    Notes:
6138:        The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
6139:        must free the arrays once the matrix has been destroyed and not before.

6141:        The i and j indices are 0 based

6143:        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix

6145:        This sets local rows and cannot be used to set off-processor values.

6147:        Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
6148:        legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
6149:        not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
6150:        the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
6151:        keep track of the underlying array. Use MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
6152:        communication if it is known that only local entries will be set.

6154: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
6155:           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
6156: @*/
6157: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
6158: {
6160:   Mat_MPIAIJ     *maij;

6163:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
6164:   if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
6165:   if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
6166:   MatCreate(comm,mat);
6167:   MatSetSizes(*mat,m,n,M,N);
6168:   MatSetType(*mat,MATMPIAIJ);
6169:   maij = (Mat_MPIAIJ*) (*mat)->data;

6171:   (*mat)->preallocated = PETSC_TRUE;

6173:   PetscLayoutSetUp((*mat)->rmap);
6174:   PetscLayoutSetUp((*mat)->cmap);

6176:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
6177:   MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);

6179:   MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);
6180:   MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);
6181:   MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);
6182:   MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);

6184:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
6185:   MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
6186:   MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
6187:   MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
6188:   MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
6189:   return(0);
6190: }
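
/*
   A small sketch for two ranks each owning a 2 x 2 diagonal block of a 4 x 4 matrix; as implied by
   the MatCreateSeqAIJWithArrays() calls above, the j indices are local to the diagonal block while
   the oj indices are global column numbers, and all six arrays must outlive the matrix. Rank 1
   supplies its own arrays analogously.

     On rank 0 (global rows 0-1, diagonal columns 0-1):
       PetscInt    i[]  = {0,2,3}, j[]  = {0,1,1};   PetscScalar a[]  = {1.0,2.0,3.0};
       PetscInt    oi[] = {0,1,2}, oj[] = {2,3};     PetscScalar oa[] = {4.0,5.0};
       MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD,2,2,4,4,i,j,a,oi,oj,oa,&A);
*/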

6192: /*
6193:     Special version for direct calls from Fortran
6194: */
6195:  #include <petsc/private/fortranimpl.h>

6197: /* Change these macros so they can be used in a void function */
6198: #undef CHKERRQ
6199: #define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
6200: #undef SETERRQ2
6201: #define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
6202: #undef SETERRQ3
6203: #define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
6204: #undef SETERRQ
6205: #define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)
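
/* The Fortran stub below returns void, so an error code cannot be propagated back to the caller;
   the redefinitions above therefore abort through CHKERRABORT() instead of returning. */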

6207: #if defined(PETSC_HAVE_FORTRAN_CAPS)
6208: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
6209: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
6210: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
6211: #else
6212: #endif
6213: PETSC_EXTERN void PETSC_STDCALL matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
6214: {
6215:   Mat            mat  = *mmat;
6216:   PetscInt       m    = *mm, n = *mn;
6217:   InsertMode     addv = *maddv;
6218:   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
6219:   PetscScalar    value;

6222:   MatCheckPreallocated(mat,1);
6223:   if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;

6225: #if defined(PETSC_USE_DEBUG)
6226:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
6227: #endif
6228:   {
6229:     PetscInt  i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
6230:     PetscInt  cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
6231:     PetscBool roworiented = aij->roworiented;

6233:     /* Some Variables required in the macro */
6234:     Mat        A                    = aij->A;
6235:     Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
6236:     PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
6237:     MatScalar  *aa                  = a->a;
6238:     PetscBool  ignorezeroentries    = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
6239:     Mat        B                    = aij->B;
6240:     Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
6241:     PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
6242:     MatScalar  *ba                  = b->a;
6243:     /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
6244:      * cannot use "#if defined" inside a macro. */
6245:     PETSC_UNUSED PetscBool inserted = PETSC_FALSE;

6247:     PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
6248:     PetscInt  nonew = a->nonew;
6249:     MatScalar *ap1,*ap2;

6252:     for (i=0; i<m; i++) {
6253:       if (im[i] < 0) continue;
6254: #if defined(PETSC_USE_DEBUG)
6255:       if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
6256: #endif
6257:       if (im[i] >= rstart && im[i] < rend) {
6258:         row      = im[i] - rstart;
6259:         lastcol1 = -1;
6260:         rp1      = aj + ai[row];
6261:         ap1      = aa + ai[row];
6262:         rmax1    = aimax[row];
6263:         nrow1    = ailen[row];
6264:         low1     = 0;
6265:         high1    = nrow1;
6266:         lastcol2 = -1;
6267:         rp2      = bj + bi[row];
6268:         ap2      = ba + bi[row];
6269:         rmax2    = bimax[row];
6270:         nrow2    = bilen[row];
6271:         low2     = 0;
6272:         high2    = nrow2;

6274:         for (j=0; j<n; j++) {
6275:           if (roworiented) value = v[i*n+j];
6276:           else value = v[i+j*m];
6277:           if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
6278:           if (in[j] >= cstart && in[j] < cend) {
6279:             col = in[j] - cstart;
6280:             MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
6281: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
6282:             if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) A->offloadmask = PETSC_OFFLOAD_CPU;
6283: #endif
6284:           } else if (in[j] < 0) continue;
6285: #if defined(PETSC_USE_DEBUG)
6286:           /* extra brace on SETERRQ2() is required for --with-errorchecking=0 - due to the next 'else' clause */
6287:           else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
6288: #endif
6289:           else {
6290:             if (mat->was_assembled) {
6291:               if (!aij->colmap) {
6292:                 MatCreateColmap_MPIAIJ_Private(mat);
6293:               }
6294: #if defined(PETSC_USE_CTABLE)
6295:               PetscTableFind(aij->colmap,in[j]+1,&col);
6296:               col--;
6297: #else
6298:               col = aij->colmap[in[j]] - 1;
6299: #endif
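              /* col < 0 means global column in[j] is not present in the current off-diagonal block B;
                 unless new nonzeros are disallowed, disassemble so that B reverts to global column
                 indices and the new entry can be inserted. */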
6300:               if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
6301:                 MatDisAssemble_MPIAIJ(mat);
6302:                 col  =  in[j];
6303:                 /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
6304:                 B        = aij->B;
6305:                 b        = (Mat_SeqAIJ*)B->data;
6306:                 bimax    = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
6307:                 rp2      = bj + bi[row];
6308:                 ap2      = ba + bi[row];
6309:                 rmax2    = bimax[row];
6310:                 nrow2    = bilen[row];
6311:                 low2     = 0;
6312:                 high2    = nrow2;
6313:                 bm       = aij->B->rmap->n;
6314:                 ba       = b->a;
6315:                 inserted = PETSC_FALSE;
6316:               }
6317:             } else col = in[j];
6318:             MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
6319: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
6320:             if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) B->offloadmask = PETSC_OFFLOAD_CPU;
6321: #endif
6322:           }
6323:         }
6324:       } else if (!aij->donotstash) {
6325:         if (roworiented) {
6326:           MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6327:         } else {
6328:           MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6329:         }
6330:       }
6331:     }
6332:   }
6333:   PetscFunctionReturnVoid();
6334: }