Actual source code: mpisbaij.c

petsc-3.3-p7 2013-05-11
  2: #include <../src/mat/impls/baij/mpi/mpibaij.h>    /*I "petscmat.h" I*/
  3: #include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
  4: #include <../src/mat/impls/sbaij/seq/sbaij.h>
  5: #include <petscblaslapack.h>

  7: extern PetscErrorCode MatSetUpMultiply_MPISBAIJ(Mat);
  8: extern PetscErrorCode MatSetUpMultiply_MPISBAIJ_2comm(Mat);
  9: extern PetscErrorCode MatDisAssemble_MPISBAIJ(Mat);
 10: extern PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat,PetscInt,IS[],PetscInt);
 11: extern PetscErrorCode MatGetValues_SeqSBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar []);
 12: extern PetscErrorCode MatGetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar []);
 13: extern PetscErrorCode MatSetValues_SeqSBAIJ(Mat,PetscInt,const PetscInt [],PetscInt,const PetscInt [],const PetscScalar [],InsertMode);
 14: extern PetscErrorCode MatSetValuesBlocked_SeqSBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
 15: extern PetscErrorCode MatSetValuesBlocked_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
 16: extern PetscErrorCode MatGetRow_SeqSBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
 17: extern PetscErrorCode MatRestoreRow_SeqSBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
 18: extern PetscErrorCode MatZeroRows_SeqSBAIJ(Mat,IS,PetscScalar*,Vec,Vec);
 19: extern PetscErrorCode MatZeroRows_SeqBAIJ(Mat,IS,PetscScalar *,Vec,Vec);
 20: extern PetscErrorCode MatGetRowMaxAbs_MPISBAIJ(Mat,Vec,PetscInt[]);
 21: extern PetscErrorCode MatSOR_MPISBAIJ(Mat,Vec,PetscReal,MatSORType,PetscReal,PetscInt,PetscInt,Vec);

 23: EXTERN_C_BEGIN
 26: PetscErrorCode  MatStoreValues_MPISBAIJ(Mat mat)
 27: {
 28:   Mat_MPISBAIJ   *aij = (Mat_MPISBAIJ *)mat->data;

 32:   MatStoreValues(aij->A);
 33:   MatStoreValues(aij->B);
 34:   return(0);
 35: }
 36: EXTERN_C_END

 38: EXTERN_C_BEGIN
 41: PetscErrorCode  MatRetrieveValues_MPISBAIJ(Mat mat)
 42: {
 43:   Mat_MPISBAIJ   *aij = (Mat_MPISBAIJ *)mat->data;

 47:   MatRetrieveValues(aij->A);
 48:   MatRetrieveValues(aij->B);
 49:   return(0);
 50: }
 51: EXTERN_C_END
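
Both routines simply forward to the sequential diagonal block A and off-diagonal block B. A minimal caller-side sketch (not part of this file; assumes an assembled MATMPISBAIJ matrix mat whose nonzero pattern is frozen):

    MatSetOption(mat,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);
    MatStoreValues(mat);        /* save the current numerical values */
    /* ... overwrite values with MatSetValues() and reassemble ... */
    MatRetrieveValues(mat);     /* restore the saved values */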


 54: #define  MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
 55: { \
 56:  \
 57:     brow = row/bs;  \
 58:     rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
 59:     rmax = aimax[brow]; nrow = ailen[brow]; \
 60:       bcol = col/bs; \
 61:       ridx = row % bs; cidx = col % bs; \
 62:       low = 0; high = nrow; \
 63:       while (high-low > 3) { \
 64:         t = (low+high)/2; \
 65:         if (rp[t] > bcol) high = t; \
 66:         else              low  = t; \
 67:       } \
 68:       for (_i=low; _i<high; _i++) { \
 69:         if (rp[_i] > bcol) break; \
 70:         if (rp[_i] == bcol) { \
 71:           bap  = ap +  bs2*_i + bs*cidx + ridx; \
 72:           if (addv == ADD_VALUES) *bap += value;  \
 73:           else                    *bap  = value;  \
 74:           goto a_noinsert; \
 75:         } \
 76:       } \
 77:       if (a->nonew == 1) goto a_noinsert; \
 78:       if (a->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
 79:       MatSeqXAIJReallocateAIJ(A,a->mbs,bs2,nrow,brow,bcol,rmax,aa,ai,aj,rp,ap,aimax,a->nonew,MatScalar); \
 80:       N = nrow++ - 1;  \
 81:       /* shift up all the later entries in this row */ \
 82:       for (ii=N; ii>=_i; ii--) { \
 83:         rp[ii+1] = rp[ii]; \
 84:         PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
 85:       } \
 86:       if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar)); }  \
 87:       rp[_i]                      = bcol;  \
 88:       ap[bs2*_i + bs*cidx + ridx] = value;  \
 89:       a_noinsert:; \
 90:     ailen[brow] = nrow; \
 91: } 

 93: #define  MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
 94: { \
 95:     brow = row/bs;  \
 96:     rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
 97:     rmax = bimax[brow]; nrow = bilen[brow]; \
 98:       bcol = col/bs; \
 99:       ridx = row % bs; cidx = col % bs; \
100:       low = 0; high = nrow; \
101:       while (high-low > 3) { \
102:         t = (low+high)/2; \
103:         if (rp[t] > bcol) high = t; \
104:         else              low  = t; \
105:       } \
106:       for (_i=low; _i<high; _i++) { \
107:         if (rp[_i] > bcol) break; \
108:         if (rp[_i] == bcol) { \
109:           bap  = ap +  bs2*_i + bs*cidx + ridx; \
110:           if (addv == ADD_VALUES) *bap += value;  \
111:           else                    *bap  = value;  \
112:           goto b_noinsert; \
113:         } \
114:       } \
115:       if (b->nonew == 1) goto b_noinsert; \
116:       if (b->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
117:       MatSeqXAIJReallocateAIJ(B,b->mbs,bs2,nrow,brow,bcol,rmax,ba,bi,bj,rp,ap,bimax,b->nonew,MatScalar); \
118:       N = nrow++ - 1;  \
119:       /* shift up all the later entries in this row */ \
120:       for (ii=N; ii>=_i; ii--) { \
121:         rp[ii+1] = rp[ii]; \
122:         PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
123:       } \
124:       if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));}  \
125:       rp[_i]                      = bcol;  \
126:       ap[bs2*_i + bs*cidx + ridx] = value;  \
127:       b_noinsert:; \
128:     bilen[brow] = nrow; \
129: } 
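
Both macros address values inside a bs-by-bs block in column-major order: entry (ridx, cidx) of block _i lives at offset bs2*_i + bs*cidx + ridx in the value array. A worked example, assuming bs = 2 (so bs2 = 4): the point entry at matrix row 3, column 2 has ridx = 3 % 2 = 1 and cidx = 2 % 2 = 0, so within block _i = 1 it lands at offset 4*1 + 2*0 + 1 = 5.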

131: /* Only add/insert a(i,j) with i<=j (blocks).
132:    Any a(i,j) with i>j input by the user is ignored.
133: */
136: PetscErrorCode MatSetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
137: {
138:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
139:   MatScalar      value;
140:   PetscBool      roworiented = baij->roworiented;
142:   PetscInt       i,j,row,col;
143:   PetscInt       rstart_orig=mat->rmap->rstart;
144:   PetscInt       rend_orig=mat->rmap->rend,cstart_orig=mat->cmap->rstart;
145:   PetscInt       cend_orig=mat->cmap->rend,bs=mat->rmap->bs;

147:   /* Some Variables required in the macro */
148:   Mat            A = baij->A;
149:   Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(A)->data;
150:   PetscInt       *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
151:   MatScalar      *aa=a->a;

153:   Mat            B = baij->B;
154:   Mat_SeqBAIJ   *b = (Mat_SeqBAIJ*)(B)->data;
155:   PetscInt      *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
156:   MatScalar     *ba=b->a;

158:   PetscInt      *rp,ii,nrow,_i,rmax,N,brow,bcol;
159:   PetscInt      low,high,t,ridx,cidx,bs2=a->bs2;
160:   MatScalar     *ap,*bap;

162:   /* for stash */
163:   PetscInt      n_loc, *in_loc = PETSC_NULL;
164:   MatScalar     *v_loc = PETSC_NULL;

168:   if (!baij->donotstash){
169:     if (n > baij->n_loc) {
170:       PetscFree(baij->in_loc);
171:       PetscFree(baij->v_loc);
172:       PetscMalloc(n*sizeof(PetscInt),&baij->in_loc);
173:       PetscMalloc(n*sizeof(MatScalar),&baij->v_loc);
174:       baij->n_loc = n;
175:     }
176:     in_loc = baij->in_loc;
177:     v_loc  = baij->v_loc;
178:   }

180:   for (i=0; i<m; i++) {
181:     if (im[i] < 0) continue;
182: #if defined(PETSC_USE_DEBUG)
183:     if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
184: #endif
185:     if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
186:       row = im[i] - rstart_orig;              /* local row index */
187:       for (j=0; j<n; j++) {
188:         if (im[i]/bs > in[j]/bs){
189:           if (a->ignore_ltriangular){
190:             continue;    /* ignore lower triangular blocks */
191:           } else {
192:             SETERRQ(PETSC_COMM_SELF,PETSC_ERR_USER,"Lower triangular value cannot be set for sbaij format. To ignore these values, run with -mat_ignore_lower_triangular or call MatSetOption(mat,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE)");
193:           }
194:         }
195:         if (in[j] >= cstart_orig && in[j] < cend_orig){  /* diag entry (A) */
196:           col = in[j] - cstart_orig;          /* local col index */
197:           brow = row/bs; bcol = col/bs;
198:           if (brow > bcol) continue;  /* ignore lower triangular blocks of A */
199:           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
200:           MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
201:           /* MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv); */
202:         } else if (in[j] < 0) continue;
203: #if defined(PETSC_USE_DEBUG)
204:         else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
205: #endif
206:         else {  /* off-diag entry (B) */
207:           if (mat->was_assembled) {
208:             if (!baij->colmap) {
209:               MatCreateColmap_MPIBAIJ_Private(mat);
210:             }
211: #if defined (PETSC_USE_CTABLE)
212:             PetscTableFind(baij->colmap,in[j]/bs + 1,&col);
213:             col  = col - 1;
214: #else
215:             col = baij->colmap[in[j]/bs] - 1;
216: #endif
217:             if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
218:               MatDisAssemble_MPISBAIJ(mat);
219:               col =  in[j];
220:               /* Reinitialize the variables required by MatSetValues_SeqSBAIJ_B_Private() */
221:               B = baij->B;
222:               b = (Mat_SeqBAIJ*)(B)->data;
223:               bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
224:               ba=b->a;
225:             } else col += in[j]%bs;
226:           } else col = in[j];
227:           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
228:           MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
229:           /* MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv); */
230:         }
231:       }
232:     } else {  /* off processor entry */
233:       if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
234:       if (!baij->donotstash) {
235:         mat->assembled = PETSC_FALSE;
236:         n_loc = 0;
237:         for (j=0; j<n; j++){
238:           if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
239:           in_loc[n_loc] = in[j];
240:           if (roworiented) {
241:             v_loc[n_loc] = v[i*n+j];
242:           } else {
243:             v_loc[n_loc] = v[j*m+i];
244:           }
245:           n_loc++;
246:         }
247:         MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc,PETSC_FALSE);
248:       }
249:     }
250:   }
251:   return(0);
252: }
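
A caller-side sketch (not part of this file): since only the block upper triangle is stored, a typical loop sets each locally owned row's diagonal and upper entries and then assembles; input with block row greater than block column is rejected or dropped as coded above.

    PetscInt    row = 5, cols[2];
    PetscScalar vals[2];
    cols[0] = 5; vals[0] = 4.0;    /* diagonal entry */
    cols[1] = 7; vals[1] = -1.0;   /* upper-triangular entry: col >= row */
    MatSetValues(mat,1,&row,2,cols,vals,INSERT_VALUES);
    MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);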

256: PetscErrorCode MatSetValuesBlocked_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
257: {
258:   Mat_MPISBAIJ    *baij = (Mat_MPISBAIJ*)mat->data;
259:   const MatScalar *value;
260:   MatScalar       *barray=baij->barray;
261:   PetscBool       roworiented = baij->roworiented,ignore_ltriangular = ((Mat_SeqSBAIJ*)baij->A->data)->ignore_ltriangular;
262:   PetscErrorCode  ierr;
263:   PetscInt        i,j,ii,jj,row,col,rstart=baij->rstartbs;
264:   PetscInt        rend=baij->rendbs,cstart=baij->rstartbs,stepval;
265:   PetscInt        cend=baij->rendbs,bs=mat->rmap->bs,bs2=baij->bs2;

268:   if (!barray) {
269:     PetscMalloc(bs2*sizeof(MatScalar),&barray);
270:     baij->barray = barray;
271:   }

273:   if (roworiented) {
274:     stepval = (n-1)*bs;
275:   } else {
276:     stepval = (m-1)*bs;
277:   }
278:   for (i=0; i<m; i++) {
279:     if (im[i] < 0) continue;
280: #if defined(PETSC_USE_DEBUG)
281:     if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
282: #endif
283:     if (im[i] >= rstart && im[i] < rend) {
284:       row = im[i] - rstart;
285:       for (j=0; j<n; j++) {
286:         if (im[i] > in[j]) {
287:           if (ignore_ltriangular) continue; /* ignore lower triangular blocks */
288:           else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_USER,"Lower triangular value cannot be set for sbaij format. To ignore these values, run with -mat_ignore_lower_triangular or call MatSetOption(mat,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE)");
289:         }
290:         /* If n == 1 (row-oriented) or m == 1 (column-oriented) the block is already contiguous and no copy is required */
291:         if ((roworiented) && (n == 1)) {
292:           barray = (MatScalar*) v + i*bs2;
293:         } else if((!roworiented) && (m == 1)) {
294:           barray = (MatScalar*) v + j*bs2;
295:         } else { /* Here a copy is required */
296:           if (roworiented) {
297:             value = v + i*(stepval+bs)*bs + j*bs;
298:           } else {
299:             value = v + j*(stepval+bs)*bs + i*bs;
300:           }
301:           for (ii=0; ii<bs; ii++,value+=stepval) {
302:             for (jj=0; jj<bs; jj++) {
303:               *barray++  = *value++;
304:             }
305:           }
306:           barray -=bs2;
307:         }
308: 
309:         if (in[j] >= cstart && in[j] < cend){
310:           col  = in[j] - cstart;
311:           MatSetValuesBlocked_SeqSBAIJ(baij->A,1,&row,1,&col,barray,addv);
312:         }
313:         else if (in[j] < 0) continue;
314: #if defined(PETSC_USE_DEBUG)
315:         else if (in[j] >= baij->Nbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);
316: #endif
317:         else {
318:           if (mat->was_assembled) {
319:             if (!baij->colmap) {
320:               MatCreateColmap_MPIBAIJ_Private(mat);
321:             }

323: #if defined(PETSC_USE_DEBUG)
324: #if defined (PETSC_USE_CTABLE)
325:             { PetscInt data;
326:               PetscTableFind(baij->colmap,in[j]+1,&data);
327:               if ((data - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
328:             }
329: #else
330:             if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
331: #endif
332: #endif
333: #if defined (PETSC_USE_CTABLE)
334:             PetscTableFind(baij->colmap,in[j]+1,&col);
335:             col  = (col - 1)/bs;
336: #else
337:             col = (baij->colmap[in[j]] - 1)/bs;
338: #endif
339:             if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
340:               MatDisAssemble_MPISBAIJ(mat);
341:               col =  in[j];
342:             }
343:           }
344:           else col = in[j];
345:           MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);
346:         }
347:       }
348:     } else {
349:       if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
350:       if (!baij->donotstash) {
351:         if (roworiented) {
352:           MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
353:         } else {
354:           MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
355:         }
356:       }
357:     }
358:   }
359:   return(0);
360: }
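
A caller-side sketch (not part of this file; assumes bs = 2, so each block carries bs2 = 4 values): the indices are block indices, and the block values are row-oriented by default.

    PetscInt    brow = 2, bcol = 3;               /* block indices, bcol >= brow */
    PetscScalar vals[4] = {4.0, 1.0, 1.0, 4.0};   /* one 2x2 block */
    MatSetValuesBlocked(mat,1,&brow,1,&bcol,vals,INSERT_VALUES);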

364: PetscErrorCode MatGetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
365: {
366:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
368:   PetscInt       bs=mat->rmap->bs,i,j,bsrstart = mat->rmap->rstart,bsrend = mat->rmap->rend;
369:   PetscInt       bscstart = mat->cmap->rstart,bscend = mat->cmap->rend,row,col,data;

372:   for (i=0; i<m; i++) {
373:     if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]); */
374:     if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
375:     if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
376:       row = idxm[i] - bsrstart;
377:       for (j=0; j<n; j++) {
378:         if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column %D",idxn[j]); */
379:         if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
380:         if (idxn[j] >= bscstart && idxn[j] < bscend){
381:           col = idxn[j] - bscstart;
382:           MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);
383:         } else {
384:           if (!baij->colmap) {
385:             MatCreateColmap_MPIBAIJ_Private(mat);
386:           }
387: #if defined (PETSC_USE_CTABLE)
388:           PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);
389:           data --;
390: #else
391:           data = baij->colmap[idxn[j]/bs]-1;
392: #endif
393:           if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
394:           else {
395:             col  = data + idxn[j]%bs;
396:             MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);
397:           }
398:         }
399:       }
400:     } else {
401:       SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
402:     }
403:   }
404:  return(0);
405: }

409: PetscErrorCode MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
410: {
411:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
413:   PetscReal      sum[2],*lnorm2;

416:   if (baij->size == 1) {
417:      MatNorm(baij->A,type,norm);
418:   } else {
419:     if (type == NORM_FROBENIUS) {
420:       PetscMalloc(2*sizeof(PetscReal),&lnorm2);
421:        MatNorm(baij->A,type,lnorm2);
422:       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* square of norm(A) */
423:        MatNorm(baij->B,type,lnorm2);
424:       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;             /* square of norm(B) */
425:       MPI_Allreduce(lnorm2,&sum,2,MPIU_REAL,MPIU_SUM,((PetscObject)mat)->comm);
426:       *norm = PetscSqrtReal(sum[0] + 2*sum[1]);
427:       PetscFree(lnorm2);
428:     } else if (type == NORM_INFINITY || type == NORM_1) { /* max row/column sum */
429:       Mat_SeqSBAIJ *amat=(Mat_SeqSBAIJ*)baij->A->data;
430:       Mat_SeqBAIJ  *bmat=(Mat_SeqBAIJ*)baij->B->data;
431:       PetscReal    *rsum,*rsum2,vabs;
432:       PetscInt     *jj,*garray=baij->garray,rstart=baij->rstartbs,nz;
433:       PetscInt     brow,bcol,col,bs=baij->A->rmap->bs,row,grow,gcol,mbs=amat->mbs;
434:       MatScalar    *v;

436:       PetscMalloc2(mat->cmap->N,PetscReal,&rsum,mat->cmap->N,PetscReal,&rsum2);
437:       PetscMemzero(rsum,mat->cmap->N*sizeof(PetscReal));
438:       /* Amat */
439:       v = amat->a; jj = amat->j;
440:       for (brow=0; brow<mbs; brow++) {
441:         grow = bs*(rstart + brow);
442:         nz = amat->i[brow+1] - amat->i[brow];
443:         for (bcol=0; bcol<nz; bcol++){
444:           gcol = bs*(rstart + *jj); jj++;
445:           for (col=0; col<bs; col++){
446:             for (row=0; row<bs; row++){
447:               vabs = PetscAbsScalar(*v); v++;
448:               rsum[gcol+col] += vabs;
449:               /* non-diagonal block */
450:               if (bcol > 0 && vabs > 0.0) rsum[grow+row] += vabs;
451:             }
452:           }
453:         }
454:       }
455:       /* Bmat */
456:       v = bmat->a; jj = bmat->j;
457:       for (brow=0; brow<mbs; brow++) {
458:         grow = bs*(rstart + brow);
459:         nz = bmat->i[brow+1] - bmat->i[brow];
460:         for (bcol=0; bcol<nz; bcol++){
461:           gcol = bs*garray[*jj]; jj++;
462:           for (col=0; col<bs; col++){
463:             for (row=0; row<bs; row++){
464:               vabs = PetscAbsScalar(*v); v++;
465:               rsum[gcol+col] += vabs;
466:               rsum[grow+row] += vabs;
467:             }
468:           }
469:         }
470:       }
471:       MPI_Allreduce(rsum,rsum2,mat->cmap->N,MPIU_REAL,MPIU_SUM,((PetscObject)mat)->comm);
472:       *norm = 0.0;
473:       for (col=0; col<mat->cmap->N; col++) {
474:         if (rsum2[col] > *norm) *norm = rsum2[col];
475:       }
476:       PetscFree2(rsum,rsum2);
477:     } else {
478:       SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"No support for this norm yet");
479:     }
480:   }
481:   return(0);
482: }
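
Note the Frobenius reduction above: each process contributes the squares of its local norms, and the off-diagonal part B is counted twice because each stored off-diagonal block also stands for its transpose, giving sqrt(sum[0] + 2*sum[1]). For a symmetric operator the 1-norm equals the infinity-norm, which is why one code path serves both. A minimal call (sketch, not part of this file):

    PetscReal nrm;
    MatNorm(mat,NORM_FROBENIUS,&nrm);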

486: PetscErrorCode MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
487: {
488:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
490:   PetscInt       nstash,reallocs;
491:   InsertMode     addv;

494:   if (baij->donotstash || mat->nooffprocentries) {
495:     return(0);
496:   }

498:   /* make sure all processors are in the same mode: either INSERT_VALUES or ADD_VALUES */
499:   MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,((PetscObject)mat)->comm);
500:   if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted while others added");
501:   mat->insertmode = addv; /* in case this processor had no cache */

503:   MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
504:   MatStashScatterBegin_Private(mat,&mat->bstash,baij->rangebs);
505:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
506:   PetscInfo2(mat,"Stash has %D entries,uses %D mallocs.\n",nstash,reallocs);
507:   MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);
508:   PetscInfo2(mat,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
509:   return(0);
510: }

514: PetscErrorCode MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
515: {
516:   Mat_MPISBAIJ   *baij=(Mat_MPISBAIJ*)mat->data;
517:   Mat_SeqSBAIJ   *a=(Mat_SeqSBAIJ*)baij->A->data;
519:   PetscInt       i,j,rstart,ncols,flg,bs2=baij->bs2;
520:   PetscInt       *row,*col;
521:   PetscBool      other_disassembled;
522:   PetscMPIInt    n;
523:   PetscBool      r1,r2,r3;
524:   MatScalar      *val;
525:   InsertMode     addv = mat->insertmode;

527:   /* do not use 'b=(Mat_SeqBAIJ*)baij->B->data' as B can be reset in disassembly */

530:   if (!baij->donotstash &&  !mat->nooffprocentries) {
531:     while (1) {
532:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
533:       if (!flg) break;

535:       for (i=0; i<n;) {
536:         /* Now identify the consecutive vals belonging to the same row */
537:         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
538:         if (j < n) ncols = j-i;
539:         else       ncols = n-i;
540:         /* Now assemble all these values with a single function call */
541:         MatSetValues_MPISBAIJ(mat,1,row+i,ncols,col+i,val+i,addv);
542:         i = j;
543:       }
544:     }
545:     MatStashScatterEnd_Private(&mat->stash);
546:     /* Now process the block-stash. Since the values are stashed column-oriented,
547:        temporarily clear the roworiented flags, and restore the original flags
548:        after MatSetValues() */
549:     r1 = baij->roworiented;
550:     r2 = a->roworiented;
551:     r3 = ((Mat_SeqBAIJ*)baij->B->data)->roworiented;
552:     baij->roworiented = PETSC_FALSE;
553:     a->roworiented    = PETSC_FALSE;
554:     ((Mat_SeqBAIJ*)baij->B->data)->roworiented    = PETSC_FALSE; /* b->roworiented */
555:     while (1) {
556:       MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);
557:       if (!flg) break;
558: 
559:       for (i=0; i<n;) {
560:         /* Now identify the consecutive vals belonging to the same row */
561:         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
562:         if (j < n) ncols = j-i;
563:         else       ncols = n-i;
564:         MatSetValuesBlocked_MPISBAIJ(mat,1,row+i,ncols,col+i,val+i*bs2,addv);
565:         i = j;
566:       }
567:     }
568:     MatStashScatterEnd_Private(&mat->bstash);
569:     baij->roworiented = r1;
570:     a->roworiented    = r2;
571:     ((Mat_SeqBAIJ*)baij->B->data)->roworiented    = r3; /* b->roworiented */
572:   }

574:   MatAssemblyBegin(baij->A,mode);
575:   MatAssemblyEnd(baij->A,mode);

577:   /* determine if any processor has disassembled; if so we must
578:      also disassemble ourselves, in order that we may reassemble. */
579:   /*
580:      if nonzero structure of submatrix B cannot change then we know that
581:      no processor disassembled thus we can skip this stuff
582:   */
583:   if (!((Mat_SeqBAIJ*)baij->B->data)->nonew)  {
584:     MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,((PetscObject)mat)->comm);
585:     if (mat->was_assembled && !other_disassembled) {
586:       MatDisAssemble_MPISBAIJ(mat);
587:     }
588:   }

590:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
591:     MatSetUpMultiply_MPISBAIJ(mat); /* setup Mvctx and sMvctx */
592:   }
593:   MatSetOption(baij->B,MAT_CHECK_COMPRESSED_ROW,PETSC_TRUE);
594:   MatAssemblyBegin(baij->B,mode);
595:   MatAssemblyEnd(baij->B,mode);
596: 
597:   PetscFree2(baij->rowvalues,baij->rowindices);
598:   baij->rowvalues = 0;

600:   return(0);
601: }
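
Because MatAssemblyBegin() above verifies by allreduce that every process used the same InsertMode, a caller that mixes modes must flush in between. A sketch (not part of this file; row, col, v assumed declared consistently on all ranks):

    MatSetValues(mat,1,&row,1,&col,&v,INSERT_VALUES);
    MatAssemblyBegin(mat,MAT_FLUSH_ASSEMBLY);
    MatAssemblyEnd(mat,MAT_FLUSH_ASSEMBLY);
    MatSetValues(mat,1,&row,1,&col,&v,ADD_VALUES);
    MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);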

603: extern PetscErrorCode MatSetValues_MPIBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
606: static PetscErrorCode MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
607: {
608:   Mat_MPISBAIJ      *baij = (Mat_MPISBAIJ*)mat->data;
609:   PetscErrorCode    ierr;
610:   PetscInt          bs = mat->rmap->bs;
611:   PetscMPIInt       size = baij->size,rank = baij->rank;
612:   PetscBool         iascii,isdraw;
613:   PetscViewer       sviewer;
614:   PetscViewerFormat format;

617:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
618:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
619:   if (iascii) {
620:     PetscViewerGetFormat(viewer,&format);
621:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
622:       MatInfo info;
623:       MPI_Comm_rank(((PetscObject)mat)->comm,&rank);
624:       MatGetInfo(mat,MAT_LOCAL,&info);
625:       PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
626:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,mat->rmap->bs,(PetscInt)info.memory);
627:       MatGetInfo(baij->A,MAT_LOCAL,&info);
628:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
629:       MatGetInfo(baij->B,MAT_LOCAL,&info);
630:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
631:       PetscViewerFlush(viewer);
632:       PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
633:       PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
634:       VecScatterView(baij->Mvctx,viewer);
635:       return(0);
636:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
637:       PetscViewerASCIIPrintf(viewer,"  block size is %D\n",bs);
638:       return(0);
639:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
640:       return(0);
641:     }
642:   }

644:   if (isdraw) {
645:     PetscDraw  draw;
646:     PetscBool  isnull;
647:     PetscViewerDrawGetDraw(viewer,0,&draw);
648:     PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
649:   }

651:   if (size == 1) {
652:     PetscObjectSetName((PetscObject)baij->A,((PetscObject)mat)->name);
653:     MatView(baij->A,viewer);
654:   } else {
655:     /* assemble the entire matrix onto first processor. */
656:     Mat          A;
657:     Mat_SeqSBAIJ *Aloc;
658:     Mat_SeqBAIJ  *Bloc;
659:     PetscInt     M = mat->rmap->N,N = mat->cmap->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
660:     MatScalar    *a;

662:     /* Should this be the same type as mat? */
663:     MatCreate(((PetscObject)mat)->comm,&A);
664:     if (!rank) {
665:       MatSetSizes(A,M,N,M,N);
666:     } else {
667:       MatSetSizes(A,0,0,M,N);
668:     }
669:     MatSetType(A,MATMPISBAIJ);
670:     MatMPISBAIJSetPreallocation(A,mat->rmap->bs,0,PETSC_NULL,0,PETSC_NULL);
671:     MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);
672:     PetscLogObjectParent(mat,A);

674:     /* copy over the A part */
675:     Aloc  = (Mat_SeqSBAIJ*)baij->A->data;
676:     ai    = Aloc->i; aj = Aloc->j; a = Aloc->a;
677:     PetscMalloc(bs*sizeof(PetscInt),&rvals);

679:     for (i=0; i<mbs; i++) {
680:       rvals[0] = bs*(baij->rstartbs + i);
681:       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
682:       for (j=ai[i]; j<ai[i+1]; j++) {
683:         col = (baij->cstartbs+aj[j])*bs;
684:         for (k=0; k<bs; k++) {
685:           MatSetValues_MPISBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);
686:           col++; a += bs;
687:         }
688:       }
689:     }
690:     /* copy over the B part */
691:     Bloc = (Mat_SeqBAIJ*)baij->B->data;
692:     ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
693:     for (i=0; i<mbs; i++) {
694: 
695:       rvals[0] = bs*(baij->rstartbs + i);
696:       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
697:       for (j=ai[i]; j<ai[i+1]; j++) {
698:         col = baij->garray[aj[j]]*bs;
699:         for (k=0; k<bs; k++) {
700:           MatSetValues_MPIBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);
701:           col++; a += bs;
702:         }
703:       }
704:     }
705:     PetscFree(rvals);
706:     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
707:     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
708:     /* 
709:        Everyone has to call to draw the matrix since the graphics waits are
710:        synchronized across all processors that share the PetscDraw object
711:     */
712:     PetscViewerGetSingleton(viewer,&sviewer);
713:     if (!rank) {
714:       PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,((PetscObject)mat)->name);
715:           /* Set the type name to MATMPISBAIJ so that the correct type can be printed out by PetscObjectPrintClassNamePrefixType() in MatView_SeqSBAIJ_ASCII()*/
716:       PetscStrcpy(((PetscObject)((Mat_MPISBAIJ*)(A->data))->A)->type_name,MATMPISBAIJ);
717:       MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);
718:     }
719:     PetscViewerRestoreSingleton(viewer,&sviewer);
720:     MatDestroy(&A);
721:   }
722:   return(0);
723: }

727: PetscErrorCode MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
728: {
730:   PetscBool      iascii,isdraw,issocket,isbinary;

733:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
734:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
735:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
736:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
737:   if (iascii || isdraw || issocket || isbinary) {
738:     MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);
739:   } else {
740:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
741:   }
742:   return(0);
743: }
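
A viewer sketch (not part of this file) that exercises the ASCII info path above:

    PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);
    MatView(mat,PETSC_VIEWER_STDOUT_WORLD);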

747: PetscErrorCode MatDestroy_MPISBAIJ(Mat mat)
748: {
749:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;

753: #if defined(PETSC_USE_LOG)
754:   PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->rmap->N,mat->cmap->N);
755: #endif
756:   MatStashDestroy_Private(&mat->stash);
757:   MatStashDestroy_Private(&mat->bstash);
758:   MatDestroy(&baij->A);
759:   MatDestroy(&baij->B);
760: #if defined (PETSC_USE_CTABLE)
761:   PetscTableDestroy(&baij->colmap);
762: #else
763:   PetscFree(baij->colmap);
764: #endif
765:   PetscFree(baij->garray);
766:   VecDestroy(&baij->lvec);
767:   VecScatterDestroy(&baij->Mvctx);
768:   VecDestroy(&baij->slvec0);
769:   VecDestroy(&baij->slvec0b);
770:   VecDestroy(&baij->slvec1);
771:   VecDestroy(&baij->slvec1a);
772:   VecDestroy(&baij->slvec1b);
773:   VecScatterDestroy(&baij->sMvctx);
774:   PetscFree2(baij->rowvalues,baij->rowindices);
775:   PetscFree(baij->barray);
776:   PetscFree(baij->hd);
777:   VecDestroy(&baij->diag);
778:   VecDestroy(&baij->bb1);
779:   VecDestroy(&baij->xx1);
780: #if defined(PETSC_USE_REAL_MAT_SINGLE)
781:   PetscFree(baij->setvaluescopy);
782: #endif
783:   PetscFree(baij->in_loc);
784:   PetscFree(baij->v_loc);
785:   PetscFree(baij->rangebs);
786:   PetscFree(mat->data);

788:   PetscObjectChangeTypeName((PetscObject)mat,0);
789:   PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);
790:   PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);
791:   PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
792:   PetscObjectComposeFunction((PetscObject)mat,"MatMPISBAIJSetPreallocation_C","",PETSC_NULL);
793:   PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpisbaij_mpisbstrm_C","",PETSC_NULL);
794:   return(0);
795: }

799: PetscErrorCode MatMult_MPISBAIJ_Hermitian(Mat A,Vec xx,Vec yy)
800: {
801:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
803:   PetscInt       nt,mbs=a->mbs,bs=A->rmap->bs;
804:   PetscScalar    *x,*from;
805: 
807:   VecGetLocalSize(xx,&nt);
808:   if (nt != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");

810:   /* diagonal part */
811:   (*a->A->ops->mult)(a->A,xx,a->slvec1a);
812:   VecSet(a->slvec1b,0.0);

814:   /* subdiagonal part */
815:   (*a->B->ops->multhermitiantranspose)(a->B,xx,a->slvec0b);

817:   /* copy x into the vec slvec0 */
818:   VecGetArray(a->slvec0,&from);
819:   VecGetArray(xx,&x);

821:   PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
822:   VecRestoreArray(a->slvec0,&from);
823:   VecRestoreArray(xx,&x);
824: 
825:   VecScatterBegin(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
826:   VecScatterEnd(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
827:   /* superdiagonal part */
828:   (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,yy);
829:   return(0);
830: }

834: PetscErrorCode MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
835: {
836:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
838:   PetscInt       nt,mbs=a->mbs,bs=A->rmap->bs;
839:   PetscScalar    *x,*from;
840: 
842:   VecGetLocalSize(xx,&nt);
843:   if (nt != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");

845:   /* diagonal part */
846:   (*a->A->ops->mult)(a->A,xx,a->slvec1a);
847:   VecSet(a->slvec1b,0.0);

849:   /* subdiagonal part */
850:   (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);

852:   /* copy x into the vec slvec0 */
853:   VecGetArray(a->slvec0,&from);
854:   VecGetArray(xx,&x);

856:   PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
857:   VecRestoreArray(a->slvec0,&from);
858:   VecRestoreArray(xx,&x);
859: 
860:   VecScatterBegin(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
861:   VecScatterEnd(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
862:   /* superdiagonal part */
863:   (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,yy);
864:   return(0);
865: }
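
In effect y = A_loc*x_loc + B*x_ghost + B^T*x_loc, with the single sMvctx scatter both gathering ghost values of x and accumulating the remote B^T contributions. A caller-side sketch (not part of this file):

    Vec x,y;
    MatGetVecs(mat,&x,&y);    /* vectors conforming to mat's layout */
    VecSet(x,1.0);
    MatMult(mat,x,y);         /* y = M x for the full symmetric operator M */
    VecDestroy(&x);
    VecDestroy(&y);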

869: PetscErrorCode MatMult_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy)
870: {
871:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
873:   PetscInt       nt;

876:   VecGetLocalSize(xx,&nt);
877:   if (nt != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");

879:   VecGetLocalSize(yy,&nt);
880:   if (nt != A->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");

882:   VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
883:   /* do diagonal part */
884:   (*a->A->ops->mult)(a->A,xx,yy);
885:   /* do superdiagonal part */
886:   VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
887:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
888:   /* do subdiagonal part */
889:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
890:   VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
891:   VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);

893:   return(0);
894: }

898: PetscErrorCode MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
899: {
900:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
902:   PetscInt       mbs=a->mbs,bs=A->rmap->bs;
903:   PetscScalar    *x,*from,zero=0.0;
904: 
906:   /*
907:   PetscSynchronizedPrintf(((PetscObject)A)->comm," MatMultAdd is called ...\n");
908:   PetscSynchronizedFlush(((PetscObject)A)->comm);
909:   */
910:   /* diagonal part */
911:   (*a->A->ops->multadd)(a->A,xx,yy,a->slvec1a);
912:   VecSet(a->slvec1b,zero);

914:   /* subdiagonal part */
915:   (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);

917:   /* copy x into the vec slvec0 */
918:   VecGetArray(a->slvec0,&from);
919:   VecGetArray(xx,&x);
920:   PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
921:   VecRestoreArray(a->slvec0,&from);
922: 
923:   VecScatterBegin(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
924:   VecRestoreArray(xx,&x);
925:   VecScatterEnd(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
926: 
927:   /* superdiagonal part */
928:   (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,zz);
929: 
930:   return(0);
931: }

935: PetscErrorCode MatMultAdd_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy,Vec zz)
936: {
937:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

941:   VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
942:   /* do diagonal part */
943:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
944:   /* do superdiagonal part */
945:   VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
946:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);

948:   /* do subdiagonal part */
949:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
950:   VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
951:   VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);

953:   return(0);
954: }

956: /*
957:   This only works correctly for square matrices where the subblock A->A is the 
958:    diagonal block
959: */
962: PetscErrorCode MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
963: {
964:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

968:   /* if (a->rmap->N != a->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
969:   MatGetDiagonal(a->A,v);
970:   return(0);
971: }

975: PetscErrorCode MatScale_MPISBAIJ(Mat A,PetscScalar aa)
976: {
977:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

981:   MatScale(a->A,aa);
982:   MatScale(a->B,aa);
983:   return(0);
984: }

988: PetscErrorCode MatGetRow_MPISBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
989: {
990:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
991:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
993:   PetscInt       bs = matin->rmap->bs,bs2 = mat->bs2,i,*cworkA,*cworkB,**pcA,**pcB;
994:   PetscInt       nztot,nzA,nzB,lrow,brstart = matin->rmap->rstart,brend = matin->rmap->rend;
995:   PetscInt       *cmap,*idx_p,cstart = mat->rstartbs;

998:   if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
999:   mat->getrowactive = PETSC_TRUE;

1001:   if (!mat->rowvalues && (idx || v)) {
1002:     /*
1003:         allocate enough space to hold information from the longest row.
1004:     */
1005:     Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
1006:     Mat_SeqBAIJ  *Ba = (Mat_SeqBAIJ*)mat->B->data;
1007:     PetscInt     max = 1,mbs = mat->mbs,tmp;
1008:     for (i=0; i<mbs; i++) {
1009:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
1010:       if (max < tmp) { max = tmp; }
1011:     }
1012:     PetscMalloc2(max*bs2,PetscScalar,&mat->rowvalues,max*bs2,PetscInt,&mat->rowindices);
1013:   }
1014: 
1015:   if (row < brstart || row >= brend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local rows");
1016:   lrow = row - brstart;  /* local row index */

1018:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1019:   if (!v)   {pvA = 0; pvB = 0;}
1020:   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1021:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1022:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1023:   nztot = nzA + nzB;

1025:   cmap  = mat->garray;
1026:   if (v  || idx) {
1027:     if (nztot) {
1028:       /* Sort by increasing column numbers, assuming A and B already sorted */
1029:       PetscInt imark = -1;
1030:       if (v) {
1031:         *v = v_p = mat->rowvalues;
1032:         for (i=0; i<nzB; i++) {
1033:           if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
1034:           else break;
1035:         }
1036:         imark = i;
1037:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1038:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1039:       }
1040:       if (idx) {
1041:         *idx = idx_p = mat->rowindices;
1042:         if (imark > -1) {
1043:           for (i=0; i<imark; i++) {
1044:             idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1045:           }
1046:         } else {
1047:           for (i=0; i<nzB; i++) {
1048:             if (cmap[cworkB[i]/bs] < cstart)
1049:               idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1050:             else break;
1051:           }
1052:           imark = i;
1053:         }
1054:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
1055:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1056:       }
1057:     } else {
1058:       if (idx) *idx = 0;
1059:       if (v)   *v   = 0;
1060:     }
1061:   }
1062:   *nz = nztot;
1063:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1064:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1065:   return(0);
1066: }

1070: PetscErrorCode MatRestoreRow_MPISBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1071: {
1072:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;

1075:   if (!baij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1076:   baij->getrowactive = PETSC_FALSE;
1077:   return(0);
1078: }

1082: PetscErrorCode MatGetRowUpperTriangular_MPISBAIJ(Mat A)
1083: {
1084:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
1085:   Mat_SeqSBAIJ   *aA = (Mat_SeqSBAIJ*)a->A->data;

1088:   aA->getrow_utriangular = PETSC_TRUE;
1089:   return(0);
1090: }
1093: PetscErrorCode MatRestoreRowUpperTriangular_MPISBAIJ(Mat A)
1094: {
1095:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
1096:   Mat_SeqSBAIJ   *aA = (Mat_SeqSBAIJ*)a->A->data;

1099:   aA->getrow_utriangular = PETSC_FALSE;
1100:   return(0);
1101: }

1105: PetscErrorCode MatRealPart_MPISBAIJ(Mat A)
1106: {
1107:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1111:   MatRealPart(a->A);
1112:   MatRealPart(a->B);
1113:   return(0);
1114: }

1118: PetscErrorCode MatImaginaryPart_MPISBAIJ(Mat A)
1119: {
1120:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1124:   MatImaginaryPart(a->A);
1125:   MatImaginaryPart(a->B);
1126:   return(0);
1127: }

1131: PetscErrorCode MatZeroEntries_MPISBAIJ(Mat A)
1132: {
1133:   Mat_MPISBAIJ   *l = (Mat_MPISBAIJ*)A->data;

1137:   MatZeroEntries(l->A);
1138:   MatZeroEntries(l->B);
1139:   return(0);
1140: }

1144: PetscErrorCode MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1145: {
1146:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)matin->data;
1147:   Mat            A = a->A,B = a->B;
1149:   PetscReal      isend[5],irecv[5];

1152:   info->block_size     = (PetscReal)matin->rmap->bs;
1153:   MatGetInfo(A,MAT_LOCAL,info);
1154:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1155:   isend[3] = info->memory;  isend[4] = info->mallocs;
1156:   MatGetInfo(B,MAT_LOCAL,info);
1157:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1158:   isend[3] += info->memory;  isend[4] += info->mallocs;
1159:   if (flag == MAT_LOCAL) {
1160:     info->nz_used      = isend[0];
1161:     info->nz_allocated = isend[1];
1162:     info->nz_unneeded  = isend[2];
1163:     info->memory       = isend[3];
1164:     info->mallocs      = isend[4];
1165:   } else if (flag == MAT_GLOBAL_MAX) {
1166:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,((PetscObject)matin)->comm);
1167:     info->nz_used      = irecv[0];
1168:     info->nz_allocated = irecv[1];
1169:     info->nz_unneeded  = irecv[2];
1170:     info->memory       = irecv[3];
1171:     info->mallocs      = irecv[4];
1172:   } else if (flag == MAT_GLOBAL_SUM) {
1173:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,((PetscObject)matin)->comm);
1174:     info->nz_used      = irecv[0];
1175:     info->nz_allocated = irecv[1];
1176:     info->nz_unneeded  = irecv[2];
1177:     info->memory       = irecv[3];
1178:     info->mallocs      = irecv[4];
1179:   } else {
1180:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag);
1181:   }
1182:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1183:   info->fill_ratio_needed = 0;
1184:   info->factor_mallocs    = 0;
1185:   return(0);
1186: }

1190: PetscErrorCode MatSetOption_MPISBAIJ(Mat A,MatOption op,PetscBool  flg)
1191: {
1192:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
1193:   Mat_SeqSBAIJ   *aA = (Mat_SeqSBAIJ*)a->A->data;

1197:   switch (op) {
1198:   case MAT_NEW_NONZERO_LOCATIONS:
1199:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1200:   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1201:   case MAT_KEEP_NONZERO_PATTERN:
1202:   case MAT_NEW_NONZERO_LOCATION_ERR:
1203:     MatSetOption(a->A,op,flg);
1204:     MatSetOption(a->B,op,flg);
1205:     break;
1206:   case MAT_ROW_ORIENTED:
1207:     a->roworiented = flg;
1208:     MatSetOption(a->A,op,flg);
1209:     MatSetOption(a->B,op,flg);
1210:     break;
1211:   case MAT_NEW_DIAGONALS:
1212:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1213:     break;
1214:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1215:     a->donotstash = flg;
1216:     break;
1217:   case MAT_USE_HASH_TABLE:
1218:     a->ht_flag = flg;
1219:     break;
1220:   case MAT_HERMITIAN:
1221:     if (!A->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Must call MatAssemblyEnd() first");
1222:     MatSetOption(a->A,op,flg);
1223:     A->ops->mult = MatMult_MPISBAIJ_Hermitian;
1224:     break;
1225:   case MAT_SPD:
1226:     A->spd_set                         = PETSC_TRUE;
1227:     A->spd                             = flg;
1228:     if (flg) {
1229:       A->symmetric                     = PETSC_TRUE;
1230:       A->structurally_symmetric        = PETSC_TRUE;
1231:       A->symmetric_set                 = PETSC_TRUE;
1232:       A->structurally_symmetric_set    = PETSC_TRUE;
1233:     }
1234:     break;
1235:   case MAT_SYMMETRIC:
1236:     MatSetOption(a->A,op,flg);
1237:     break;
1238:   case MAT_STRUCTURALLY_SYMMETRIC:
1239:     MatSetOption(a->A,op,flg);
1240:     break;
1241:   case MAT_SYMMETRY_ETERNAL:
1242:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix must be symmetric");
1243:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1244:     break;
1245:   case MAT_IGNORE_LOWER_TRIANGULAR:
1246:     aA->ignore_ltriangular = flg;
1247:     break;
1248:   case MAT_ERROR_LOWER_TRIANGULAR:
1249:     aA->ignore_ltriangular = flg;
1250:     break;
1251:   case MAT_GETROW_UPPERTRIANGULAR:
1252:     aA->getrow_utriangular = flg;
1253:     break;
1254:   default:
1255:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1256:   }
1257:   return(0);
1258: }
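
Two option calls that exercise the cases above (sketch, not part of this file):

    MatSetOption(mat,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE);  /* silently drop i>j input */
    MatSetOption(mat,MAT_HERMITIAN,PETSC_TRUE);                /* only after assembly, per the check above */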

1262: PetscErrorCode MatTranspose_MPISBAIJ(Mat A,MatReuse reuse,Mat *B)
1263: {
1266:   if (reuse == MAT_INITIAL_MATRIX || *B != A) {
1267:     MatDuplicate(A,MAT_COPY_VALUES,B);
1268:   }
1269:   return(0);
1270: }

1274: PetscErrorCode MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1275: {
1276:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
1277:   Mat            a=baij->A, b=baij->B;
1279:   PetscInt       nv,m,n;
1280:   PetscBool      flg;

1283:   if (ll != rr){
1284:     VecEqual(ll,rr,&flg);
1285:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be the same");
1286:   }
1287:   if (!ll) return(0);

1289:   MatGetLocalSize(mat,&m,&n);
1290:   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"For symmetric format, local sizes %D %D must be the same",m,n);
1291: 
1292:   VecGetLocalSize(rr,&nv);
1293:   if (nv!=n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Left and right vector non-conforming local size");

1295:   VecScatterBegin(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1296: 
1297:   /* left diagonalscale the off-diagonal part */
1298:   (*b->ops->diagonalscale)(b,ll,PETSC_NULL);
1299: 
1300:   /* scale the diagonal part */
1301:   (*a->ops->diagonalscale)(a,ll,rr);

1303:   /* right diagonalscale the off-diagonal part */
1304:   VecScatterEnd(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1305:   (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);
1306:   return(0);
1307: }
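
A scaling sketch (not part of this file; d is a vector conforming to mat's row layout): the same vector must be passed on both sides so the result D*M*D stays symmetric.

    MatDiagonalScale(mat,d,d);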

1311: PetscErrorCode MatSetUnfactored_MPISBAIJ(Mat A)
1312: {
1313:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1317:   MatSetUnfactored(a->A);
1318:   return(0);
1319: }

1321: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);

1325: PetscErrorCode MatEqual_MPISBAIJ(Mat A,Mat B,PetscBool  *flag)
1326: {
1327:   Mat_MPISBAIJ   *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1328:   Mat            a,b,c,d;
1329:   PetscBool      flg;

1333:   a = matA->A; b = matA->B;
1334:   c = matB->A; d = matB->B;

1336:   MatEqual(a,c,&flg);
1337:   if (flg) {
1338:     MatEqual(b,d,&flg);
1339:   }
1340:   MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);
1341:   return(0);
1342: }

1346: PetscErrorCode MatCopy_MPISBAIJ(Mat A,Mat B,MatStructure str)
1347: {
1349:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ *)A->data;
1350:   Mat_MPISBAIJ   *b = (Mat_MPISBAIJ *)B->data;

1353:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
1354:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
1355:     MatGetRowUpperTriangular(A);
1356:     MatCopy_Basic(A,B,str);
1357:     MatRestoreRowUpperTriangular(A);
1358:   } else {
1359:     MatCopy(a->A,b->A,str);
1360:     MatCopy(a->B,b->B,str);
1361:   }
1362:   return(0);
1363: }

1367: PetscErrorCode MatSetUp_MPISBAIJ(Mat A)
1368: {

1372:   MatMPISBAIJSetPreallocation(A,A->rmap->bs,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
1373:   return(0);
1374: }

1378: PetscErrorCode MatAXPY_MPISBAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
1379: {
1381:   Mat_MPISBAIJ   *xx=(Mat_MPISBAIJ *)X->data,*yy=(Mat_MPISBAIJ *)Y->data;
1382:   PetscBLASInt   bnz,one=1;
1383:   Mat_SeqSBAIJ   *xa,*ya;
1384:   Mat_SeqBAIJ    *xb,*yb;

1387:   if (str == SAME_NONZERO_PATTERN) {
1388:     PetscScalar alpha = a;
1389:     xa = (Mat_SeqSBAIJ *)xx->A->data;
1390:     ya = (Mat_SeqSBAIJ *)yy->A->data;
1391:     bnz = PetscBLASIntCast(xa->nz);
1392:     BLASaxpy_(&bnz,&alpha,xa->a,&one,ya->a,&one);
1393:     xb = (Mat_SeqBAIJ *)xx->B->data;
1394:     yb = (Mat_SeqBAIJ *)yy->B->data;
1395:     bnz = PetscBLASIntCast(xb->nz);
1396:     BLASaxpy_(&bnz,&alpha,xb->a,&one,yb->a,&one);
1397:   } else {
1398:     MatGetRowUpperTriangular(X);
1399:     MatAXPY_Basic(Y,a,X,str);
1400:     MatRestoreRowUpperTriangular(X);
1401:   }
1402:   return(0);
1403: }
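
Usage sketch (not part of this file): with SAME_NONZERO_PATTERN the update runs as two flat BLAS axpy calls over the stored A and B values.

    MatAXPY(Y,2.0,X,SAME_NONZERO_PATTERN);   /* Y += 2 X */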

1407: PetscErrorCode MatGetSubMatrices_MPISBAIJ(Mat A,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *B[])
1408: {
1410:   PetscInt       i;
1411:   PetscBool      flg;

1414:   for (i=0; i<n; i++) {
1415:     ISEqual(irow[i],icol[i],&flg);
1416:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Can only get symmetric submatrix for MPISBAIJ matrices");
1417:   }
1418:   MatGetSubMatrices_MPIBAIJ(A,n,irow,icol,scall,B);
1419:   return(0);
1420: }
1421: 
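A submatrix sketch (not part of this file): the row and column index sets must be identical, e.g. built once with ISCreateStride.

    IS  is;
    Mat *submat;
    ISCreateStride(PETSC_COMM_SELF,10,0,1,&is);                  /* rows/cols 0..9 */
    MatGetSubMatrices(mat,1,&is,&is,MAT_INITIAL_MATRIX,&submat);
    ISDestroy(&is);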

1423: /* -------------------------------------------------------------------*/
1424: static struct _MatOps MatOps_Values = {
1425:        MatSetValues_MPISBAIJ,
1426:        MatGetRow_MPISBAIJ,
1427:        MatRestoreRow_MPISBAIJ,
1428:        MatMult_MPISBAIJ,
1429: /* 4*/ MatMultAdd_MPISBAIJ,
1430:        MatMult_MPISBAIJ,       /* transpose versions are same as non-transpose */
1431:        MatMultAdd_MPISBAIJ,
1432:        0,
1433:        0,
1434:        0,
1435: /*10*/ 0,
1436:        0,
1437:        0,
1438:        MatSOR_MPISBAIJ,
1439:        MatTranspose_MPISBAIJ,
1440: /*15*/ MatGetInfo_MPISBAIJ,
1441:        MatEqual_MPISBAIJ,
1442:        MatGetDiagonal_MPISBAIJ,
1443:        MatDiagonalScale_MPISBAIJ,
1444:        MatNorm_MPISBAIJ,
1445: /*20*/ MatAssemblyBegin_MPISBAIJ,
1446:        MatAssemblyEnd_MPISBAIJ,
1447:        MatSetOption_MPISBAIJ,
1448:        MatZeroEntries_MPISBAIJ,
1449: /*24*/ 0,
1450:        0,
1451:        0,
1452:        0,
1453:        0,
1454: /*29*/ MatSetUp_MPISBAIJ,
1455:        0,
1456:        0,
1457:        0,
1458:        0,
1459: /*34*/ MatDuplicate_MPISBAIJ,
1460:        0,
1461:        0,
1462:        0,
1463:        0,
1464: /*39*/ MatAXPY_MPISBAIJ,
1465:        MatGetSubMatrices_MPISBAIJ,
1466:        MatIncreaseOverlap_MPISBAIJ,
1467:        MatGetValues_MPISBAIJ,
1468:        MatCopy_MPISBAIJ,
1469: /*44*/ 0,
1470:        MatScale_MPISBAIJ,
1471:        0,
1472:        0,
1473:        0,
1474: /*49*/ 0,
1475:        0,
1476:        0,
1477:        0,
1478:        0,
1479: /*54*/ 0,
1480:        0,
1481:        MatSetUnfactored_MPISBAIJ,
1482:        0,
1483:        MatSetValuesBlocked_MPISBAIJ,
1484: /*59*/ 0,
1485:        0,
1486:        0,
1487:        0,
1488:        0,
1489: /*64*/ 0,
1490:        0,
1491:        0,
1492:        0,
1493:        0,
1494: /*69*/ MatGetRowMaxAbs_MPISBAIJ,
1495:        0,
1496:        0,
1497:        0,
1498:        0,
1499: /*74*/ 0,
1500:        0,
1501:        0,
1502:        0,
1503:        0,
1504: /*79*/ 0,
1505:        0,
1506:        0,
1507:        0,
1508:        MatLoad_MPISBAIJ,
1509: /*84*/ 0,
1510:        0,
1511:        0,
1512:        0,
1513:        0,
1514: /*89*/ 0,
1515:        0,
1516:        0,
1517:        0,
1518:        0,
1519: /*94*/ 0,
1520:        0,
1521:        0,
1522:        0,
1523:        0,
1524: /*99*/ 0,
1525:        0,
1526:        0,
1527:        0,
1528:        0,
1529: /*104*/0,
1530:        MatRealPart_MPISBAIJ,
1531:        MatImaginaryPart_MPISBAIJ,
1532:        MatGetRowUpperTriangular_MPISBAIJ,
1533:        MatRestoreRowUpperTriangular_MPISBAIJ,
1534: /*109*/0,
1535:        0,
1536:        0,
1537:        0,
1538:        0,
1539: /*114*/0,
1540:        0,
1541:        0,
1542:        0,
1543:        0,
1544: /*119*/0,
1545:        0,
1546:        0,
1547:        0
1548: };


1551: EXTERN_C_BEGIN
1554: PetscErrorCode  MatGetDiagonalBlock_MPISBAIJ(Mat A,Mat *a)
1555: {
1557:   *a = ((Mat_MPISBAIJ *)A->data)->A;
1558:   return(0);
1559: }
1560: EXTERN_C_END

1562: EXTERN_C_BEGIN
1565: PetscErrorCode  MatMPISBAIJSetPreallocation_MPISBAIJ(Mat B,PetscInt bs,PetscInt d_nz,PetscInt *d_nnz,PetscInt o_nz,PetscInt *o_nnz)
1566: {
1567:   Mat_MPISBAIJ   *b;
1569:   PetscInt       i,mbs,Mbs;

1572:   if (d_nz == PETSC_DECIDE || d_nz == PETSC_DEFAULT) d_nz = 3;
1573:   if (o_nz == PETSC_DECIDE || o_nz == PETSC_DEFAULT) o_nz = 1;
1574:   if (d_nz < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
1575:   if (o_nz < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);

1577:   PetscLayoutSetBlockSize(B->rmap,bs);
1578:   PetscLayoutSetBlockSize(B->cmap,bs);
1579:   PetscLayoutSetUp(B->rmap);
1580:   PetscLayoutSetUp(B->cmap);
1581:   PetscLayoutGetBlockSize(B->rmap,&bs);

1583:   if (d_nnz) {
1584:     for (i=0; i<B->rmap->n/bs; i++) {
1585:       if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
1586:     }
1587:   }
1588:   if (o_nnz) {
1589:     for (i=0; i<B->rmap->n/bs; i++) {
1590:       if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
1591:     }
1592:   }

1594:   b   = (Mat_MPISBAIJ*)B->data;
1595:   mbs = B->rmap->n/bs;
1596:   Mbs = B->rmap->N/bs;
1597:   if (mbs*bs != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Number of local rows %D must be divisible by blocksize %D",B->rmap->n,bs);

1599:   B->rmap->bs  = bs;
1600:   b->bs2 = bs*bs;
1601:   b->mbs = mbs;
1602:   b->nbs = mbs;
1603:   b->Mbs = Mbs;
1604:   b->Nbs = Mbs;

1606:   for (i=0; i<=b->size; i++) {
1607:     b->rangebs[i] = B->rmap->range[i]/bs;
1608:   }
1609:   b->rstartbs = B->rmap->rstart/bs;
1610:   b->rendbs   = B->rmap->rend/bs;
1611: 
1612:   b->cstartbs = B->cmap->rstart/bs;
1613:   b->cendbs   = B->cmap->rend/bs;

1615:   if (!B->preallocated) {
1616:     MatCreate(PETSC_COMM_SELF,&b->A);
1617:     MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
1618:     MatSetType(b->A,MATSEQSBAIJ);
1619:     PetscLogObjectParent(B,b->A);
1620:     MatCreate(PETSC_COMM_SELF,&b->B);
1621:     MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
1622:     MatSetType(b->B,MATSEQBAIJ);
1623:     PetscLogObjectParent(B,b->B);
1624:     MatStashCreate_Private(((PetscObject)B)->comm,bs,&B->bstash);
1625:   }

1627:   MatSeqSBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);
1628:   MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);
1629:   B->preallocated = PETSC_TRUE;
1630:   return(0);
1631: }
1632: EXTERN_C_END

1634: EXTERN_C_BEGIN
1637: PetscErrorCode MatMPISBAIJSetPreallocationCSR_MPISBAIJ(Mat B,PetscInt bs,const PetscInt ii[],const PetscInt jj[],const PetscScalar V[])
1638: {
1639:   PetscInt       m,rstart,cstart,cend;
1640:   PetscInt       i,j,d,nz,nz_max=0,*d_nnz=0,*o_nnz=0;
1641:   const PetscInt *JJ=0;
1642:   PetscScalar    *values=0;


1647:   if (bs < 1) SETERRQ1(((PetscObject)B)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive but it is %D",bs);
1648:   PetscLayoutSetBlockSize(B->rmap,bs);
1649:   PetscLayoutSetBlockSize(B->cmap,bs);
1650:   PetscLayoutSetUp(B->rmap);
1651:   PetscLayoutSetUp(B->cmap);
1652:   PetscLayoutGetBlockSize(B->rmap,&bs);
1653:   m      = B->rmap->n/bs;
1654:   rstart = B->rmap->rstart/bs;
1655:   cstart = B->cmap->rstart/bs;
1656:   cend   = B->cmap->rend/bs;

1658:   if (ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"ii[0] must be 0 but it is %D",ii[0]);
1659:   PetscMalloc2(m,PetscInt,&d_nnz,m,PetscInt,&o_nnz);
1660:   for (i=0; i<m; i++) {
1661:     nz = ii[i+1] - ii[i];
1662:     if (nz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nz);
1663:     nz_max = PetscMax(nz_max,nz);
1664:     JJ  = jj + ii[i];
1665:     for (j=0; j<nz; j++) {
1666:       if (*JJ >= cstart) break;
1667:       JJ++;
1668:     }
1669:     d = 0;
1670:     for (; j<nz; j++) {
1671:       if (*JJ++ >= cend) break;
1672:       d++;
1673:     }
1674:     d_nnz[i] = d;
1675:     o_nnz[i] = nz - d;
1676:   }
1677:   MatMPISBAIJSetPreallocation(B,bs,0,d_nnz,0,o_nnz);
1678:   PetscFree2(d_nnz,o_nnz);

1680:   values = (PetscScalar*)V;
1681:   if (!values) {
1682:     PetscMalloc(bs*bs*nz_max*sizeof(PetscScalar),&values);
1683:     PetscMemzero(values,bs*bs*nz_max*sizeof(PetscScalar));
1684:   }
1685:   for (i=0; i<m; i++) {
1686:     PetscInt          row    = i + rstart;
1687:     PetscInt          ncols  = ii[i+1] - ii[i];
1688:     const PetscInt    *icols = jj + ii[i];
1689:     const PetscScalar *svals = values + (V ? (bs*bs*ii[i]) : 0);
1690:     MatSetValuesBlocked_MPISBAIJ(B,1,&row,ncols,icols,svals,INSERT_VALUES);
1691:   }

1693:   if (!V) { PetscFree(values); }
1694:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1695:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1696:   MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
1697:   return(0);
1698: }
1699: EXTERN_C_END

1701: EXTERN_C_BEGIN
1702: #if defined(PETSC_HAVE_MUMPS)
1703: extern PetscErrorCode  MatGetFactor_sbaij_mumps(Mat,MatFactorType,Mat*);
1704: #endif
1705: #if defined(PETSC_HAVE_SPOOLES)
1706: extern PetscErrorCode  MatGetFactor_mpisbaij_spooles(Mat,MatFactorType,Mat*);
1707: #endif
1708: #if defined(PETSC_HAVE_PASTIX)
1709: extern PetscErrorCode MatGetFactor_mpisbaij_pastix(Mat,MatFactorType,Mat*);
1710: #endif
1711: EXTERN_C_END

1713: /*MC
1714:    MATMPISBAIJ - MATMPISBAIJ = "mpisbaij" - A matrix type to be used for distributed symmetric sparse block matrices, 
1715:    based on block compressed sparse row format.  Only the upper triangular portion of the "diagonal" portion of 
1716:    the matrix is stored.

1718:  For complex numbers this matrix is by default symmetric, NOT Hermitian. To make it Hermitian,
1719:  call MatSetOption(mat,MAT_HERMITIAN,PETSC_TRUE);

1721:    Options Database Keys:
1722: . -mat_type mpisbaij - sets the matrix type to "mpisbaij" during a call to MatSetFromOptions()

1724:   Level: beginner

1726: .seealso: MatCreateMPISBAIJ
1727: M*/
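
/*
   Editor's sketch (not part of the original mpisbaij.c): a minimal creation sequence for a
   MATMPISBAIJ matrix, assuming the caller has defined comm, m, n, bs, d_nz, o_nz and a
   PetscErrorCode ierr. Only upper-triangular (block) entries should be set before assembly.
.vb
   Mat A;
   ierr = MatCreate(comm,&A);CHKERRQ(ierr);
   ierr = MatSetSizes(A,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
   ierr = MatSetType(A,MATMPISBAIJ);CHKERRQ(ierr);
   ierr = MatMPISBAIJSetPreallocation(A,bs,d_nz,PETSC_NULL,o_nz,PETSC_NULL);CHKERRQ(ierr);
   ... set upper-triangular entries with MatSetValues() or MatSetValuesBlocked() ...
   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
.ve
*/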

1729: EXTERN_C_BEGIN
1730: extern PetscErrorCode MatConvert_MPISBAIJ_MPISBSTRM(Mat,const MatType,MatReuse,Mat*);
1731: EXTERN_C_END

1733: EXTERN_C_BEGIN
1736: PetscErrorCode  MatCreate_MPISBAIJ(Mat B)
1737: {
1738:   Mat_MPISBAIJ   *b;
1740:   PetscBool      flg;


1744:   PetscNewLog(B,Mat_MPISBAIJ,&b);
1745:   B->data = (void*)b;
1746:   PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));

1748:   B->ops->destroy    = MatDestroy_MPISBAIJ;
1749:   B->ops->view       = MatView_MPISBAIJ;
1750:   B->assembled       = PETSC_FALSE;

1752:   B->insertmode = NOT_SET_VALUES;
1753:   MPI_Comm_rank(((PetscObject)B)->comm,&b->rank);
1754:   MPI_Comm_size(((PetscObject)B)->comm,&b->size);

1756:   /* build local table of row and column ownerships */
1757:   PetscMalloc((b->size+2)*sizeof(PetscInt),&b->rangebs);

1759:   /* build cache for off array entries formed */
1760:   MatStashCreate_Private(((PetscObject)B)->comm,1,&B->stash);
1761:   b->donotstash  = PETSC_FALSE;
1762:   b->colmap      = PETSC_NULL;
1763:   b->garray      = PETSC_NULL;
1764:   b->roworiented = PETSC_TRUE;

1766:   /* stuff used in block assembly */
1767:   b->barray       = 0;

1769:   /* stuff used for matrix vector multiply */
1770:   b->lvec         = 0;
1771:   b->Mvctx        = 0;
1772:   b->slvec0       = 0;
1773:   b->slvec0b      = 0;
1774:   b->slvec1       = 0;
1775:   b->slvec1a      = 0;
1776:   b->slvec1b      = 0;
1777:   b->sMvctx       = 0;

1779:   /* stuff for MatGetRow() */
1780:   b->rowindices   = 0;
1781:   b->rowvalues    = 0;
1782:   b->getrowactive = PETSC_FALSE;

1784:   /* hash table stuff */
1785:   b->ht           = 0;
1786:   b->hd           = 0;
1787:   b->ht_size      = 0;
1788:   b->ht_flag      = PETSC_FALSE;
1789:   b->ht_fact      = 0;
1790:   b->ht_total_ct  = 0;
1791:   b->ht_insert_ct = 0;

1793:   /* stuff for MatGetSubMatrices_MPIBAIJ_local() */
1794:   b->ijonly       = PETSC_FALSE;

1796:   b->in_loc       = 0;
1797:   b->v_loc        = 0;
1798:   b->n_loc        = 0;
1799:   PetscOptionsBegin(((PetscObject)B)->comm,PETSC_NULL,"Options for loading MPISBAIJ matrix 1","Mat");
1800:     PetscOptionsBool("-mat_use_hash_table","Use hash table to save memory in constructing matrix","MatSetOption",PETSC_FALSE,&flg,PETSC_NULL);
1801:     if (flg) {
1802:       PetscReal fact = 1.39;
1803:       MatSetOption(B,MAT_USE_HASH_TABLE,PETSC_TRUE);
1804:       PetscOptionsReal("-mat_use_hash_table","Use hash table factor","MatMPIBAIJSetHashTableFactor",fact,&fact,PETSC_NULL);
1805:       if (fact <= 1.0) fact = 1.39;
1806:       MatMPIBAIJSetHashTableFactor(B,fact);
1807:       PetscInfo1(B,"Hash table Factor used %5.2f\n",fact);
1808:     }
1809:   PetscOptionsEnd();

1811: #if defined(PETSC_HAVE_PASTIX)
1812:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_pastix_C",
1813:                                            "MatGetFactor_mpisbaij_pastix",
1814:                                            MatGetFactor_mpisbaij_pastix);
1815: #endif
1816: #if defined(PETSC_HAVE_MUMPS)
1817:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mumps_C",
1818:                                      "MatGetFactor_sbaij_mumps",
1819:                                      MatGetFactor_sbaij_mumps);
1820: #endif
1821: #if defined(PETSC_HAVE_SPOOLES)
1822:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_spooles_C",
1823:                                      "MatGetFactor_mpisbaij_spooles",
1824:                                      MatGetFactor_mpisbaij_spooles);
1825: #endif
1826:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1827:                                      "MatStoreValues_MPISBAIJ",
1828:                                      MatStoreValues_MPISBAIJ);
1829:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1830:                                      "MatRetrieveValues_MPISBAIJ",
1831:                                      MatRetrieveValues_MPISBAIJ);
1832:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1833:                                      "MatGetDiagonalBlock_MPISBAIJ",
1834:                                      MatGetDiagonalBlock_MPISBAIJ);
1835:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocation_C",
1836:                                      "MatMPISBAIJSetPreallocation_MPISBAIJ",
1837:                                      MatMPISBAIJSetPreallocation_MPISBAIJ);
1838:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocationCSR_C",
1839:                                      "MatMPISBAIJSetPreallocationCSR_MPISBAIJ",
1840:                                      MatMPISBAIJSetPreallocationCSR_MPISBAIJ);
1841:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpisbaij_mpisbstrm_C",
1842:                                      "MatConvert_MPISBAIJ_MPISBSTRM",
1843:                                       MatConvert_MPISBAIJ_MPISBSTRM);

1845:   B->symmetric                  = PETSC_TRUE;
1846:   B->structurally_symmetric     = PETSC_TRUE;
1847:   B->symmetric_set              = PETSC_TRUE;
1848:   B->structurally_symmetric_set = PETSC_TRUE;
1849:   PetscObjectChangeTypeName((PetscObject)B,MATMPISBAIJ);
1850:   return(0);
1851: }
1852: EXTERN_C_END

1854: /*MC
1855:    MATSBAIJ - MATSBAIJ = "sbaij" - A matrix type to be used for symmetric block sparse matrices.

1857:    This matrix type is identical to MATSEQSBAIJ when constructed with a single process communicator,
1858:    and MATMPISBAIJ otherwise.

1860:    Options Database Keys:
1861: . -mat_type sbaij - sets the matrix type to "sbaij" during a call to MatSetFromOptions()

1863:   Level: beginner

1865: .seealso: MatCreateMPISBAIJ,MATSEQSBAIJ,MATMPISBAIJ
1866: M*/
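
/*
   Editor's sketch (not part of the original mpisbaij.c): using the communicator-dispatching
   MATSBAIJ type name, which yields MATSEQSBAIJ on one process and MATMPISBAIJ otherwise;
   comm, m and ierr are assumed to be defined by the caller.
.vb
   Mat A;
   ierr = MatCreate(comm,&A);CHKERRQ(ierr);
   ierr = MatSetSizes(A,m,m,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
   ierr = MatSetType(A,MATSBAIJ);CHKERRQ(ierr);
   ierr = MatSetFromOptions(A);CHKERRQ(ierr);
.ve
*/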

1870: /*@C
1871:    MatMPISBAIJSetPreallocation - For good matrix assembly performance
1872:    the user should preallocate the matrix storage by setting the parameters 
1873:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1874:    performance can be increased by more than a factor of 50.

1876:    Collective on Mat

1878:    Input Parameters:
1879: +  A - the matrix 
1880: .  bs   - size of block
1881: .  d_nz  - number of block nonzeros per block row in diagonal portion of local 
1882:            submatrix  (same for all local rows)
1883: .  d_nnz - array containing the number of block nonzeros in the various block rows
1884:            in the upper triangular and diagonal part of the diagonal portion of the local
1885:            submatrix (possibly different for each block row) or PETSC_NULL.  If you plan to factor the matrix you must leave room
1886:            for the diagonal entry and set a value even if it is zero.
1887: .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1888:            submatrix (same for all local rows).
1889: -  o_nnz - array containing the number of nonzeros in the various block rows of the
1890:            off-diagonal portion of the local submatrix that is right of the diagonal 
1891:            (possibly different for each block row) or PETSC_NULL.


1894:    Options Database Keys:
1895: .   -mat_no_unroll - uses code that does not unroll the loops in the 
1896:                      block calculations (much slower)
1897: .   -mat_block_size - size of the blocks to use

1899:    Notes:

1901:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1902:    then it must be used on all processors that share the object for that argument.

1904:    If the *_nnz parameter is given then the *_nz parameter is ignored.

1906:    Storage Information:
1907:    For a square global matrix we define each processor's diagonal portion 
1908:    to be its local rows and the corresponding columns (a square submatrix);  
1909:    each processor's off-diagonal portion encompasses the remainder of the
1910:    local matrix (a rectangular submatrix). 

1912:    The user can specify preallocated storage for the diagonal part of
1913:    the local submatrix with either d_nz or d_nnz (not both).  Set 
1914:    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1915:    memory allocation.  Likewise, specify preallocated storage for the
1916:    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

1918:    You can call MatGetInfo() to get information on how effective the preallocation was,
1919:    for example via the fields mallocs, nz_allocated, nz_used, and nz_unneeded.
1920:    You can also run with the option -info and look for messages with the string
1921:    malloc in them to see if additional memory allocation was needed.

1923:    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1924:    the figure below we depict these three local rows and all columns (0-11).

1926: .vb
1927:            0 1 2 3 4 5 6 7 8 9 10 11
1928:           -------------------
1929:    row 3  |  . . . d d d o o o o o o
1930:    row 4  |  . . . d d d o o o o o o
1931:    row 5  |  . . . d d d o o o o o o
1932:           -------------------
1933: .ve
1934:   
1935:    Thus, any entries in the d locations are stored in the d (diagonal) 
1936:    submatrix, and any entries in the o locations are stored in the
1937:    o (off-diagonal) submatrix.  Note that the d matrix is stored in
1938:    MatSeqSBAIJ format and the o submatrix in MATSEQBAIJ format.

1940:    Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1941:    plus the diagonal part of the d matrix,
1942:    and o_nz should indicate the number of block nonzeros per row in the o matrix.

1944:    In general, for PDE problems in which most nonzeros are near the diagonal,
1945:    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1946:    or you will get TERRIBLE performance; see the users' manual chapter on
1947:    matrices.

1949:    Level: intermediate

1951: .keywords: matrix, block, aij, compressed row, sparse, parallel

1953: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateBAIJ()
1954: @*/
1955: PetscErrorCode  MatMPISBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
1956: {

1963:   PetscTryMethod(B,"MatMPISBAIJSetPreallocation_C",(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,bs,d_nz,d_nnz,o_nz,o_nnz));
1964:   return(0);
1965: }
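
/*
   Editor's example (not part of the original mpisbaij.c): a worked instance of the storage
   figure in the manual page above. With bs = 1, if the process owns rows 3..5 and every
   depicted d and o location is nonzero, only the upper-triangular part of the d block is
   counted: rows 3, 4, 5 keep d columns {3,4,5}, {4,5}, {5} and o columns 6..11, so
.vb
   PetscInt d_nnz[] = {3,2,1};
   PetscInt o_nnz[] = {6,6,6};
   ierr = MatMPISBAIJSetPreallocation(A,1,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
.ve
*/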

1969: /*@C
1970:    MatCreateSBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1971:    (block compressed row).  For good matrix assembly performance
1972:    the user should preallocate the matrix storage by setting the parameters 
1973:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1974:    performance can be increased by more than a factor of 50.

1976:    Collective on MPI_Comm

1978:    Input Parameters:
1979: +  comm - MPI communicator
1980: .  bs   - size of block
1981: .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
1982:            This value should be the same as the local size used in creating the 
1983:            y vector for the matrix-vector product y = Ax.
1984: .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
1985:            This value should be the same as the local size used in creating the 
1986:            x vector for the matrix-vector product y = Ax.
1987: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
1988: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
1989: .  d_nz  - number of block nonzeros per block row in diagonal portion of local 
1990:            submatrix  (same for all local rows)
1991: .  d_nnz - array containing the number of block nonzeros in the various block rows
1992:            in the upper triangular portion of the diagonal portion of the local submatrix
1993:            (possibly different for each block row) or PETSC_NULL.
1994:            If you plan to factor the matrix you must leave room for the diagonal entry and
1995:            set its value even if it is zero.
1996: .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1997:            submatrix (same for all local rows).
1998: -  o_nnz - array containing the number of nonzeros in the various block rows of the
1999:            off-diagonal portion of the local submatrix (possibly different for
2000:            each block row) or PETSC_NULL.

2002:    Output Parameter:
2003: .  A - the matrix 

2005:    Options Database Keys:
2006: .   -mat_no_unroll - uses code that does not unroll the loops in the 
2007:                      block calculations (much slower)
2008: .   -mat_block_size - size of the blocks to use
2009: .   -mat_mpi - use the parallel matrix data structures even on one processor 
2010:                (defaults to using SeqSBAIJ format on one processor)

2012:    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
2013:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
2014:    [MatXXXXSetPreallocation() is, for example, MatMPISBAIJSetPreallocation()]

2016:    Notes:
2017:    The number of rows and columns must be divisible by blocksize.
2018:    This matrix type does not support complex Hermitian operations.

2020:    The user MUST specify either the local or global matrix dimensions
2021:    (possibly both).

2023:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
2024:    then it must be used on all processors that share the object for that argument.

2026:    If the *_nnz parameter is given then the *_nz parameter is ignored.

2028:    Storage Information:
2029:    For a square global matrix we define each processor's diagonal portion 
2030:    to be its local rows and the corresponding columns (a square submatrix);  
2031:    each processor's off-diagonal portion encompasses the remainder of the
2032:    local matrix (a rectangular submatrix). 

2034:    The user can specify preallocated storage for the diagonal part of
2035:    the local submatrix with either d_nz or d_nnz (not both).  Set 
2036:    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
2037:    memory allocation.  Likewise, specify preallocated storage for the
2038:    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

2040:    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
2041:    the figure below we depict these three local rows and all columns (0-11).

2043: .vb
2044:            0 1 2 3 4 5 6 7 8 9 10 11
2045:           -------------------
2046:    row 3  |  . . . d d d o o o o o o
2047:    row 4  |  . . . d d d o o o o o o
2048:    row 5  |  . . . d d d o o o o o o
2049:           -------------------
2050: .ve
2051:   
2052:    Thus, any entries in the d locations are stored in the d (diagonal) 
2053:    submatrix, and any entries in the o locations are stored in the
2054:    o (off-diagonal) submatrix.  Note that the d matrix is stored in
2055:    MatSeqSBAIJ format and the o submatrix in MATSEQBAIJ format.

2057:    Now d_nz should indicate the number of block nonzeros per row in the upper triangular
2058:    plus the diagonal part of the d matrix,
2059:    and o_nz should indicate the number of block nonzeros per row in the o matrix.
2060:    In general, for PDE problems in which most nonzeros are near the diagonal,
2061:    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
2062:    or you will get TERRIBLE performance; see the users' manual chapter on
2063:    matrices.

2065:    Level: intermediate

2067: .keywords: matrix, block, aij, compressed row, sparse, parallel

2069: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateBAIJ()
2070: @*/

2072: PetscErrorCode  MatCreateSBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
2073: {
2075:   PetscMPIInt    size;

2078:   MatCreate(comm,A);
2079:   MatSetSizes(*A,m,n,M,N);
2080:   MPI_Comm_size(comm,&size);
2081:   if (size > 1) {
2082:     MatSetType(*A,MATMPISBAIJ);
2083:     MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);
2084:   } else {
2085:     MatSetType(*A,MATSEQSBAIJ);
2086:     MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);
2087:   }
2088:   return(0);
2089: }
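
/*
   Editor's sketch (not part of the original mpisbaij.c): the one-call equivalent of the
   MatCreate()/MatSetType()/preallocation sequence recommended above; bs, m, d_nz, o_nz
   and ierr are assumed defined by the caller.
.vb
   Mat A;
   ierr = MatCreateSBAIJ(PETSC_COMM_WORLD,bs,m,m,PETSC_DETERMINE,PETSC_DETERMINE,
                         d_nz,PETSC_NULL,o_nz,PETSC_NULL,&A);CHKERRQ(ierr);
.ve
*/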


2094: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2095: {
2096:   Mat            mat;
2097:   Mat_MPISBAIJ   *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
2099:   PetscInt       len=0,nt,bs=matin->rmap->bs,mbs=oldmat->mbs;
2100:   PetscScalar    *array;

2103:   *newmat       = 0;
2104:   MatCreate(((PetscObject)matin)->comm,&mat);
2105:   MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2106:   MatSetType(mat,((PetscObject)matin)->type_name);
2107:   PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
2108:   PetscLayoutReference(matin->rmap,&mat->rmap);
2109:   PetscLayoutReference(matin->cmap,&mat->cmap);
2110: 
2111:   mat->factortype   = matin->factortype;
2112:   mat->preallocated = PETSC_TRUE;
2113:   mat->assembled    = PETSC_TRUE;
2114:   mat->insertmode   = NOT_SET_VALUES;

2116:   a = (Mat_MPISBAIJ*)mat->data;
2117:   a->bs2   = oldmat->bs2;
2118:   a->mbs   = oldmat->mbs;
2119:   a->nbs   = oldmat->nbs;
2120:   a->Mbs   = oldmat->Mbs;
2121:   a->Nbs   = oldmat->Nbs;


2124:   a->size         = oldmat->size;
2125:   a->rank         = oldmat->rank;
2126:   a->donotstash   = oldmat->donotstash;
2127:   a->roworiented  = oldmat->roworiented;
2128:   a->rowindices   = 0;
2129:   a->rowvalues    = 0;
2130:   a->getrowactive = PETSC_FALSE;
2131:   a->barray       = 0;
2132:   a->rstartbs    = oldmat->rstartbs;
2133:   a->rendbs      = oldmat->rendbs;
2134:   a->cstartbs    = oldmat->cstartbs;
2135:   a->cendbs      = oldmat->cendbs;

2137:   /* hash table stuff */
2138:   a->ht           = 0;
2139:   a->hd           = 0;
2140:   a->ht_size      = 0;
2141:   a->ht_flag      = oldmat->ht_flag;
2142:   a->ht_fact      = oldmat->ht_fact;
2143:   a->ht_total_ct  = 0;
2144:   a->ht_insert_ct = 0;
2145: 
2146:   PetscMemcpy(a->rangebs,oldmat->rangebs,(a->size+2)*sizeof(PetscInt));
2147:   if (oldmat->colmap) {
2148: #if defined (PETSC_USE_CTABLE)
2149:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2150: #else
2151:     PetscMalloc((a->Nbs)*sizeof(PetscInt),&a->colmap);
2152:     PetscLogObjectMemory(mat,(a->Nbs)*sizeof(PetscInt));
2153:     PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));
2154: #endif
2155:   } else a->colmap = 0;

2157:   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
2158:     PetscMalloc(len*sizeof(PetscInt),&a->garray);
2159:     PetscLogObjectMemory(mat,len*sizeof(PetscInt));
2160:     PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));
2161:   } else a->garray = 0;
2162: 
2163:   MatStashCreate_Private(((PetscObject)matin)->comm,matin->rmap->bs,&mat->bstash);
2164:   VecDuplicate(oldmat->lvec,&a->lvec);
2165:   PetscLogObjectParent(mat,a->lvec);
2166:   VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2167:   PetscLogObjectParent(mat,a->Mvctx);

2169:   VecDuplicate(oldmat->slvec0,&a->slvec0);
2170:   PetscLogObjectParent(mat,a->slvec0);
2171:   VecDuplicate(oldmat->slvec1,&a->slvec1);
2172:   PetscLogObjectParent(mat,a->slvec1);

2174:   VecGetLocalSize(a->slvec1,&nt);
2175:   VecGetArray(a->slvec1,&array);
2176:   VecCreateSeqWithArray(PETSC_COMM_SELF,1,bs*mbs,array,&a->slvec1a);
2177:   VecCreateSeqWithArray(PETSC_COMM_SELF,1,nt-bs*mbs,array+bs*mbs,&a->slvec1b);
2178:   VecRestoreArray(a->slvec1,&array);
2179:   VecGetArray(a->slvec0,&array);
2180:   VecCreateSeqWithArray(PETSC_COMM_SELF,1,nt-bs*mbs,array+bs*mbs,&a->slvec0b);
2181:   VecRestoreArray(a->slvec0,&array);
2182:   PetscLogObjectParent(mat,a->slvec0);
2183:   PetscLogObjectParent(mat,a->slvec1);
2184:   PetscLogObjectParent(mat,a->slvec0b);
2185:   PetscLogObjectParent(mat,a->slvec1a);
2186:   PetscLogObjectParent(mat,a->slvec1b);

2188:   /*  VecScatterCopy(oldmat->sMvctx,&a->sMvctx); - not written yet, replaced by the lazy trick: */
2189:   PetscObjectReference((PetscObject)oldmat->sMvctx);
2190:   a->sMvctx = oldmat->sMvctx;
2191:   PetscLogObjectParent(mat,a->sMvctx);

2193:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2194:   PetscLogObjectParent(mat,a->A);
2195:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2196:   PetscLogObjectParent(mat,a->B);
2197:   PetscFListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2198:   *newmat = mat;
2199:   return(0);
2200: }

2204: PetscErrorCode MatLoad_MPISBAIJ(Mat newmat,PetscViewer viewer)
2205: {
2207:   PetscInt       i,nz,j,rstart,rend;
2208:   PetscScalar    *vals,*buf;
2209:   MPI_Comm       comm = ((PetscObject)viewer)->comm;
2210:   MPI_Status     status;
2211:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag,*sndcounts = 0,*browners,maxnz,*rowners,*locrowlens,mmbs;
2212:   PetscInt       header[4],*rowlengths = 0,M,N,m,*cols;
2213:   PetscInt       *procsnz = 0,jj,*mycols,*ibuf;
2214:   PetscInt       bs=1,Mbs,mbs,extra_rows;
2215:   PetscInt       *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
2216:   PetscInt       dcount,kmax,k,nzcount,tmp,sizesset=1,grows,gcols;
2217:   int            fd;
2218: 
2220:   PetscOptionsBegin(comm,PETSC_NULL,"Options for loading MPISBAIJ matrix 2","Mat");
2221:     PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,PETSC_NULL);
2222:   PetscOptionsEnd();

2224:   MPI_Comm_size(comm,&size);
2225:   MPI_Comm_rank(comm,&rank);
2226:   if (!rank) {
2227:     PetscViewerBinaryGetDescriptor(viewer,&fd);
2228:     PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
2229:     if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2230:     if (header[3] < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2231:   }

2233:   if (newmat->rmap->n < 0 && newmat->rmap->N < 0 && newmat->cmap->n < 0 && newmat->cmap->N < 0) sizesset = 0;

2235:   MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2236:   M = header[1]; N = header[2];

2238:   /* If global rows/cols are set to PETSC_DECIDE, set it to the sizes given in the file */
2239:   if (sizesset && newmat->rmap->N < 0) newmat->rmap->N = M;
2240:   if (sizesset && newmat->cmap->N < 0) newmat->cmap->N = N;
2241: 
2242:   /* If global sizes are set, check if they are consistent with that given in the file */
2243:   if (sizesset) {
2244:     MatGetSize(newmat,&grows,&gcols);
2245:   }
2246:   if (sizesset && newmat->rmap->N != grows) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows:Matrix in file has (%d) and input matrix has (%d)",M,grows);
2247:   if (sizesset && newmat->cmap->N != gcols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of cols:Matrix in file has (%d) and input matrix has (%d)",N,gcols);

2249:   if (M != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Can only do square matrices");

2251:   /* 
2252:      This code adds extra rows to make sure the number of rows is 
2253:      divisible by the blocksize
2254:   */
2255:   Mbs        = M/bs;
2256:   extra_rows = bs - M + bs*(Mbs);
2257:   if (extra_rows == bs) extra_rows = 0;
2258:   else                  Mbs++;
2259:   if (extra_rows && !rank) {
2260:     PetscInfo(viewer,"Padding loaded matrix to match blocksize\n");
2261:   }

2263:   /* determine ownership of all rows */
2264:   if (newmat->rmap->n < 0) { /* PETSC_DECIDE */
2265:     mbs        = Mbs/size + ((Mbs % size) > rank);
2266:     m          = mbs*bs;
2267:   } else { /* User Set */
2268:     m          = newmat->rmap->n;
2269:     mbs        = m/bs;
2270:   }
2271:   PetscMalloc2(size+1,PetscMPIInt,&rowners,size+1,PetscMPIInt,&browners);
2272:   mmbs       = PetscMPIIntCast(mbs);
2273:   MPI_Allgather(&mmbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);
2274:   rowners[0] = 0;
2275:   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2276:   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2277:   rstart = rowners[rank];
2278:   rend   = rowners[rank+1];
2279: 
2280:   /* distribute row lengths to all processors */
2281:   PetscMalloc((rend-rstart)*bs*sizeof(PetscMPIInt),&locrowlens);
2282:   if (!rank) {
2283:     PetscMalloc((M+extra_rows)*sizeof(PetscInt),&rowlengths);
2284:     PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
2285:     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2286:     PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);
2287:     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2288:     MPI_Scatterv(rowlengths,sndcounts,browners,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2289:     PetscFree(sndcounts);
2290:   } else {
2291:     MPI_Scatterv(0,0,0,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2292:   }
2293: 
2294:   if (!rank) {   /* procs[0] */
2295:     /* calculate the number of nonzeros on each processor */
2296:     PetscMalloc(size*sizeof(PetscInt),&procsnz);
2297:     PetscMemzero(procsnz,size*sizeof(PetscInt));
2298:     for (i=0; i<size; i++) {
2299:       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2300:         procsnz[i] += rowlengths[j];
2301:       }
2302:     }
2303:     PetscFree(rowlengths);
2304: 
2305:     /* determine max buffer needed and allocate it */
2306:     maxnz = 0;
2307:     for (i=0; i<size; i++) {
2308:       maxnz = PetscMax(maxnz,procsnz[i]);
2309:     }
2310:     PetscMalloc(maxnz*sizeof(PetscInt),&cols);

2312:     /* read in my part of the matrix column indices  */
2313:     nz     = procsnz[0];
2314:     PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2315:     mycols = ibuf;
2316:     if (size == 1)  nz -= extra_rows;
2317:     PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2318:     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }

2320:     /* read in all the other processors' parts (except the last) and ship them off */
2321:     for (i=1; i<size-1; i++) {
2322:       nz   = procsnz[i];
2323:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
2324:       MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
2325:     }
2326:     /* read in the stuff for the last proc */
2327:     if (size != 1) {
2328:       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2329:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
2330:       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2331:       MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);
2332:     }
2333:     PetscFree(cols);
2334:   } else {  /* procs[i], i>0 */
2335:     /* determine buffer space needed for message */
2336:     nz = 0;
2337:     for (i=0; i<m; i++) {
2338:       nz += locrowlens[i];
2339:     }
2340:     PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2341:     mycols = ibuf;
2342:     /* receive message of column indices*/
2343:     MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
2344:     MPI_Get_count(&status,MPIU_INT,&maxnz);
2345:     if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");
2346:   }

2348:   /* loop over local rows, determining number of off diagonal entries */
2349:   PetscMalloc2(rend-rstart,PetscInt,&dlens,rend-rstart,PetscInt,&odlens);
2350:   PetscMalloc3(Mbs,PetscInt,&mask,Mbs,PetscInt,&masked1,Mbs,PetscInt,&masked2);
2351:   PetscMemzero(mask,Mbs*sizeof(PetscInt));
2352:   PetscMemzero(masked1,Mbs*sizeof(PetscInt));
2353:   PetscMemzero(masked2,Mbs*sizeof(PetscInt));
2354:   rowcount = 0;
2355:   nzcount  = 0;
2356:   for (i=0; i<mbs; i++) {
2357:     dcount  = 0;
2358:     odcount = 0;
2359:     for (j=0; j<bs; j++) {
2360:       kmax = locrowlens[rowcount];
2361:       for (k=0; k<kmax; k++) {
2362:         tmp = mycols[nzcount++]/bs; /* block col. index */
2363:         if (!mask[tmp]) {
2364:           mask[tmp] = 1;
2365:           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2366:           else masked1[dcount++] = tmp; /* entry in diag portion */
2367:         }
2368:       }
2369:       rowcount++;
2370:     }
2371: 
2372:     dlens[i]  = dcount;  /* d_nzz[i] */
2373:     odlens[i] = odcount; /* o_nzz[i] */

2375:     /* zero out the mask elements we set */
2376:     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2377:     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2378:   }
2379:   if (!sizesset) {
2380:     MatSetSizes(newmat,m,m,M+extra_rows,N+extra_rows);
2381:   }
2382:   MatMPISBAIJSetPreallocation(newmat,bs,0,dlens,0,odlens);
2383:   MatSetOption(newmat,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE);
2384: 
2385:   if (!rank) {
2386:     PetscMalloc(maxnz*sizeof(PetscScalar),&buf);
2387:     /* read in my part of the matrix numerical values  */
2388:     nz = procsnz[0];
2389:     vals = buf;
2390:     mycols = ibuf;
2391:     if (size == 1)  nz -= extra_rows;
2392:     PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2393:     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }

2395:     /* insert into matrix */
2396:     jj      = rstart*bs;
2397:     for (i=0; i<m; i++) {
2398:       MatSetValues(newmat,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2399:       mycols += locrowlens[i];
2400:       vals   += locrowlens[i];
2401:       jj++;
2402:     }

2404:     /* read in other processors (except the last one) and ship out */
2405:     for (i=1; i<size-1; i++) {
2406:       nz   = procsnz[i];
2407:       vals = buf;
2408:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2409:       MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newmat)->tag,comm);
2410:     }
2411:     /* the last proc */
2412:     if (size != 1){
2413:       nz   = procsnz[i] - extra_rows;
2414:       vals = buf;
2415:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2416:       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2417:       MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,((PetscObject)newmat)->tag,comm);
2418:     }
2419:     PetscFree(procsnz);

2421:   } else {
2422:     /* receive numeric values */
2423:     PetscMalloc(nz*sizeof(PetscScalar),&buf);

2425:     /* receive message of values*/
2426:     vals   = buf;
2427:     mycols = ibuf;
2428:     MPI_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newmat)->tag,comm,&status);
2429:     MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
2430:     if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");

2432:     /* insert into matrix */
2433:     jj      = rstart*bs;
2434:     for (i=0; i<m; i++) {
2435:       MatSetValues_MPISBAIJ(newmat,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2436:       mycols += locrowlens[i];
2437:       vals   += locrowlens[i];
2438:       jj++;
2439:     }
2440:   }

2442:   PetscFree(locrowlens);
2443:   PetscFree(buf);
2444:   PetscFree(ibuf);
2445:   PetscFree2(rowners,browners);
2446:   PetscFree2(dlens,odlens);
2447:   PetscFree3(mask,masked1,masked2);
2448:   MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
2449:   MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);
2450:   return(0);
2451: }
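
/*
   Editor's sketch (not part of the original mpisbaij.c): loading a matrix through the
   routine above. The file name "matrix.dat" is illustrative; the blocksize is chosen at
   run time with -matload_block_size <bs>.
.vb
   Mat         A;
   PetscViewer viewer;
   ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);CHKERRQ(ierr);
   ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
   ierr = MatSetType(A,MATMPISBAIJ);CHKERRQ(ierr);
   ierr = MatLoad(A,viewer);CHKERRQ(ierr);
   ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
.ve
*/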

2455: /*XXXXX@
2456:    MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.

2458:    Input Parameters:
2459: .  mat  - the matrix
2460: .  fact - factor

2462:    Not Collective on Mat, each process can have a different hash factor

2464:    Level: advanced

2466:   Notes:
2467:    This can also be set by the command line option: -mat_use_hash_table fact

2469: .keywords: matrix, hashtable, factor, HT

2471: .seealso: MatSetOption()
2472: @XXXXX*/


2477: PetscErrorCode MatGetRowMaxAbs_MPISBAIJ(Mat A,Vec v,PetscInt idx[])
2478: {
2479:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
2480:   Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(a->B)->data;
2481:   PetscReal      atmp;
2482:   PetscReal      *work,*svalues,*rvalues;
2484:   PetscInt       i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2485:   PetscMPIInt    rank,size;
2486:   PetscInt       *rowners_bs,dest,count,source;
2487:   PetscScalar    *va;
2488:   MatScalar      *ba;
2489:   MPI_Status     stat;

2492:   if (idx) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Send email to petsc-maint@mcs.anl.gov");
2493:   MatGetRowMaxAbs(a->A,v,PETSC_NULL);
2494:   VecGetArray(v,&va);

2496:   MPI_Comm_size(((PetscObject)A)->comm,&size);
2497:   MPI_Comm_rank(((PetscObject)A)->comm,&rank);

2499:   bs   = A->rmap->bs;
2500:   mbs  = a->mbs;
2501:   Mbs  = a->Mbs;
2502:   ba   = b->a;
2503:   bi   = b->i;
2504:   bj   = b->j;

2506:   /* find ownerships */
2507:   rowners_bs = A->rmap->range;

2509:   /* each proc creates an array to be distributed */
2510:   PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);
2511:   PetscMemzero(work,bs*Mbs*sizeof(PetscReal));

2513:   /* row_max for B */
2514:   if (rank != size-1){
2515:     for (i=0; i<mbs; i++) {
2516:       ncols = bi[1] - bi[0]; bi++;
2517:       brow  = bs*i;
2518:       for (j=0; j<ncols; j++){
2519:         bcol = bs*(*bj);
2520:         for (kcol=0; kcol<bs; kcol++){
2521:           col = bcol + kcol;                 /* local col index */
2522:           col += rowners_bs[rank+1];      /* global col index */
2523:           for (krow=0; krow<bs; krow++){
2524:             atmp = PetscAbsScalar(*ba); ba++;
2525:             row = brow + krow;    /* local row index */
2526:             if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2527:             if (work[col] < atmp) work[col] = atmp;
2528:           }
2529:         }
2530:         bj++;
2531:       }
2532:     }

2534:     /* send values to its owners */
2535:     for (dest=rank+1; dest<size; dest++){
2536:       svalues = work + rowners_bs[dest];
2537:       count   = rowners_bs[dest+1]-rowners_bs[dest];
2538:       MPI_Send(svalues,count,MPIU_REAL,dest,rank,((PetscObject)A)->comm);
2539:     }
2540:   }
2541: 
2542:   /* receive values */
2543:   if (rank){
2544:     rvalues = work;
2545:     count   = rowners_bs[rank+1]-rowners_bs[rank];
2546:     for (source=0; source<rank; source++){
2547:       MPI_Recv(rvalues,count,MPIU_REAL,MPI_ANY_SOURCE,MPI_ANY_TAG,((PetscObject)A)->comm,&stat);
2548:       /* process values */
2549:       for (i=0; i<count; i++){
2550:         if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2551:       }
2552:     }
2553:   }

2555:   VecRestoreArray(v,&va);
2556:   PetscFree(work);
2557:   return(0);
2558: }

2562: PetscErrorCode MatSOR_MPISBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2563: {
2564:   Mat_MPISBAIJ      *mat = (Mat_MPISBAIJ*)matin->data;
2565:   PetscErrorCode    ierr;
2566:   PetscInt          mbs=mat->mbs,bs=matin->rmap->bs;
2567:   PetscScalar       *x,*ptr,*from;
2568:   Vec               bb1;
2569:   const PetscScalar *b;
2570: 
2572:   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2573:   if (bs > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");

2575:   if (flag == SOR_APPLY_UPPER) {
2576:     (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2577:     return(0);
2578:   }

2580:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2581:     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2582:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2583:       its--;
2584:     }

2586:     VecDuplicate(bb,&bb1);
2587:     while (its--){
2588: 
2589:       /* lower triangular part: slvec0b = - B^T*xx */
2590:       (*mat->B->ops->multtranspose)(mat->B,xx,mat->slvec0b);
2591: 
2592:       /* copy xx into slvec0a */
2593:       VecGetArray(mat->slvec0,&ptr);
2594:       VecGetArray(xx,&x);
2595:       PetscMemcpy(ptr,x,bs*mbs*sizeof(MatScalar));
2596:       VecRestoreArray(mat->slvec0,&ptr);

2598:       VecScale(mat->slvec0,-1.0);

2600:       /* copy bb into slvec1a */
2601:       VecGetArray(mat->slvec1,&ptr);
2602:       VecGetArrayRead(bb,&b);
2603:       PetscMemcpy(ptr,b,bs*mbs*sizeof(MatScalar));
2604:       VecRestoreArray(mat->slvec1,&ptr);

2606:       /* set slvec1b = 0 */
2607:       VecSet(mat->slvec1b,0.0);

2609:       VecScatterBegin(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);
2610:       VecRestoreArray(xx,&x);
2611:       VecRestoreArrayRead(bb,&b);
2612:       VecScatterEnd(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);

2614:       /* upper triangular part: bb1 = bb1 - B*x */
2615:       (*mat->B->ops->multadd)(mat->B,mat->slvec1b,mat->slvec1a,bb1);
2616: 
2617:       /* local diagonal sweep */
2618:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2619:     }
2620:     VecDestroy(&bb1);
2621:   } else if ((flag & SOR_LOCAL_FORWARD_SWEEP) && (its == 1) && (flag & SOR_ZERO_INITIAL_GUESS)){
2622:     (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2623:   } else if ((flag & SOR_LOCAL_BACKWARD_SWEEP) && (its == 1) && (flag & SOR_ZERO_INITIAL_GUESS)){
2624:     (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2625:   } else if (flag & SOR_EISENSTAT) {
2626:     Vec               xx1;
2627:     PetscBool         hasop;
2628:     const PetscScalar *diag;
2629:     PetscScalar       *sl,scale = (omega - 2.0)/omega;
2630:     PetscInt          i,n;

2632:     if (!mat->xx1) {
2633:       VecDuplicate(bb,&mat->xx1);
2634:       VecDuplicate(bb,&mat->bb1);
2635:     }
2636:     xx1 = mat->xx1;
2637:     bb1 = mat->bb1;

2639:     (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);

2641:     if (!mat->diag) {
2642:       /* this is wrong for same matrix with new nonzero values */
2643:       MatGetVecs(matin,&mat->diag,PETSC_NULL);
2644:       MatGetDiagonal(matin,mat->diag);
2645:     }
2646:     MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);

2648:     if (hasop) {
2649:       MatMultDiagonalBlock(matin,xx,bb1);
2650:       VecAYPX(mat->slvec1a,scale,bb);
2651:     } else {
2652:       /*
2653:           These two lines are replaced by code that may be a bit faster for a good compiler
2654:       VecPointwiseMult(mat->slvec1a,mat->diag,xx);
2655:       VecAYPX(mat->slvec1a,scale,bb);
2656:       */
2657:       VecGetArray(mat->slvec1a,&sl);
2658:       VecGetArrayRead(mat->diag,&diag);
2659:       VecGetArrayRead(bb,&b);
2660:       VecGetArray(xx,&x);
2661:       VecGetLocalSize(xx,&n);
2662:       if (omega == 1.0) {
2663:         for (i=0; i<n; i++) {
2664:           sl[i] = b[i] - diag[i]*x[i];
2665:         }
2666:         PetscLogFlops(2.0*n);
2667:       } else {
2668:         for (i=0; i<n; i++) {
2669:           sl[i] = b[i] + scale*diag[i]*x[i];
2670:         }
2671:         PetscLogFlops(3.0*n);
2672:       }
2673:       VecRestoreArray(mat->slvec1a,&sl);
2674:       VecRestoreArrayRead(mat->diag,&diag);
2675:       VecRestoreArrayRead(bb,&b);
2676:       VecRestoreArray(xx,&x);
2677:     }

2679:     /* multiply off-diagonal portion of matrix */
2680:     VecSet(mat->slvec1b,0.0);
2681:     (*mat->B->ops->multtranspose)(mat->B,xx,mat->slvec0b);
2682:     VecGetArray(mat->slvec0,&from);
2683:     VecGetArray(xx,&x);
2684:     PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
2685:     VecRestoreArray(mat->slvec0,&from);
2686:     VecRestoreArray(xx,&x);
2687:     VecScatterBegin(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);
2688:     VecScatterEnd(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);
2689:     (*mat->B->ops->multadd)(mat->B,mat->slvec1b,mat->slvec1a,mat->slvec1a);

2691:     /* local sweep */
2692:     (*mat->A->ops->sor)(mat->A,mat->slvec1a,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
2693:     VecAXPY(xx,1.0,xx1);
2694:   } else {
2695:     SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2696:   }
2697:   return(0);
2698: }

2702: PetscErrorCode MatSOR_MPISBAIJ_2comm(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2703: {
2704:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
2706:   Vec            lvec1,bb1;
2707: 
2709:   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2710:   if (matin->rmap->bs > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");

2712:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2713:     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2714:       (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2715:       its--;
2716:     }

2718:     VecDuplicate(mat->lvec,&lvec1);
2719:     VecDuplicate(bb,&bb1);
2720:     while (its--){
2721:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2722: 
2723:       /* lower diagonal part: bb1 = bb - B^T*xx */
2724:       (*mat->B->ops->multtranspose)(mat->B,xx,lvec1);
2725:       VecScale(lvec1,-1.0);

2727:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2728:       VecCopy(bb,bb1);
2729:       VecScatterBegin(mat->Mvctx,lvec1,bb1,ADD_VALUES,SCATTER_REVERSE);

2731:       /* upper diagonal part: bb1 = bb1 - B*x */
2732:       VecScale(mat->lvec,-1.0);
2733:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb1,bb1);

2735:       VecScatterEnd(mat->Mvctx,lvec1,bb1,ADD_VALUES,SCATTER_REVERSE);
2736: 
2737:       /* diagonal sweep */
2738:       (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2739:     }
2740:     VecDestroy(&lvec1);
2741:     VecDestroy(&bb1);
2742:   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2743:   return(0);
2744: }

2748: /*@
2749:      MatCreateMPISBAIJWithArrays - creates an MPI SBAIJ matrix using arrays that contain the local rows
2750:          in standard CSR format.

2752:    Collective on MPI_Comm

2754:    Input Parameters:
2755: +  comm - MPI communicator
2756: .  bs - the block size, only a block size of 1 is supported
2757: .  m - number of local rows (cannot be PETSC_DECIDE)
2758: .  n - number of local columns; this value should be the same as the local size used in creating the 
2759:        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
2760:        calculated if N is given). For square matrices n is almost always m.
2761: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
2762: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
2763: .   i - row indices
2764: .   j - column indices
2765: -   a - matrix values

2767:    Output Parameter:
2768: .   mat - the matrix

2770:    Level: intermediate

2772:    Notes:
2773:        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
2774:      thus you CANNOT change the matrix entries by changing the values of a[] after you have 
2775:      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

2777:        The i and j indices are 0-based, and the i indices are offsets into the local j array.

2779: .keywords: matrix, aij, compressed row, sparse, parallel

2781: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
2782:           MPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays()
2783: @*/
2784: PetscErrorCode  MatCreateMPISBAIJWithArrays(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
2785: {


2790:   if (i[0]) {
2791:     SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
2792:   }
2793:   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE or negative");
2794:   MatCreate(comm,mat);
2795:   MatSetSizes(*mat,m,n,M,N);
2796:   MatSetType(*mat,MATMPISBAIJ);
2797:   MatMPISBAIJSetPreallocationCSR(*mat,bs,i,j,a);
2798:   return(0);
2799: }
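
/*
   Editor's sketch (not part of the original mpisbaij.c): a single-process example of the
   CSR interface above, passing the upper triangle of the 2x2 symmetric matrix [4 1; 1 3]
   with bs = 1. Row 0 has two entries (columns 0 and 1), row 1 has one (column 1).
.vb
   PetscInt    i[] = {0,2,3};
   PetscInt    j[] = {0,1,1};
   PetscScalar a[] = {4.0,1.0,3.0};
   Mat         A;
   ierr = MatCreateMPISBAIJWithArrays(PETSC_COMM_WORLD,1,2,2,PETSC_DETERMINE,PETSC_DETERMINE,i,j,a,&A);CHKERRQ(ierr);
.ve
*/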


2804: /*@C
2805:    MatMPISBAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in SBAIJ format
2806:    (block compressed row).  

2808:    Collective on MPI_Comm

2810:    Input Parameters:
2811: +  A - the matrix 
2812: .  bs - the block size
2813: .  i - the indices into j for the start of each local row (starts with zero)
2814: .  j - the column indices for each local row (starts with zero) these must be sorted for each row
2815: -  v - optional values in the matrix

2817:    Level: developer

2819: .keywords: matrix, aij, compressed row, sparse, parallel

2821: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIBAIJSetPreallocation(), MatCreateAIJ(), MPIAIJ
2822: @*/
2823: PetscErrorCode  MatMPISBAIJSetPreallocationCSR(Mat B,PetscInt bs,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
2824: {

2828:   PetscTryMethod(B,"MatMPISBAIJSetPreallocationCSR_C",(Mat,PetscInt,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,bs,i,j,v));
2829:   return(0);
2830: }