Actual source code: mpibaij.c
petsc-3.7.3 2016-08-01
2: #include <../src/mat/impls/baij/mpi/mpibaij.h> /*I "petscmat.h" I*/
4: #include <petscblaslapack.h>
5: #include <petscsf.h>
9: PetscErrorCode MatGetRowMaxAbs_MPIBAIJ(Mat A,Vec v,PetscInt idx[])
10: {
11: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
13: PetscInt i,*idxb = 0;
14: PetscScalar *va,*vb;
15: Vec vtmp;
18: MatGetRowMaxAbs(a->A,v,idx);
19: VecGetArray(v,&va);
20: if (idx) {
21: for (i=0; i<A->rmap->n; i++) {
22: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
23: }
24: }
26: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
27: if (idx) {PetscMalloc1(A->rmap->n,&idxb);}
28: MatGetRowMaxAbs(a->B,vtmp,idxb);
29: VecGetArray(vtmp,&vb);
31: for (i=0; i<A->rmap->n; i++) {
32: if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
33: va[i] = vb[i];
34: if (idx) idx[i] = A->cmap->bs*a->garray[idxb[i]/A->cmap->bs] + (idxb[i] % A->cmap->bs);
35: }
36: }
38: VecRestoreArray(v,&va);
39: VecRestoreArray(vtmp,&vb);
40: PetscFree(idxb);
41: VecDestroy(&vtmp);
42: return(0);
43: }
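/*
   Illustrative usage sketch (not part of the original file): MatGetRowMaxAbs()
   fills a vector with, for each local row, the entry of largest absolute value,
   and (optionally) the global column index of that entry.  A below is a
   hypothetical assembled MATMPIBAIJ matrix; all other names are illustrative.

      Vec       rowmax;
      PetscInt  m,n,*cols;
      MatGetLocalSize(A,&m,&n);
      PetscMalloc1(m,&cols);
      MatCreateVecs(A,NULL,&rowmax);        rowmax has the row layout of A
      MatGetRowMaxAbs(A,rowmax,cols);       one value and one column per local row
      PetscFree(cols);
      VecDestroy(&rowmax);
*/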
47: PetscErrorCode MatStoreValues_MPIBAIJ(Mat mat)
48: {
49: Mat_MPIBAIJ *aij = (Mat_MPIBAIJ*)mat->data;
53: MatStoreValues(aij->A);
54: MatStoreValues(aij->B);
55: return(0);
56: }
60: PetscErrorCode MatRetrieveValues_MPIBAIJ(Mat mat)
61: {
62: Mat_MPIBAIJ *aij = (Mat_MPIBAIJ*)mat->data;
66: MatRetrieveValues(aij->A);
67: MatRetrieveValues(aij->B);
68: return(0);
69: }
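/*
   Illustrative usage sketch (not part of the original file): MatStoreValues()
   and MatRetrieveValues() save and restore the numerical values of a matrix
   whose nonzero pattern is frozen, which is useful when the same pattern is
   refilled many times.  A is a hypothetical assembled MATMPIBAIJ matrix.

      MatSetOption(A,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);
      MatStoreValues(A);                  keep a copy of the current values
      ...  refill A with MatSetValues() and reassemble  ...
      MatRetrieveValues(A);               restore the saved values
*/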
71: /*
72: Local utility routine that creates a mapping from the global column
73: number to the local number in the off-diagonal part of the local
74: storage of the matrix. This is done in a non-scalable way since the
75: length of colmap equals the global number of block columns.
76: */
79: PetscErrorCode MatCreateColmap_MPIBAIJ_Private(Mat mat)
80: {
81: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
82: Mat_SeqBAIJ *B = (Mat_SeqBAIJ*)baij->B->data;
84: PetscInt nbs = B->nbs,i,bs=mat->rmap->bs;
87: #if defined(PETSC_USE_CTABLE)
88: PetscTableCreate(baij->nbs,baij->Nbs+1,&baij->colmap);
89: for (i=0; i<nbs; i++) {
90: PetscTableAdd(baij->colmap,baij->garray[i]+1,i*bs+1,INSERT_VALUES);
91: }
92: #else
93: PetscMalloc1(baij->Nbs+1,&baij->colmap);
94: PetscLogObjectMemory((PetscObject)mat,baij->Nbs*sizeof(PetscInt));
95: PetscMemzero(baij->colmap,baij->Nbs*sizeof(PetscInt));
96: for (i=0; i<nbs; i++) baij->colmap[baij->garray[i]] = i*bs+1;
97: #endif
98: return(0);
99: }
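/*
   Sketch (explanatory only, not part of the original file) of how the colmap
   built above is consulted; compare the lookups in MatSetValues_MPIBAIJ()
   below.  gcol is a hypothetical global point column index that is not owned
   by this process; colmap stores 1 + the local point offset of each off-process
   block column present in B, and 0 for block columns that are absent.

   #if defined(PETSC_USE_CTABLE)
      PetscTableFind(baij->colmap,gcol/bs + 1,&col);
      col--;                              local start of the block column, or -1
   #else
      col = baij->colmap[gcol/bs] - 1;    -1 means the block column is not in B
   #endif
      if (col >= 0) col += gcol % bs;     point column within that block column
*/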
101: #define MatSetValues_SeqBAIJ_A_Private(row,col,value,addv,orow,ocol) \
102: { \
103: \
104: brow = row/bs; \
105: rp = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
106: rmax = aimax[brow]; nrow = ailen[brow]; \
107: bcol = col/bs; \
108: ridx = row % bs; cidx = col % bs; \
109: low = 0; high = nrow; \
110: while (high-low > 3) { \
111: t = (low+high)/2; \
112: if (rp[t] > bcol) high = t; \
113: else low = t; \
114: } \
115: for (_i=low; _i<high; _i++) { \
116: if (rp[_i] > bcol) break; \
117: if (rp[_i] == bcol) { \
118: bap = ap + bs2*_i + bs*cidx + ridx; \
119: if (addv == ADD_VALUES) *bap += value; \
120: else *bap = value; \
121: goto a_noinsert; \
122: } \
123: } \
124: if (a->nonew == 1) goto a_noinsert; \
125: if (a->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
126: MatSeqXAIJReallocateAIJ(A,a->mbs,bs2,nrow,brow,bcol,rmax,aa,ai,aj,rp,ap,aimax,a->nonew,MatScalar); \
127: N = nrow++ - 1; \
128: /* shift up all the later entries in this row */ \
129: for (ii=N; ii>=_i; ii--) { \
130: rp[ii+1] = rp[ii]; \
131: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
132: } \
133: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar)); } \
134: rp[_i] = bcol; \
135: ap[bs2*_i + bs*cidx + ridx] = value; \
136: a_noinsert:; \
137: ailen[brow] = nrow; \
138: }
140: #define MatSetValues_SeqBAIJ_B_Private(row,col,value,addv,orow,ocol) \
141: { \
142: brow = row/bs; \
143: rp = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
144: rmax = bimax[brow]; nrow = bilen[brow]; \
145: bcol = col/bs; \
146: ridx = row % bs; cidx = col % bs; \
147: low = 0; high = nrow; \
148: while (high-low > 3) { \
149: t = (low+high)/2; \
150: if (rp[t] > bcol) high = t; \
151: else low = t; \
152: } \
153: for (_i=low; _i<high; _i++) { \
154: if (rp[_i] > bcol) break; \
155: if (rp[_i] == bcol) { \
156: bap = ap + bs2*_i + bs*cidx + ridx; \
157: if (addv == ADD_VALUES) *bap += value; \
158: else *bap = value; \
159: goto b_noinsert; \
160: } \
161: } \
162: if (b->nonew == 1) goto b_noinsert; \
163: if (b->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
164: MatSeqXAIJReallocateAIJ(B,b->mbs,bs2,nrow,brow,bcol,rmax,ba,bi,bj,rp,ap,bimax,b->nonew,MatScalar); \
165: N = nrow++ - 1; \
166: /* shift up all the later entries in this row */ \
167: for (ii=N; ii>=_i; ii--) { \
168: rp[ii+1] = rp[ii]; \
169: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
170: } \
171: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));} \
172: rp[_i] = bcol; \
173: ap[bs2*_i + bs*cidx + ridx] = value; \
174: b_noinsert:; \
175: bilen[brow] = nrow; \
176: }
180: PetscErrorCode MatSetValues_MPIBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
181: {
182: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
183: MatScalar value;
184: PetscBool roworiented = baij->roworiented;
186: PetscInt i,j,row,col;
187: PetscInt rstart_orig=mat->rmap->rstart;
188: PetscInt rend_orig =mat->rmap->rend,cstart_orig=mat->cmap->rstart;
189: PetscInt cend_orig =mat->cmap->rend,bs=mat->rmap->bs;
191: /* Some Variables required in the macro */
192: Mat A = baij->A;
193: Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)(A)->data;
194: PetscInt *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
195: MatScalar *aa =a->a;
197: Mat B = baij->B;
198: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(B)->data;
199: PetscInt *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
200: MatScalar *ba =b->a;
202: PetscInt *rp,ii,nrow,_i,rmax,N,brow,bcol;
203: PetscInt low,high,t,ridx,cidx,bs2=a->bs2;
204: MatScalar *ap,*bap;
207: for (i=0; i<m; i++) {
208: if (im[i] < 0) continue;
209: #if defined(PETSC_USE_DEBUG)
210: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
211: #endif
212: if (im[i] >= rstart_orig && im[i] < rend_orig) {
213: row = im[i] - rstart_orig;
214: for (j=0; j<n; j++) {
215: if (in[j] >= cstart_orig && in[j] < cend_orig) {
216: col = in[j] - cstart_orig;
217: if (roworiented) value = v[i*n+j];
218: else value = v[i+j*m];
219: MatSetValues_SeqBAIJ_A_Private(row,col,value,addv,im[i],in[j]);
220: /* MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv); */
221: } else if (in[j] < 0) continue;
222: #if defined(PETSC_USE_DEBUG)
223: else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
224: #endif
225: else {
226: if (mat->was_assembled) {
227: if (!baij->colmap) {
228: MatCreateColmap_MPIBAIJ_Private(mat);
229: }
230: #if defined(PETSC_USE_CTABLE)
231: PetscTableFind(baij->colmap,in[j]/bs + 1,&col);
232: col = col - 1;
233: #else
234: col = baij->colmap[in[j]/bs] - 1;
235: #endif
236: if (col < 0 && !((Mat_SeqBAIJ*)(baij->B->data))->nonew) {
237: MatDisAssemble_MPIBAIJ(mat);
238: col = in[j];
239: /* Reinitialize the variables required by MatSetValues_SeqBAIJ_B_Private() */
240: B = baij->B;
241: b = (Mat_SeqBAIJ*)(B)->data;
242: bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
243: ba =b->a;
244: } else if (col < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", im[i], in[j]);
245: else col += in[j]%bs;
246: } else col = in[j];
247: if (roworiented) value = v[i*n+j];
248: else value = v[i+j*m];
249: MatSetValues_SeqBAIJ_B_Private(row,col,value,addv,im[i],in[j]);
250: /* MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv); */
251: }
252: }
253: } else {
254: if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
255: if (!baij->donotstash) {
256: mat->assembled = PETSC_FALSE;
257: if (roworiented) {
258: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,PETSC_FALSE);
259: } else {
260: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,PETSC_FALSE);
261: }
262: }
263: }
264: }
265: return(0);
266: }
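/*
   Illustrative usage sketch (not part of the original file): inserting point
   (non-blocked) entries into a MATMPIBAIJ matrix; rows owned by other processes
   are stashed here and exchanged during assembly.  A is a hypothetical matrix
   already given its sizes and type MATMPIBAIJ; indices and values are made up.

      PetscInt    row = 4, cols[2] = {4,5};           global point indices
      PetscScalar vals[2] = {1.0,2.0};
      MatMPIBAIJSetPreallocation(A,2,1,NULL,1,NULL);  block size bs = 2
      MatSetValues(A,1,&row,2,cols,vals,INSERT_VALUES);
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/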
270: PETSC_STATIC_INLINE PetscErrorCode MatSetValuesBlocked_SeqBAIJ_Inlined(Mat A,PetscInt row,PetscInt col,const PetscScalar v[],InsertMode is,PetscInt orow,PetscInt ocol)
271: {
272: Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data;
273: PetscInt *rp,low,high,t,ii,jj,nrow,i,rmax,N;
274: PetscInt *imax=a->imax,*ai=a->i,*ailen=a->ilen;
275: PetscErrorCode ierr;
276: PetscInt *aj =a->j,nonew=a->nonew,bs2=a->bs2,bs=A->rmap->bs;
277: PetscBool roworiented=a->roworiented;
278: const PetscScalar *value = v;
279: MatScalar *ap,*aa = a->a,*bap;
282: rp = aj + ai[row];
283: ap = aa + bs2*ai[row];
284: rmax = imax[row];
285: nrow = ailen[row];
286: value = v;
287: low = 0;
288: high = nrow;
289: while (high-low > 7) {
290: t = (low+high)/2;
291: if (rp[t] > col) high = t;
292: else low = t;
293: }
294: for (i=low; i<high; i++) {
295: if (rp[i] > col) break;
296: if (rp[i] == col) {
297: bap = ap + bs2*i;
298: if (roworiented) {
299: if (is == ADD_VALUES) {
300: for (ii=0; ii<bs; ii++) {
301: for (jj=ii; jj<bs2; jj+=bs) {
302: bap[jj] += *value++;
303: }
304: }
305: } else {
306: for (ii=0; ii<bs; ii++) {
307: for (jj=ii; jj<bs2; jj+=bs) {
308: bap[jj] = *value++;
309: }
310: }
311: }
312: } else {
313: if (is == ADD_VALUES) {
314: for (ii=0; ii<bs; ii++,value+=bs) {
315: for (jj=0; jj<bs; jj++) {
316: bap[jj] += value[jj];
317: }
318: bap += bs;
319: }
320: } else {
321: for (ii=0; ii<bs; ii++,value+=bs) {
322: for (jj=0; jj<bs; jj++) {
323: bap[jj] = value[jj];
324: }
325: bap += bs;
326: }
327: }
328: }
329: goto noinsert2;
330: }
331: }
332: if (nonew == 1) goto noinsert2;
333: if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new global block indexed nonzero block (%D, %D) in the matrix", orow, ocol);
334: MatSeqXAIJReallocateAIJ(A,a->mbs,bs2,nrow,row,col,rmax,aa,ai,aj,rp,ap,imax,nonew,MatScalar);
335: N = nrow++ - 1; high++;
336: /* shift up all the later entries in this row */
337: for (ii=N; ii>=i; ii--) {
338: rp[ii+1] = rp[ii];
339: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));
340: }
341: if (N >= i) {
342: PetscMemzero(ap+bs2*i,bs2*sizeof(MatScalar));
343: }
344: rp[i] = col;
345: bap = ap + bs2*i;
346: if (roworiented) {
347: for (ii=0; ii<bs; ii++) {
348: for (jj=ii; jj<bs2; jj+=bs) {
349: bap[jj] = *value++;
350: }
351: }
352: } else {
353: for (ii=0; ii<bs; ii++) {
354: for (jj=0; jj<bs; jj++) {
355: *bap++ = *value++;
356: }
357: }
358: }
359: noinsert2:;
360: ailen[row] = nrow;
361: return(0);
362: }
366: /*
367: This routine should be optimized so that the block copy at ** Here a copy is required ** below is not needed
368: by passing additional stride information into the MatSetValuesBlocked_SeqBAIJ_Inlined() routine
369: */
370: PetscErrorCode MatSetValuesBlocked_MPIBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
371: {
372: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
373: const PetscScalar *value;
374: MatScalar *barray = baij->barray;
375: PetscBool roworiented = baij->roworiented;
376: PetscErrorCode ierr;
377: PetscInt i,j,ii,jj,row,col,rstart=baij->rstartbs;
378: PetscInt rend=baij->rendbs,cstart=baij->cstartbs,stepval;
379: PetscInt cend=baij->cendbs,bs=mat->rmap->bs,bs2=baij->bs2;
382: if (!barray) {
383: PetscMalloc1(bs2,&barray);
384: baij->barray = barray;
385: }
387: if (roworiented) stepval = (n-1)*bs;
388: else stepval = (m-1)*bs;
390: for (i=0; i<m; i++) {
391: if (im[i] < 0) continue;
392: #if defined(PETSC_USE_DEBUG)
393: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Block indexed row too large %D max %D",im[i],baij->Mbs-1);
394: #endif
395: if (im[i] >= rstart && im[i] < rend) {
396: row = im[i] - rstart;
397: for (j=0; j<n; j++) {
398: /* If NumCol = 1 then a copy is not required */
399: if ((roworiented) && (n == 1)) {
400: barray = (MatScalar*)v + i*bs2;
401: } else if ((!roworiented) && (m == 1)) {
402: barray = (MatScalar*)v + j*bs2;
403: } else { /* Here a copy is required */
404: if (roworiented) {
405: value = v + (i*(stepval+bs) + j)*bs;
406: } else {
407: value = v + (j*(stepval+bs) + i)*bs;
408: }
409: for (ii=0; ii<bs; ii++,value+=bs+stepval) {
410: for (jj=0; jj<bs; jj++) barray[jj] = value[jj];
411: barray += bs;
412: }
413: barray -= bs2;
414: }
416: if (in[j] >= cstart && in[j] < cend) {
417: col = in[j] - cstart;
418: MatSetValuesBlocked_SeqBAIJ_Inlined(baij->A,row,col,barray,addv,im[i],in[j]);
419: } else if (in[j] < 0) continue;
420: #if defined(PETSC_USE_DEBUG)
421: else if (in[j] >= baij->Nbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Block indexed column too large %D max %D",in[j],baij->Nbs-1);
422: #endif
423: else {
424: if (mat->was_assembled) {
425: if (!baij->colmap) {
426: MatCreateColmap_MPIBAIJ_Private(mat);
427: }
429: #if defined(PETSC_USE_DEBUG)
430: #if defined(PETSC_USE_CTABLE)
431: { PetscInt data;
432: PetscTableFind(baij->colmap,in[j]+1,&data);
433: if ((data - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
434: }
435: #else
436: if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
437: #endif
438: #endif
439: #if defined(PETSC_USE_CTABLE)
440: PetscTableFind(baij->colmap,in[j]+1,&col);
441: col = (col - 1)/bs;
442: #else
443: col = (baij->colmap[in[j]] - 1)/bs;
444: #endif
445: if (col < 0 && !((Mat_SeqBAIJ*)(baij->B->data))->nonew) {
446: MatDisAssemble_MPIBAIJ(mat);
447: col = in[j];
448: } else if (col < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new blocked indexed nonzero block (%D, %D) into matrix",im[i],in[j]);
449: } else col = in[j];
450: MatSetValuesBlocked_SeqBAIJ_Inlined(baij->B,row,col,barray,addv,im[i],in[j]);
451: }
452: }
453: } else {
454: if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process block indexed row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
455: if (!baij->donotstash) {
456: if (roworiented) {
457: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
458: } else {
459: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
460: }
461: }
462: }
463: }
464: return(0);
465: }
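/*
   Illustrative usage sketch (not part of the original file): inserting one
   bs x bs block with MatSetValuesBlocked(); the indices are block (not point)
   indices and the values array holds the dense block, row-oriented by default.
   A is a hypothetical MATMPIBAIJ matrix preallocated with bs = 2.

      PetscInt    brow = 0, bcol = 1;              block row/column indices
      PetscScalar block[4] = {1.0,2.0,
                              3.0,4.0};            the 2 x 2 block, row oriented
      MatSetValuesBlocked(A,1,&brow,1,&bcol,block,INSERT_VALUES);
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/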
467: #define HASH_KEY 0.6180339887
468: #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(PetscInt)((size)*(tmp-(PetscInt)tmp)))
469: /* #define HASH(size,key) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
470: /* #define HASH(size,key,tmp) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
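/*
   The HASH macro above implements multiplicative (golden-ratio) hashing: the key
   is multiplied by 0.6180339887, the integer part is discarded, and the
   fractional part is scaled to the table size.  Worked example (illustrative
   only, with made-up numbers):

      size = 100, key = 37
      tmp  = 37 * 0.6180339887 = 22.8672575...
      frac = tmp - (PetscInt)tmp = 0.8672575...
      HASH = (PetscInt)(100 * 0.8672575...) = 86

   Collisions are resolved by the linear probing loops in the routines below.
*/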
473: PetscErrorCode MatSetValues_MPIBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
474: {
475: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
476: PetscBool roworiented = baij->roworiented;
478: PetscInt i,j,row,col;
479: PetscInt rstart_orig=mat->rmap->rstart;
480: PetscInt rend_orig =mat->rmap->rend,Nbs=baij->Nbs;
481: PetscInt h1,key,size=baij->ht_size,bs=mat->rmap->bs,*HT=baij->ht,idx;
482: PetscReal tmp;
483: MatScalar **HD = baij->hd,value;
484: #if defined(PETSC_USE_DEBUG)
485: PetscInt total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
486: #endif
489: for (i=0; i<m; i++) {
490: #if defined(PETSC_USE_DEBUG)
491: if (im[i] < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
492: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
493: #endif
494: row = im[i];
495: if (row >= rstart_orig && row < rend_orig) {
496: for (j=0; j<n; j++) {
497: col = in[j];
498: if (roworiented) value = v[i*n+j];
499: else value = v[i+j*m];
500: /* Look up into the Hash Table */
501: key = (row/bs)*Nbs+(col/bs)+1;
502: h1 = HASH(size,key,tmp);
505: idx = h1;
506: #if defined(PETSC_USE_DEBUG)
507: insert_ct++;
508: total_ct++;
509: if (HT[idx] != key) {
510: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++) ;
511: if (idx == size) {
512: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++) ;
513: if (idx == h1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
514: }
515: }
516: #else
517: if (HT[idx] != key) {
518: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++) ;
519: if (idx == size) {
520: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++) ;
521: if (idx == h1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
522: }
523: }
524: #endif
525: /* A HASH table entry is found, so insert the values at the correct address */
526: if (addv == ADD_VALUES) *(HD[idx]+ (col % bs)*bs + (row % bs)) += value;
527: else *(HD[idx]+ (col % bs)*bs + (row % bs)) = value;
528: }
529: } else if (!baij->donotstash) {
530: if (roworiented) {
531: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,PETSC_FALSE);
532: } else {
533: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,PETSC_FALSE);
534: }
535: }
536: }
537: #if defined(PETSC_USE_DEBUG)
538: baij->ht_total_ct = total_ct;
539: baij->ht_insert_ct = insert_ct;
540: #endif
541: return(0);
542: }
546: PetscErrorCode MatSetValuesBlocked_MPIBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
547: {
548: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
549: PetscBool roworiented = baij->roworiented;
550: PetscErrorCode ierr;
551: PetscInt i,j,ii,jj,row,col;
552: PetscInt rstart=baij->rstartbs;
553: PetscInt rend =mat->rmap->rend,stepval,bs=mat->rmap->bs,bs2=baij->bs2,nbs2=n*bs2;
554: PetscInt h1,key,size=baij->ht_size,idx,*HT=baij->ht,Nbs=baij->Nbs;
555: PetscReal tmp;
556: MatScalar **HD = baij->hd,*baij_a;
557: const PetscScalar *v_t,*value;
558: #if defined(PETSC_USE_DEBUG)
559: PetscInt total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
560: #endif
563: if (roworiented) stepval = (n-1)*bs;
564: else stepval = (m-1)*bs;
566: for (i=0; i<m; i++) {
567: #if defined(PETSC_USE_DEBUG)
568: if (im[i] < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",im[i]);
569: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],baij->Mbs-1);
570: #endif
571: row = im[i];
572: v_t = v + i*nbs2;
573: if (row >= rstart && row < rend) {
574: for (j=0; j<n; j++) {
575: col = in[j];
577: /* Look up into the Hash Table */
578: key = row*Nbs+col+1;
579: h1 = HASH(size,key,tmp);
581: idx = h1;
582: #if defined(PETSC_USE_DEBUG)
583: total_ct++;
584: insert_ct++;
585: if (HT[idx] != key) {
586: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++) ;
587: if (idx == size) {
588: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++) ;
589: if (idx == h1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
590: }
591: }
592: #else
593: if (HT[idx] != key) {
594: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++) ;
595: if (idx == size) {
596: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++) ;
597: if (idx == h1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
598: }
599: }
600: #endif
601: baij_a = HD[idx];
602: if (roworiented) {
603: /*value = v + i*(stepval+bs)*bs + j*bs;*/
604: /* value = v + (i*(stepval+bs)+j)*bs; */
605: value = v_t;
606: v_t += bs;
607: if (addv == ADD_VALUES) {
608: for (ii=0; ii<bs; ii++,value+=stepval) {
609: for (jj=ii; jj<bs2; jj+=bs) {
610: baij_a[jj] += *value++;
611: }
612: }
613: } else {
614: for (ii=0; ii<bs; ii++,value+=stepval) {
615: for (jj=ii; jj<bs2; jj+=bs) {
616: baij_a[jj] = *value++;
617: }
618: }
619: }
620: } else {
621: value = v + j*(stepval+bs)*bs + i*bs;
622: if (addv == ADD_VALUES) {
623: for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
624: for (jj=0; jj<bs; jj++) {
625: baij_a[jj] += *value++;
626: }
627: }
628: } else {
629: for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
630: for (jj=0; jj<bs; jj++) {
631: baij_a[jj] = *value++;
632: }
633: }
634: }
635: }
636: }
637: } else {
638: if (!baij->donotstash) {
639: if (roworiented) {
640: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
641: } else {
642: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
643: }
644: }
645: }
646: }
647: #if defined(PETSC_USE_DEBUG)
648: baij->ht_total_ct = total_ct;
649: baij->ht_insert_ct = insert_ct;
650: #endif
651: return(0);
652: }
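/*
   Illustrative usage sketch (not part of the original file): the hash-table
   variants above are installed automatically at the end of assembly when the
   MAT_USE_HASH_TABLE option is set (see MatAssemblyEnd_MPIBAIJ() below); after
   that, MatSetValues()/MatSetValuesBlocked() may only modify already-existing
   nonzero locations.  A is a hypothetical MATMPIBAIJ matrix.

      MatSetOption(A,MAT_USE_HASH_TABLE,PETSC_TRUE);
      ...  first MAT_FINAL_ASSEMBLY builds the hash table  ...
      MatSetValues(A,...);      now routed through MatSetValues_MPIBAIJ_HT()
*/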
656: PetscErrorCode MatGetValues_MPIBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
657: {
658: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
660: PetscInt bs = mat->rmap->bs,i,j,bsrstart = mat->rmap->rstart,bsrend = mat->rmap->rend;
661: PetscInt bscstart = mat->cmap->rstart,bscend = mat->cmap->rend,row,col,data;
664: for (i=0; i<m; i++) {
665: if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
666: if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
667: if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
668: row = idxm[i] - bsrstart;
669: for (j=0; j<n; j++) {
670: if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
671: if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
672: if (idxn[j] >= bscstart && idxn[j] < bscend) {
673: col = idxn[j] - bscstart;
674: MatGetValues_SeqBAIJ(baij->A,1,&row,1,&col,v+i*n+j);
675: } else {
676: if (!baij->colmap) {
677: MatCreateColmap_MPIBAIJ_Private(mat);
678: }
679: #if defined(PETSC_USE_CTABLE)
680: PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);
681: data--;
682: #else
683: data = baij->colmap[idxn[j]/bs]-1;
684: #endif
685: if ((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
686: else {
687: col = data + idxn[j]%bs;
688: MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);
689: }
690: }
691: }
692: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
693: }
694: return(0);
695: }
699: PetscErrorCode MatNorm_MPIBAIJ(Mat mat,NormType type,PetscReal *nrm)
700: {
701: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
702: Mat_SeqBAIJ *amat = (Mat_SeqBAIJ*)baij->A->data,*bmat = (Mat_SeqBAIJ*)baij->B->data;
704: PetscInt i,j,bs2=baij->bs2,bs=baij->A->rmap->bs,nz,row,col;
705: PetscReal sum = 0.0;
706: MatScalar *v;
709: if (baij->size == 1) {
710: MatNorm(baij->A,type,nrm);
711: } else {
712: if (type == NORM_FROBENIUS) {
713: v = amat->a;
714: nz = amat->nz*bs2;
715: for (i=0; i<nz; i++) {
716: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
717: }
718: v = bmat->a;
719: nz = bmat->nz*bs2;
720: for (i=0; i<nz; i++) {
721: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
722: }
723: MPIU_Allreduce(&sum,nrm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
724: *nrm = PetscSqrtReal(*nrm);
725: } else if (type == NORM_1) { /* max column sum */
726: PetscReal *tmp,*tmp2;
727: PetscInt *jj,*garray=baij->garray,cstart=baij->rstartbs;
728: PetscMalloc2(mat->cmap->N,&tmp,mat->cmap->N,&tmp2);
729: PetscMemzero(tmp,mat->cmap->N*sizeof(PetscReal));
730: v = amat->a; jj = amat->j;
731: for (i=0; i<amat->nz; i++) {
732: for (j=0; j<bs; j++) {
733: col = bs*(cstart + *jj) + j; /* column index */
734: for (row=0; row<bs; row++) {
735: tmp[col] += PetscAbsScalar(*v); v++;
736: }
737: }
738: jj++;
739: }
740: v = bmat->a; jj = bmat->j;
741: for (i=0; i<bmat->nz; i++) {
742: for (j=0; j<bs; j++) {
743: col = bs*garray[*jj] + j;
744: for (row=0; row<bs; row++) {
745: tmp[col] += PetscAbsScalar(*v); v++;
746: }
747: }
748: jj++;
749: }
750: MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
751: *nrm = 0.0;
752: for (j=0; j<mat->cmap->N; j++) {
753: if (tmp2[j] > *nrm) *nrm = tmp2[j];
754: }
755: PetscFree2(tmp,tmp2);
756: } else if (type == NORM_INFINITY) { /* max row sum */
757: PetscReal *sums;
758: PetscMalloc1(bs,&sums);
759: sum = 0.0;
760: for (j=0; j<amat->mbs; j++) {
761: for (row=0; row<bs; row++) sums[row] = 0.0;
762: v = amat->a + bs2*amat->i[j];
763: nz = amat->i[j+1]-amat->i[j];
764: for (i=0; i<nz; i++) {
765: for (col=0; col<bs; col++) {
766: for (row=0; row<bs; row++) {
767: sums[row] += PetscAbsScalar(*v); v++;
768: }
769: }
770: }
771: v = bmat->a + bs2*bmat->i[j];
772: nz = bmat->i[j+1]-bmat->i[j];
773: for (i=0; i<nz; i++) {
774: for (col=0; col<bs; col++) {
775: for (row=0; row<bs; row++) {
776: sums[row] += PetscAbsScalar(*v); v++;
777: }
778: }
779: }
780: for (row=0; row<bs; row++) {
781: if (sums[row] > sum) sum = sums[row];
782: }
783: }
784: MPIU_Allreduce(&sum,nrm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
785: PetscFree(sums);
786: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for this norm yet");
787: }
788: return(0);
789: }
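/*
   Illustrative usage sketch (not part of the original file): the three norms
   handled above are requested through MatNorm(); A is a hypothetical assembled
   MATMPIBAIJ matrix.

      PetscReal fnorm,onenorm,infnorm;
      MatNorm(A,NORM_FROBENIUS,&fnorm);    sqrt of the sum of squares of all entries
      MatNorm(A,NORM_1,&onenorm);          maximum column sum of absolute values
      MatNorm(A,NORM_INFINITY,&infnorm);   maximum row sum of absolute values
*/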
791: /*
792: Creates the hash table and fills in its entries.
793: This table is created only once.
794: If new entries need to be added to the matrix
795: then the hash table has to be destroyed and
796: recreated.
797: */
800: PetscErrorCode MatCreateHashTable_MPIBAIJ_Private(Mat mat,PetscReal factor)
801: {
802: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
803: Mat A = baij->A,B=baij->B;
804: Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b=(Mat_SeqBAIJ*)B->data;
805: PetscInt i,j,k,nz=a->nz+b->nz,h1,*ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
807: PetscInt ht_size,bs2=baij->bs2,rstart=baij->rstartbs;
808: PetscInt cstart=baij->cstartbs,*garray=baij->garray,row,col,Nbs=baij->Nbs;
809: PetscInt *HT,key;
810: MatScalar **HD;
811: PetscReal tmp;
812: #if defined(PETSC_USE_INFO)
813: PetscInt ct=0,max=0;
814: #endif
817: if (baij->ht) return(0);
819: baij->ht_size = (PetscInt)(factor*nz);
820: ht_size = baij->ht_size;
822: /* Allocate Memory for Hash Table */
823: PetscCalloc2(ht_size,&baij->hd,ht_size,&baij->ht);
824: HD = baij->hd;
825: HT = baij->ht;
827: /* Loop Over A */
828: for (i=0; i<a->mbs; i++) {
829: for (j=ai[i]; j<ai[i+1]; j++) {
830: row = i+rstart;
831: col = aj[j]+cstart;
833: key = row*Nbs + col + 1;
834: h1 = HASH(ht_size,key,tmp);
835: for (k=0; k<ht_size; k++) {
836: if (!HT[(h1+k)%ht_size]) {
837: HT[(h1+k)%ht_size] = key;
838: HD[(h1+k)%ht_size] = a->a + j*bs2;
839: break;
840: #if defined(PETSC_USE_INFO)
841: } else {
842: ct++;
843: #endif
844: }
845: }
846: #if defined(PETSC_USE_INFO)
847: if (k> max) max = k;
848: #endif
849: }
850: }
851: /* Loop Over B */
852: for (i=0; i<b->mbs; i++) {
853: for (j=bi[i]; j<bi[i+1]; j++) {
854: row = i+rstart;
855: col = garray[bj[j]];
856: key = row*Nbs + col + 1;
857: h1 = HASH(ht_size,key,tmp);
858: for (k=0; k<ht_size; k++) {
859: if (!HT[(h1+k)%ht_size]) {
860: HT[(h1+k)%ht_size] = key;
861: HD[(h1+k)%ht_size] = b->a + j*bs2;
862: break;
863: #if defined(PETSC_USE_INFO)
864: } else {
865: ct++;
866: #endif
867: }
868: }
869: #if defined(PETSC_USE_INFO)
870: if (k> max) max = k;
871: #endif
872: }
873: }
875: /* Print Summary */
876: #if defined(PETSC_USE_INFO)
877: for (i=0,j=0; i<ht_size; i++) {
878: if (HT[i]) j++;
879: }
880: PetscInfo2(mat,"Average Search = %5.2f,max search = %D\n",(!j)? 0.0:((PetscReal)(ct+j))/j,max);
881: #endif
882: return(0);
883: }
887: PetscErrorCode MatAssemblyBegin_MPIBAIJ(Mat mat,MatAssemblyType mode)
888: {
889: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
891: PetscInt nstash,reallocs;
894: if (baij->donotstash || mat->nooffprocentries) return(0);
896: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
897: MatStashScatterBegin_Private(mat,&mat->bstash,baij->rangebs);
898: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
899: PetscInfo2(mat,"Stash has %D entries,uses %D mallocs.\n",nstash,reallocs);
900: MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);
901: PetscInfo2(mat,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
902: return(0);
903: }
907: PetscErrorCode MatAssemblyEnd_MPIBAIJ(Mat mat,MatAssemblyType mode)
908: {
909: Mat_MPIBAIJ *baij=(Mat_MPIBAIJ*)mat->data;
910: Mat_SeqBAIJ *a =(Mat_SeqBAIJ*)baij->A->data;
912: PetscInt i,j,rstart,ncols,flg,bs2=baij->bs2;
913: PetscInt *row,*col;
914: PetscBool r1,r2,r3,other_disassembled;
915: MatScalar *val;
916: PetscMPIInt n;
919: /* do not use 'b=(Mat_SeqBAIJ*)baij->B->data' as B can be reset in disassembly */
920: if (!baij->donotstash && !mat->nooffprocentries) {
921: while (1) {
922: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
923: if (!flg) break;
925: for (i=0; i<n;) {
926: /* Now identify the consecutive vals belonging to the same row */
927: for (j=i,rstart=row[j]; j<n; j++) {
928: if (row[j] != rstart) break;
929: }
930: if (j < n) ncols = j-i;
931: else ncols = n-i;
932: /* Now assemble all these values with a single function call */
933: MatSetValues_MPIBAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);
934: i = j;
935: }
936: }
937: MatStashScatterEnd_Private(&mat->stash);
938: /* Now process the block-stash. Since the values are stashed column-oriented,
939: clear the roworiented flags (i.e. treat the values as column-oriented), and after MatSetValues()
940: restore the original flags */
941: r1 = baij->roworiented;
942: r2 = a->roworiented;
943: r3 = ((Mat_SeqBAIJ*)baij->B->data)->roworiented;
945: baij->roworiented = PETSC_FALSE;
946: a->roworiented = PETSC_FALSE;
948: (((Mat_SeqBAIJ*)baij->B->data))->roworiented = PETSC_FALSE; /* b->roworiented */
949: while (1) {
950: MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);
951: if (!flg) break;
953: for (i=0; i<n;) {
954: /* Now identify the consecutive vals belonging to the same row */
955: for (j=i,rstart=row[j]; j<n; j++) {
956: if (row[j] != rstart) break;
957: }
958: if (j < n) ncols = j-i;
959: else ncols = n-i;
960: MatSetValuesBlocked_MPIBAIJ(mat,1,row+i,ncols,col+i,val+i*bs2,mat->insertmode);
961: i = j;
962: }
963: }
964: MatStashScatterEnd_Private(&mat->bstash);
966: baij->roworiented = r1;
967: a->roworiented = r2;
969: ((Mat_SeqBAIJ*)baij->B->data)->roworiented = r3; /* b->roworiented */
970: }
972: MatAssemblyBegin(baij->A,mode);
973: MatAssemblyEnd(baij->A,mode);
975: /* determine if any processor has disassembled; if so we must
976: also disassemble ourselves, in order that we may reassemble. */
977: /*
978: if the nonzero structure of submatrix B cannot change then we know that
979: no processor disassembled, thus we can skip this step
980: */
981: if (!((Mat_SeqBAIJ*)baij->B->data)->nonew) {
982: MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
983: if (mat->was_assembled && !other_disassembled) {
984: MatDisAssemble_MPIBAIJ(mat);
985: }
986: }
988: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
989: MatSetUpMultiply_MPIBAIJ(mat);
990: }
991: MatAssemblyBegin(baij->B,mode);
992: MatAssemblyEnd(baij->B,mode);
994: #if defined(PETSC_USE_INFO)
995: if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
996: PetscInfo1(mat,"Average Hash Table Search in MatSetValues = %5.2f\n",((PetscReal)baij->ht_total_ct)/baij->ht_insert_ct);
998: baij->ht_total_ct = 0;
999: baij->ht_insert_ct = 0;
1000: }
1001: #endif
1002: if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
1003: MatCreateHashTable_MPIBAIJ_Private(mat,baij->ht_fact);
1005: mat->ops->setvalues = MatSetValues_MPIBAIJ_HT;
1006: mat->ops->setvaluesblocked = MatSetValuesBlocked_MPIBAIJ_HT;
1007: }
1009: PetscFree2(baij->rowvalues,baij->rowindices);
1011: baij->rowvalues = 0;
1013: /* if no new nonzero locations are allowed in the matrix then only set the matrix state the first time through */
1014: if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
1015: PetscObjectState state = baij->A->nonzerostate + baij->B->nonzerostate;
1016: MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
1017: }
1018: return(0);
1019: }
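/*
   Illustrative usage sketch (not part of the original file): the stash-driven
   assembly implemented above allows setting entries owned by other processes;
   a MAT_FLUSH_ASSEMBLY must separate an INSERT_VALUES phase from an ADD_VALUES
   phase.  A, grow, gcol and val are hypothetical.

      MatSetValues(A,1,&grow,1,&gcol,&val,INSERT_VALUES);   grow may be off-process
      MatAssemblyBegin(A,MAT_FLUSH_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FLUSH_ASSEMBLY);
      MatSetValues(A,1,&grow,1,&gcol,&val,ADD_VALUES);
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/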
1021: extern PetscErrorCode MatView_SeqBAIJ(Mat,PetscViewer);
1022: #include <petscdraw.h>
1025: static PetscErrorCode MatView_MPIBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1026: {
1027: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1028: PetscErrorCode ierr;
1029: PetscMPIInt rank = baij->rank;
1030: PetscInt bs = mat->rmap->bs;
1031: PetscBool iascii,isdraw;
1032: PetscViewer sviewer;
1033: PetscViewerFormat format;
1036: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1037: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1038: if (iascii) {
1039: PetscViewerGetFormat(viewer,&format);
1040: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1041: MatInfo info;
1042: MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1043: MatGetInfo(mat,MAT_LOCAL,&info);
1044: PetscViewerASCIIPushSynchronized(viewer);
1045: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",
1046: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,mat->rmap->bs,(PetscInt)info.memory);
1047: MatGetInfo(baij->A,MAT_LOCAL,&info);
1048: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1049: MatGetInfo(baij->B,MAT_LOCAL,&info);
1050: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1051: PetscViewerFlush(viewer);
1052: PetscViewerASCIIPopSynchronized(viewer);
1053: PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1054: VecScatterView(baij->Mvctx,viewer);
1055: return(0);
1056: } else if (format == PETSC_VIEWER_ASCII_INFO) {
1057: PetscViewerASCIIPrintf(viewer," block size is %D\n",bs);
1058: return(0);
1059: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1060: return(0);
1061: }
1062: }
1064: if (isdraw) {
1065: PetscDraw draw;
1066: PetscBool isnull;
1067: PetscViewerDrawGetDraw(viewer,0,&draw);
1068: PetscDrawIsNull(draw,&isnull);
1069: if (isnull) return(0);
1070: }
1072: {
1073: /* assemble the entire matrix onto first processor. */
1074: Mat A;
1075: Mat_SeqBAIJ *Aloc;
1076: PetscInt M = mat->rmap->N,N = mat->cmap->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
1077: MatScalar *a;
1078: const char *matname;
1080: /* Here we are creating a temporary matrix, so we will assume MPIBAIJ is acceptable */
1081: /* Perhaps this should be the type of mat? */
1082: MatCreate(PetscObjectComm((PetscObject)mat),&A);
1083: if (!rank) {
1084: MatSetSizes(A,M,N,M,N);
1085: } else {
1086: MatSetSizes(A,0,0,M,N);
1087: }
1088: MatSetType(A,MATMPIBAIJ);
1089: MatMPIBAIJSetPreallocation(A,mat->rmap->bs,0,NULL,0,NULL);
1090: MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);
1091: PetscLogObjectParent((PetscObject)mat,(PetscObject)A);
1093: /* copy over the A part */
1094: Aloc = (Mat_SeqBAIJ*)baij->A->data;
1095: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1096: PetscMalloc1(bs,&rvals);
1098: for (i=0; i<mbs; i++) {
1099: rvals[0] = bs*(baij->rstartbs + i);
1100: for (j=1; j<bs; j++) rvals[j] = rvals[j-1] + 1;
1101: for (j=ai[i]; j<ai[i+1]; j++) {
1102: col = (baij->cstartbs+aj[j])*bs;
1103: for (k=0; k<bs; k++) {
1104: MatSetValues_MPIBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);
1105: col++; a += bs;
1106: }
1107: }
1108: }
1109: /* copy over the B part */
1110: Aloc = (Mat_SeqBAIJ*)baij->B->data;
1111: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1112: for (i=0; i<mbs; i++) {
1113: rvals[0] = bs*(baij->rstartbs + i);
1114: for (j=1; j<bs; j++) rvals[j] = rvals[j-1] + 1;
1115: for (j=ai[i]; j<ai[i+1]; j++) {
1116: col = baij->garray[aj[j]]*bs;
1117: for (k=0; k<bs; k++) {
1118: MatSetValues_MPIBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);
1119: col++; a += bs;
1120: }
1121: }
1122: }
1123: PetscFree(rvals);
1124: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1125: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1126: /*
1127: Every process has to make this call to draw the matrix since the graphics waits are
1128: synchronized across all processes that share the PetscDraw object
1129: */
1130: PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1131: PetscObjectGetName((PetscObject)mat,&matname);
1132: if (!rank) {
1133: PetscObjectSetName((PetscObject)((Mat_MPIBAIJ*)(A->data))->A,matname);
1134: MatView_SeqBAIJ(((Mat_MPIBAIJ*)(A->data))->A,sviewer);
1135: }
1136: PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1137: PetscViewerFlush(viewer);
1138: MatDestroy(&A);
1139: }
1140: return(0);
1141: }
1145: static PetscErrorCode MatView_MPIBAIJ_Binary(Mat mat,PetscViewer viewer)
1146: {
1147: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)mat->data;
1148: Mat_SeqBAIJ *A = (Mat_SeqBAIJ*)a->A->data;
1149: Mat_SeqBAIJ *B = (Mat_SeqBAIJ*)a->B->data;
1151: PetscInt i,*row_lens,*crow_lens,bs = mat->rmap->bs,j,k,bs2=a->bs2,header[4],nz,rlen;
1152: PetscInt *range=0,nzmax,*column_indices,cnt,col,*garray = a->garray,cstart = mat->cmap->rstart/bs,len,pcnt,l,ll;
1153: int fd;
1154: PetscScalar *column_values;
1155: FILE *file;
1156: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag;
1157: PetscInt message_count,flowcontrolcount;
1160: MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1161: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
1162: nz = bs2*(A->nz + B->nz);
1163: rlen = mat->rmap->n;
1164: PetscViewerBinaryGetDescriptor(viewer,&fd);
1165: if (!rank) {
1166: header[0] = MAT_FILE_CLASSID;
1167: header[1] = mat->rmap->N;
1168: header[2] = mat->cmap->N;
1170: MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1171: PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);
1172: /* get largest number of rows any processor has */
1173: range = mat->rmap->range;
1174: for (i=1; i<size; i++) {
1175: rlen = PetscMax(rlen,range[i+1] - range[i]);
1176: }
1177: } else {
1178: MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1179: }
1181: PetscMalloc1(rlen/bs,&crow_lens);
1182: /* compute lengths of each row */
1183: for (i=0; i<a->mbs; i++) {
1184: crow_lens[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
1185: }
1186: /* store the row lengths to the file */
1187: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1188: if (!rank) {
1189: MPI_Status status;
1190: PetscMalloc1(rlen,&row_lens);
1191: rlen = (range[1] - range[0])/bs;
1192: for (i=0; i<rlen; i++) {
1193: for (j=0; j<bs; j++) {
1194: row_lens[i*bs+j] = bs*crow_lens[i];
1195: }
1196: }
1197: PetscBinaryWrite(fd,row_lens,bs*rlen,PETSC_INT,PETSC_TRUE);
1198: for (i=1; i<size; i++) {
1199: rlen = (range[i+1] - range[i])/bs;
1200: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1201: MPI_Recv(crow_lens,rlen,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1202: for (k=0; k<rlen; k++) {
1203: for (j=0; j<bs; j++) {
1204: row_lens[k*bs+j] = bs*crow_lens[k];
1205: }
1206: }
1207: PetscBinaryWrite(fd,row_lens,bs*rlen,PETSC_INT,PETSC_TRUE);
1208: }
1209: PetscViewerFlowControlEndMaster(viewer,&message_count);
1210: PetscFree(row_lens);
1211: } else {
1212: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1213: MPI_Send(crow_lens,mat->rmap->n/bs,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1214: PetscViewerFlowControlEndWorker(viewer,&message_count);
1215: }
1216: PetscFree(crow_lens);
1218: /* load up the local column indices. Include them for every row, not just one set per block row, since process 0 does not have the
1219: information needed to expand a block row into its individual rows. This does require more communication, but still no more than
1220: the communication needed for the nonzero values */
1221: nzmax = nz; /* space the largest processor needs */
1222: MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,PetscObjectComm((PetscObject)mat));
1223: PetscMalloc1(nzmax,&column_indices);
1224: cnt = 0;
1225: for (i=0; i<a->mbs; i++) {
1226: pcnt = cnt;
1227: for (j=B->i[i]; j<B->i[i+1]; j++) {
1228: if ((col = garray[B->j[j]]) > cstart) break;
1229: for (l=0; l<bs; l++) {
1230: column_indices[cnt++] = bs*col+l;
1231: }
1232: }
1233: for (k=A->i[i]; k<A->i[i+1]; k++) {
1234: for (l=0; l<bs; l++) {
1235: column_indices[cnt++] = bs*(A->j[k] + cstart)+l;
1236: }
1237: }
1238: for (; j<B->i[i+1]; j++) {
1239: for (l=0; l<bs; l++) {
1240: column_indices[cnt++] = bs*garray[B->j[j]]+l;
1241: }
1242: }
1243: len = cnt - pcnt;
1244: for (k=1; k<bs; k++) {
1245: PetscMemcpy(&column_indices[cnt],&column_indices[pcnt],len*sizeof(PetscInt));
1246: cnt += len;
1247: }
1248: }
1249: if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1251: /* store the columns to the file */
1252: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1253: if (!rank) {
1254: MPI_Status status;
1255: PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);
1256: for (i=1; i<size; i++) {
1257: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1258: MPI_Recv(&cnt,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1259: MPI_Recv(column_indices,cnt,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1260: PetscBinaryWrite(fd,column_indices,cnt,PETSC_INT,PETSC_TRUE);
1261: }
1262: PetscViewerFlowControlEndMaster(viewer,&message_count);
1263: } else {
1264: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1265: MPI_Send(&cnt,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1266: MPI_Send(column_indices,cnt,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1267: PetscViewerFlowControlEndWorker(viewer,&message_count);
1268: }
1269: PetscFree(column_indices);
1271: /* load up the numerical values */
1272: PetscMalloc1(nzmax,&column_values);
1273: cnt = 0;
1274: for (i=0; i<a->mbs; i++) {
1275: rlen = bs*(B->i[i+1] - B->i[i] + A->i[i+1] - A->i[i]);
1276: for (j=B->i[i]; j<B->i[i+1]; j++) {
1277: if (garray[B->j[j]] > cstart) break;
1278: for (l=0; l<bs; l++) {
1279: for (ll=0; ll<bs; ll++) {
1280: column_values[cnt + l*rlen + ll] = B->a[bs2*j+l+bs*ll];
1281: }
1282: }
1283: cnt += bs;
1284: }
1285: for (k=A->i[i]; k<A->i[i+1]; k++) {
1286: for (l=0; l<bs; l++) {
1287: for (ll=0; ll<bs; ll++) {
1288: column_values[cnt + l*rlen + ll] = A->a[bs2*k+l+bs*ll];
1289: }
1290: }
1291: cnt += bs;
1292: }
1293: for (; j<B->i[i+1]; j++) {
1294: for (l=0; l<bs; l++) {
1295: for (ll=0; ll<bs; ll++) {
1296: column_values[cnt + l*rlen + ll] = B->a[bs2*j+l+bs*ll];
1297: }
1298: }
1299: cnt += bs;
1300: }
1301: cnt += (bs-1)*rlen;
1302: }
1303: if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1305: /* store the column values to the file */
1306: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1307: if (!rank) {
1308: MPI_Status status;
1309: PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);
1310: for (i=1; i<size; i++) {
1311: PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);
1312: MPI_Recv(&cnt,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);
1313: MPI_Recv(column_values,cnt,MPIU_SCALAR,i,tag,PetscObjectComm((PetscObject)mat),&status);
1314: PetscBinaryWrite(fd,column_values,cnt,PETSC_SCALAR,PETSC_TRUE);
1315: }
1316: PetscViewerFlowControlEndMaster(viewer,&message_count);
1317: } else {
1318: PetscViewerFlowControlStepWorker(viewer,rank,&message_count);
1319: MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));
1320: MPI_Send(column_values,nz,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));
1321: PetscViewerFlowControlEndWorker(viewer,&message_count);
1322: }
1323: PetscFree(column_values);
1325: PetscViewerBinaryGetInfoPointer(viewer,&file);
1326: if (file) {
1327: fprintf(file,"-matload_block_size %d\n",(int)mat->rmap->bs);
1328: }
1329: return(0);
1330: }
1334: PetscErrorCode MatView_MPIBAIJ(Mat mat,PetscViewer viewer)
1335: {
1337: PetscBool iascii,isdraw,issocket,isbinary;
1340: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1341: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1342: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1343: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1344: if (iascii || isdraw || issocket) {
1345: MatView_MPIBAIJ_ASCIIorDraworSocket(mat,viewer);
1346: } else if (isbinary) {
1347: MatView_MPIBAIJ_Binary(mat,viewer);
1348: }
1349: return(0);
1350: }
1354: PetscErrorCode MatDestroy_MPIBAIJ(Mat mat)
1355: {
1356: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1360: #if defined(PETSC_USE_LOG)
1361: PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->rmap->N,mat->cmap->N);
1362: #endif
1363: MatStashDestroy_Private(&mat->stash);
1364: MatStashDestroy_Private(&mat->bstash);
1365: MatDestroy(&baij->A);
1366: MatDestroy(&baij->B);
1367: #if defined(PETSC_USE_CTABLE)
1368: PetscTableDestroy(&baij->colmap);
1369: #else
1370: PetscFree(baij->colmap);
1371: #endif
1372: PetscFree(baij->garray);
1373: VecDestroy(&baij->lvec);
1374: VecScatterDestroy(&baij->Mvctx);
1375: PetscFree2(baij->rowvalues,baij->rowindices);
1376: PetscFree(baij->barray);
1377: PetscFree2(baij->hd,baij->ht);
1378: PetscFree(baij->rangebs);
1379: PetscFree(mat->data);
1381: PetscObjectChangeTypeName((PetscObject)mat,0);
1382: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1383: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1384: PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C",NULL);
1385: PetscObjectComposeFunction((PetscObject)mat,"MatMPIBAIJSetPreallocation_C",NULL);
1386: PetscObjectComposeFunction((PetscObject)mat,"MatMPIBAIJSetPreallocationCSR_C",NULL);
1387: PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1388: PetscObjectComposeFunction((PetscObject)mat,"MatSetHashTableFactor_C",NULL);
1389: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpibaij_mpisbaij_C",NULL);
1390: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpibaij_mpibstrm_C",NULL);
1391: return(0);
1392: }
1396: PetscErrorCode MatMult_MPIBAIJ(Mat A,Vec xx,Vec yy)
1397: {
1398: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1400: PetscInt nt;
1403: VecGetLocalSize(xx,&nt);
1404: if (nt != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
1405: VecGetLocalSize(yy,&nt);
1406: if (nt != A->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
1407: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1408: (*a->A->ops->mult)(a->A,xx,yy);
1409: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1410: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1411: return(0);
1412: }
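/*
   Illustrative usage sketch (not part of the original file): y = A*x with the
   routine above; the scatter of the needed off-process entries of x into
   a->lvec is overlapped with the local (diagonal-block) multiply.  A is a
   hypothetical assembled MATMPIBAIJ matrix.

      Vec x,y;
      MatCreateVecs(A,&x,&y);     x compatible with the columns, y with the rows
      VecSet(x,1.0);
      MatMult(A,x,y);
      VecDestroy(&x);
      VecDestroy(&y);
*/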
1416: PetscErrorCode MatMultAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1417: {
1418: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1422: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1423: (*a->A->ops->multadd)(a->A,xx,yy,zz);
1424: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1425: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1426: return(0);
1427: }
1431: PetscErrorCode MatMultTranspose_MPIBAIJ(Mat A,Vec xx,Vec yy)
1432: {
1433: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1435: PetscBool merged;
1438: VecScatterGetMerged(a->Mvctx,&merged);
1439: /* do nondiagonal part */
1440: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1441: if (!merged) {
1442: /* send it on its way */
1443: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1444: /* do local part */
1445: (*a->A->ops->multtranspose)(a->A,xx,yy);
1446: /* receive remote parts: note this assumes the values are not actually */
1447: /* inserted in yy until the next line */
1448: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1449: } else {
1450: /* do local part */
1451: (*a->A->ops->multtranspose)(a->A,xx,yy);
1452: /* send it on its way */
1453: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1454: /* values actually were received in the Begin() but we need to call this nop */
1455: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1456: }
1457: return(0);
1458: }
1462: PetscErrorCode MatMultTransposeAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1463: {
1464: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1468: /* do nondiagonal part */
1469: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1470: /* send it on its way */
1471: VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1472: /* do local part */
1473: (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1474: /* receive remote parts: note this assumes the values are not actually */
1475: /* inserted in yy until the next line, which is true for my implementation */
1476: /* but perhaps is not always true. */
1477: VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1478: return(0);
1479: }
1481: /*
1482: This only works correctly for square matrices where the subblock A->A is the
1483: diagonal block
1484: */
1487: PetscErrorCode MatGetDiagonal_MPIBAIJ(Mat A,Vec v)
1488: {
1489: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1493: if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1494: MatGetDiagonal(a->A,v);
1495: return(0);
1496: }
1500: PetscErrorCode MatScale_MPIBAIJ(Mat A,PetscScalar aa)
1501: {
1502: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1506: MatScale(a->A,aa);
1507: MatScale(a->B,aa);
1508: return(0);
1509: }
1513: PetscErrorCode MatGetRow_MPIBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1514: {
1515: Mat_MPIBAIJ *mat = (Mat_MPIBAIJ*)matin->data;
1516: PetscScalar *vworkA,*vworkB,**pvA,**pvB,*v_p;
1518: PetscInt bs = matin->rmap->bs,bs2 = mat->bs2,i,*cworkA,*cworkB,**pcA,**pcB;
1519: PetscInt nztot,nzA,nzB,lrow,brstart = matin->rmap->rstart,brend = matin->rmap->rend;
1520: PetscInt *cmap,*idx_p,cstart = mat->cstartbs;
1523: if (row < brstart || row >= brend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local rows");
1524: if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1525: mat->getrowactive = PETSC_TRUE;
1527: if (!mat->rowvalues && (idx || v)) {
1528: /*
1529: allocate enough space to hold information from the longest row.
1530: */
1531: Mat_SeqBAIJ *Aa = (Mat_SeqBAIJ*)mat->A->data,*Ba = (Mat_SeqBAIJ*)mat->B->data;
1532: PetscInt max = 1,mbs = mat->mbs,tmp;
1533: for (i=0; i<mbs; i++) {
1534: tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1535: if (max < tmp) max = tmp;
1536: }
1537: PetscMalloc2(max*bs2,&mat->rowvalues,max*bs2,&mat->rowindices);
1538: }
1539: lrow = row - brstart;
1541: pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1542: if (!v) {pvA = 0; pvB = 0;}
1543: if (!idx) {pcA = 0; if (!v) pcB = 0;}
1544: (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1545: (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1546: nztot = nzA + nzB;
1548: cmap = mat->garray;
1549: if (v || idx) {
1550: if (nztot) {
1551: /* Sort by increasing column numbers, assuming A and B already sorted */
1552: PetscInt imark = -1;
1553: if (v) {
1554: *v = v_p = mat->rowvalues;
1555: for (i=0; i<nzB; i++) {
1556: if (cmap[cworkB[i]/bs] < cstart) v_p[i] = vworkB[i];
1557: else break;
1558: }
1559: imark = i;
1560: for (i=0; i<nzA; i++) v_p[imark+i] = vworkA[i];
1561: for (i=imark; i<nzB; i++) v_p[nzA+i] = vworkB[i];
1562: }
1563: if (idx) {
1564: *idx = idx_p = mat->rowindices;
1565: if (imark > -1) {
1566: for (i=0; i<imark; i++) {
1567: idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1568: }
1569: } else {
1570: for (i=0; i<nzB; i++) {
1571: if (cmap[cworkB[i]/bs] < cstart) idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1572: else break;
1573: }
1574: imark = i;
1575: }
1576: for (i=0; i<nzA; i++) idx_p[imark+i] = cstart*bs + cworkA[i];
1577: for (i=imark; i<nzB; i++) idx_p[nzA+i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1578: }
1579: } else {
1580: if (idx) *idx = 0;
1581: if (v) *v = 0;
1582: }
1583: }
1584: *nz = nztot;
1585: (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1586: (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1587: return(0);
1588: }
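/*
   Illustrative usage sketch (not part of the original file): extracting local
   rows with MatGetRow()/MatRestoreRow(), which call the routines above; only
   rows owned by this process may be requested, and only one row may be
   outstanding at a time.  A is a hypothetical assembled MATMPIBAIJ matrix.

      PetscInt          rstart,rend,row,ncols;
      const PetscInt    *cols;
      const PetscScalar *vals;
      MatGetOwnershipRange(A,&rstart,&rend);
      for (row=rstart; row<rend; row++) {
        MatGetRow(A,row,&ncols,&cols,&vals);
        ...  use ncols, cols[], vals[]  ...
        MatRestoreRow(A,row,&ncols,&cols,&vals);
      }
*/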
1592: PetscErrorCode MatRestoreRow_MPIBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1593: {
1594: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1597: if (!baij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1598: baij->getrowactive = PETSC_FALSE;
1599: return(0);
1600: }
1604: PetscErrorCode MatZeroEntries_MPIBAIJ(Mat A)
1605: {
1606: Mat_MPIBAIJ *l = (Mat_MPIBAIJ*)A->data;
1610: MatZeroEntries(l->A);
1611: MatZeroEntries(l->B);
1612: return(0);
1613: }
1617: PetscErrorCode MatGetInfo_MPIBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1618: {
1619: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)matin->data;
1620: Mat A = a->A,B = a->B;
1622: PetscReal isend[5],irecv[5];
1625: info->block_size = (PetscReal)matin->rmap->bs;
1627: MatGetInfo(A,MAT_LOCAL,info);
1629: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1630: isend[3] = info->memory; isend[4] = info->mallocs;
1632: MatGetInfo(B,MAT_LOCAL,info);
1634: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1635: isend[3] += info->memory; isend[4] += info->mallocs;
1637: if (flag == MAT_LOCAL) {
1638: info->nz_used = isend[0];
1639: info->nz_allocated = isend[1];
1640: info->nz_unneeded = isend[2];
1641: info->memory = isend[3];
1642: info->mallocs = isend[4];
1643: } else if (flag == MAT_GLOBAL_MAX) {
1644: MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)matin));
1646: info->nz_used = irecv[0];
1647: info->nz_allocated = irecv[1];
1648: info->nz_unneeded = irecv[2];
1649: info->memory = irecv[3];
1650: info->mallocs = irecv[4];
1651: } else if (flag == MAT_GLOBAL_SUM) {
1652: MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)matin));
1654: info->nz_used = irecv[0];
1655: info->nz_allocated = irecv[1];
1656: info->nz_unneeded = irecv[2];
1657: info->memory = irecv[3];
1658: info->mallocs = irecv[4];
1659: } else SETERRQ1(PetscObjectComm((PetscObject)matin),PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag);
1660: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1661: info->fill_ratio_needed = 0;
1662: info->factor_mallocs = 0;
1663: return(0);
1664: }
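/*
   Illustrative usage sketch (not part of the original file): querying the
   counts accumulated above through MatGetInfo(); A is a hypothetical assembled
   MATMPIBAIJ matrix.

      MatInfo info;
      MatGetInfo(A,MAT_GLOBAL_SUM,&info);
      PetscPrintf(PETSC_COMM_WORLD,"nonzeros used %g allocated %g\n",
                  info.nz_used,info.nz_allocated);
*/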
1668: PetscErrorCode MatSetOption_MPIBAIJ(Mat A,MatOption op,PetscBool flg)
1669: {
1670: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1674: switch (op) {
1675: case MAT_NEW_NONZERO_LOCATIONS:
1676: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1677: case MAT_UNUSED_NONZERO_LOCATION_ERR:
1678: case MAT_KEEP_NONZERO_PATTERN:
1679: case MAT_NEW_NONZERO_LOCATION_ERR:
1680: MatCheckPreallocated(A,1);
1681: MatSetOption(a->A,op,flg);
1682: MatSetOption(a->B,op,flg);
1683: break;
1684: case MAT_ROW_ORIENTED:
1685: MatCheckPreallocated(A,1);
1686: a->roworiented = flg;
1688: MatSetOption(a->A,op,flg);
1689: MatSetOption(a->B,op,flg);
1690: break;
1691: case MAT_NEW_DIAGONALS:
1692: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1693: break;
1694: case MAT_IGNORE_OFF_PROC_ENTRIES:
1695: a->donotstash = flg;
1696: break;
1697: case MAT_USE_HASH_TABLE:
1698: a->ht_flag = flg;
1699: break;
1700: case MAT_SYMMETRIC:
1701: case MAT_STRUCTURALLY_SYMMETRIC:
1702: case MAT_HERMITIAN:
1703: case MAT_SYMMETRY_ETERNAL:
1704: MatCheckPreallocated(A,1);
1705: MatSetOption(a->A,op,flg);
1706: break;
1707: default:
1708: SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"unknown option %d",op);
1709: }
1710: return(0);
1711: }
1715: PetscErrorCode MatTranspose_MPIBAIJ(Mat A,MatReuse reuse,Mat *matout)
1716: {
1717: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)A->data;
1718: Mat_SeqBAIJ *Aloc;
1719: Mat B;
1721: PetscInt M =A->rmap->N,N=A->cmap->N,*ai,*aj,i,*rvals,j,k,col;
1722: PetscInt bs=A->rmap->bs,mbs=baij->mbs;
1723: MatScalar *a;
1726: if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");
1727: if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1728: MatCreate(PetscObjectComm((PetscObject)A),&B);
1729: MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
1730: MatSetType(B,((PetscObject)A)->type_name);
1731: /* Do not know preallocation information, but must set block size */
1732: MatMPIBAIJSetPreallocation(B,A->rmap->bs,PETSC_DECIDE,NULL,PETSC_DECIDE,NULL);
1733: } else {
1734: B = *matout;
1735: }
1737: /* copy over the A part */
1738: Aloc = (Mat_SeqBAIJ*)baij->A->data;
1739: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1740: PetscMalloc1(bs,&rvals);
1742: for (i=0; i<mbs; i++) {
1743: rvals[0] = bs*(baij->rstartbs + i);
1744: for (j=1; j<bs; j++) rvals[j] = rvals[j-1] + 1;
1745: for (j=ai[i]; j<ai[i+1]; j++) {
1746: col = (baij->cstartbs+aj[j])*bs;
1747: for (k=0; k<bs; k++) {
1748: MatSetValues_MPIBAIJ(B,1,&col,bs,rvals,a,INSERT_VALUES);
1750: col++; a += bs;
1751: }
1752: }
1753: }
1754: /* copy over the B part */
1755: Aloc = (Mat_SeqBAIJ*)baij->B->data;
1756: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1757: for (i=0; i<mbs; i++) {
1758: rvals[0] = bs*(baij->rstartbs + i);
1759: for (j=1; j<bs; j++) rvals[j] = rvals[j-1] + 1;
1760: for (j=ai[i]; j<ai[i+1]; j++) {
1761: col = baij->garray[aj[j]]*bs;
1762: for (k=0; k<bs; k++) {
1763: MatSetValues_MPIBAIJ(B,1,&col,bs,rvals,a,INSERT_VALUES);
1764: col++;
1765: a += bs;
1766: }
1767: }
1768: }
1769: PetscFree(rvals);
1770: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1771: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1773: if (reuse == MAT_INITIAL_MATRIX || *matout != A) *matout = B;
1774: else {
1775: MatHeaderMerge(A,&B);
1776: }
1777: return(0);
1778: }
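/*
   Illustrative usage sketch (editorial example, not part of mpibaij.c): the routine above is
   reached through MatTranspose(). With an assembled MPIBAIJ matrix A (assumed to exist), a
   caller can build an explicit transpose, or transpose in place when A is square (the error
   check above rejects in-place transposition of rectangular matrices):

      Mat At;
      MatTranspose(A,MAT_INITIAL_MATRIX,&At);
      MatTranspose(A,MAT_REUSE_MATRIX,&A);
      MatDestroy(&At);
*/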
1782: PetscErrorCode MatDiagonalScale_MPIBAIJ(Mat mat,Vec ll,Vec rr)
1783: {
1784: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1785: Mat a = baij->A,b = baij->B;
1787: PetscInt s1,s2,s3;
1790: MatGetLocalSize(mat,&s2,&s3);
1791: if (rr) {
1792: VecGetLocalSize(rr,&s1);
1793: if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1794: /* Overlap communication with computation. */
1795: VecScatterBegin(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1796: }
1797: if (ll) {
1798: VecGetLocalSize(ll,&s1);
1799: if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1800: (*b->ops->diagonalscale)(b,ll,NULL);
1801: }
1802: /* scale the diagonal block */
1803: (*a->ops->diagonalscale)(a,ll,rr);
1805: if (rr) {
1806: /* Do a scatter end and then right scale the off-diagonal block */
1807: VecScatterEnd(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1808: (*b->ops->diagonalscale)(b,NULL,baij->lvec);
1809: }
1810: return(0);
1811: }
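/*
   Illustrative usage sketch (editorial example, not part of mpibaij.c): the routine above is
   invoked through MatDiagonalScale(), scaling A in place as diag(ll) * A * diag(rr). The
   vectors must conform to the row and column layouts, which MatCreateVecs() provides; the
   scale factors below are made up for illustration:

      Vec l,r;
      MatCreateVecs(A,&r,&l);
      VecSet(l,2.0);
      VecSet(r,0.5);
      MatDiagonalScale(A,l,r);
      VecDestroy(&l);
      VecDestroy(&r);
*/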
1815: PetscErrorCode MatZeroRows_MPIBAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
1816: {
1817: Mat_MPIBAIJ *l = (Mat_MPIBAIJ *) A->data;
1818: PetscInt *owners = A->rmap->range;
1819: PetscInt n = A->rmap->n;
1820: PetscSF sf;
1821: PetscInt *lrows;
1822: PetscSFNode *rrows;
1823: PetscInt r, p = 0, len = 0;
1827: /* Create SF where leaves are input rows and roots are owned rows */
1828: PetscMalloc1(n, &lrows);
1829: for (r = 0; r < n; ++r) lrows[r] = -1;
1830: if (!A->nooffproczerorows) {PetscMalloc1(N, &rrows);}
1831: for (r = 0; r < N; ++r) {
1832: const PetscInt idx = rows[r];
1833: if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
1834: if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
1835: PetscLayoutFindOwner(A->rmap,idx,&p);
1836: }
1837: if (A->nooffproczerorows) {
1838: if (p != l->rank) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"MAT_NO_OFF_PROC_ZERO_ROWS set, but row %D is not owned by rank %d",idx,l->rank);
1839: lrows[len++] = idx - owners[p];
1840: } else {
1841: rrows[r].rank = p;
1842: rrows[r].index = rows[r] - owners[p];
1843: }
1844: }
1845: if (!A->nooffproczerorows) {
1846: PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
1847: PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
1848: /* Collect flags for rows to be zeroed */
1849: PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1850: PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1851: PetscSFDestroy(&sf);
1852: /* Compress and put in row numbers */
1853: for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
1854: }
1855: /* fix right hand side if needed */
1856: if (x && b) {
1857: const PetscScalar *xx;
1858: PetscScalar *bb;
1860: VecGetArrayRead(x,&xx);
1861: VecGetArray(b,&bb);
1862: for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
1863: VecRestoreArrayRead(x,&xx);
1864: VecRestoreArray(b,&bb);
1865: }
1867: /* actually zap the local rows */
1868: /*
1869: Zero the required rows. If the "diagonal block" of the matrix
1870: is square and the user wishes to set the diagonal, we use separate
1871: code so that MatSetValues() is not called for each diagonal entry;
1872: doing so would allocate new memory each time, causing many mallocs and slowing things down.
1874: */
1875: /* must zero l->B before l->A because the (diag) case below may put values into l->B*/
1876: MatZeroRows_SeqBAIJ(l->B,len,lrows,0.0,NULL,NULL);
1877: if ((diag != 0.0) && (l->A->rmap->N == l->A->cmap->N)) {
1878: MatZeroRows_SeqBAIJ(l->A,len,lrows,diag,NULL,NULL);
1879: } else if (diag != 0.0) {
1880: MatZeroRows_SeqBAIJ(l->A,len,lrows,0.0,0,0);
1881: if (((Mat_SeqBAIJ*)l->A->data)->nonew) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\
1882: MAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
1883: for (r = 0; r < len; ++r) {
1884: const PetscInt row = lrows[r] + A->rmap->rstart;
1885: MatSetValues(A,1,&row,1,&row,&diag,INSERT_VALUES);
1886: }
1887: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1888: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1889: } else {
1890: MatZeroRows_SeqBAIJ(l->A,len,lrows,0.0,NULL,NULL);
1891: }
1892: PetscFree(lrows);
1894: /* only change matrix nonzero state if pattern was allowed to be changed */
1895: if (!((Mat_SeqBAIJ*)(l->A->data))->keepnonzeropattern) {
1896: PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
1897: MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
1898: }
1899: return(0);
1900: }
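/*
   Illustrative usage sketch (editorial example, not part of mpibaij.c): the routine above is
   reached through MatZeroRows(), e.g. to impose Dirichlet conditions. The global row indices
   below are made up; any process may list rows it does not own, which the PetscSF built above
   communicates to the owners. Passing vectors x and b also sets b = diag*x in the zeroed rows:

      PetscInt rows[2] = {0,5};
      MatZeroRows(A,2,rows,1.0,x,b);
*/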
1904: PetscErrorCode MatZeroRowsColumns_MPIBAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
1905: {
1906: Mat_MPIBAIJ *l = (Mat_MPIBAIJ*)A->data;
1907: PetscErrorCode ierr;
1908: PetscMPIInt n = A->rmap->n;
1909: PetscInt i,j,k,r,p = 0,len = 0,row,col,count;
1910: PetscInt *lrows,*owners = A->rmap->range;
1911: PetscSFNode *rrows;
1912: PetscSF sf;
1913: const PetscScalar *xx;
1914: PetscScalar *bb,*mask;
1915: Vec xmask,lmask;
1916: Mat_SeqBAIJ *baij = (Mat_SeqBAIJ*)l->B->data;
1917: PetscInt bs = A->rmap->bs, bs2 = baij->bs2;
1918: PetscScalar *aa;
1921: /* Create SF where leaves are input rows and roots are owned rows */
1922: PetscMalloc1(n, &lrows);
1923: for (r = 0; r < n; ++r) lrows[r] = -1;
1924: PetscMalloc1(N, &rrows);
1925: for (r = 0; r < N; ++r) {
1926: const PetscInt idx = rows[r];
1927: if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
1928: if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
1929: PetscLayoutFindOwner(A->rmap,idx,&p);
1930: }
1931: rrows[r].rank = p;
1932: rrows[r].index = rows[r] - owners[p];
1933: }
1934: PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
1935: PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
1936: /* Collect flags for rows to be zeroed */
1937: PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1938: PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1939: PetscSFDestroy(&sf);
1940: /* Compress and put in row numbers */
1941: for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
1942: /* zero diagonal part of matrix */
1943: MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
1944: /* handle off diagonal part of matrix */
1945: MatCreateVecs(A,&xmask,NULL);
1946: VecDuplicate(l->lvec,&lmask);
1947: VecGetArray(xmask,&bb);
1948: for (i=0; i<len; i++) bb[lrows[i]] = 1;
1949: VecRestoreArray(xmask,&bb);
1950: VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
1951: VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
1952: VecDestroy(&xmask);
1953: if (x) {
1954: VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
1955: VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
1956: VecGetArrayRead(l->lvec,&xx);
1957: VecGetArray(b,&bb);
1958: }
1959: VecGetArray(lmask,&mask);
1960: /* remove zeroed rows of off diagonal matrix */
1961: for (i = 0; i < len; ++i) {
1962: row = lrows[i];
1963: count = (baij->i[row/bs +1] - baij->i[row/bs])*bs;
1964: aa = ((MatScalar*)(baij->a)) + baij->i[row/bs]*bs2 + (row%bs);
1965: for (k = 0; k < count; ++k) {
1966: aa[0] = 0.0;
1967: aa += bs;
1968: }
1969: }
1970: /* loop over all elements of the off-process part of the matrix, zeroing the removed columns */
1971: for (i = 0; i < l->B->rmap->N; ++i) {
1972: row = i/bs;
1973: for (j = baij->i[row]; j < baij->i[row+1]; ++j) {
1974: for (k = 0; k < bs; ++k) {
1975: col = bs*baij->j[j] + k;
1976: if (PetscAbsScalar(mask[col])) {
1977: aa = ((MatScalar*)(baij->a)) + j*bs2 + (i%bs) + bs*k;
1978: if (x) bb[i] -= aa[0]*xx[col];
1979: aa[0] = 0.0;
1980: }
1981: }
1982: }
1983: }
1984: if (x) {
1985: VecRestoreArray(b,&bb);
1986: VecRestoreArrayRead(l->lvec,&xx);
1987: }
1988: VecRestoreArray(lmask,&mask);
1989: VecDestroy(&lmask);
1990: PetscFree(lrows);
1992: /* only change matrix nonzero state if pattern was allowed to be changed */
1993: if (!((Mat_SeqBAIJ*)(l->A->data))->keepnonzeropattern) {
1994: PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
1995: MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
1996: }
1997: return(0);
1998: }
2002: PetscErrorCode MatSetUnfactored_MPIBAIJ(Mat A)
2003: {
2004: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
2008: MatSetUnfactored(a->A);
2009: return(0);
2010: }
2012: static PetscErrorCode MatDuplicate_MPIBAIJ(Mat,MatDuplicateOption,Mat*);
2016: PetscErrorCode MatEqual_MPIBAIJ(Mat A,Mat B,PetscBool *flag)
2017: {
2018: Mat_MPIBAIJ *matB = (Mat_MPIBAIJ*)B->data,*matA = (Mat_MPIBAIJ*)A->data;
2019: Mat a,b,c,d;
2020: PetscBool flg;
2024: a = matA->A; b = matA->B;
2025: c = matB->A; d = matB->B;
2027: MatEqual(a,c,&flg);
2028: if (flg) {
2029: MatEqual(b,d,&flg);
2030: }
2031: MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2032: return(0);
2033: }
2037: PetscErrorCode MatCopy_MPIBAIJ(Mat A,Mat B,MatStructure str)
2038: {
2040: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
2041: Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)B->data;
2044: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2045: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2046: MatCopy_Basic(A,B,str);
2047: } else {
2048: MatCopy(a->A,b->A,str);
2049: MatCopy(a->B,b->B,str);
2050: }
2051: return(0);
2052: }
2056: PetscErrorCode MatSetUp_MPIBAIJ(Mat A)
2057: {
2061: MatMPIBAIJSetPreallocation(A,A->rmap->bs,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
2062: return(0);
2063: }
2067: PetscErrorCode MatAXPYGetPreallocation_MPIBAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2068: {
2070: PetscInt bs = Y->rmap->bs,m = Y->rmap->N/bs;
2071: Mat_SeqBAIJ *x = (Mat_SeqBAIJ*)X->data;
2072: Mat_SeqBAIJ *y = (Mat_SeqBAIJ*)Y->data;
2075: MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2076: return(0);
2077: }
2081: PetscErrorCode MatAXPY_MPIBAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2082: {
2084: Mat_MPIBAIJ *xx=(Mat_MPIBAIJ*)X->data,*yy=(Mat_MPIBAIJ*)Y->data;
2085: PetscBLASInt bnz,one=1;
2086: Mat_SeqBAIJ *x,*y;
2089: if (str == SAME_NONZERO_PATTERN) {
2090: PetscScalar alpha = a;
2091: x = (Mat_SeqBAIJ*)xx->A->data;
2092: y = (Mat_SeqBAIJ*)yy->A->data;
2093: PetscBLASIntCast(x->nz,&bnz);
2094: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2095: x = (Mat_SeqBAIJ*)xx->B->data;
2096: y = (Mat_SeqBAIJ*)yy->B->data;
2097: PetscBLASIntCast(x->nz,&bnz);
2098: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2099: PetscObjectStateIncrease((PetscObject)Y);
2100: } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X are a subset of Y's */
2101: MatAXPY_Basic(Y,a,X,str);
2102: } else {
2103: Mat B;
2104: PetscInt *nnz_d,*nnz_o,bs=Y->rmap->bs;
2105: PetscMalloc1(yy->A->rmap->N,&nnz_d);
2106: PetscMalloc1(yy->B->rmap->N,&nnz_o);
2107: MatCreate(PetscObjectComm((PetscObject)Y),&B);
2108: PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2109: MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);
2110: MatSetBlockSizesFromMats(B,Y,Y);
2111: MatSetType(B,MATMPIBAIJ);
2112: MatAXPYGetPreallocation_SeqBAIJ(yy->A,xx->A,nnz_d);
2113: MatAXPYGetPreallocation_MPIBAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2114: MatMPIBAIJSetPreallocation(B,bs,0,nnz_d,0,nnz_o);
2115: /* MatAXPY_BasicWithPreallocation() for BAIJ matrix is much slower than AIJ, even for bs=1 ! */
2116: MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2117: MatHeaderReplace(Y,&B);
2118: PetscFree(nnz_d);
2119: PetscFree(nnz_o);
2120: }
2121: return(0);
2122: }
2126: PetscErrorCode MatRealPart_MPIBAIJ(Mat A)
2127: {
2128: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
2132: MatRealPart(a->A);
2133: MatRealPart(a->B);
2134: return(0);
2135: }
2139: PetscErrorCode MatImaginaryPart_MPIBAIJ(Mat A)
2140: {
2141: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
2145: MatImaginaryPart(a->A);
2146: MatImaginaryPart(a->B);
2147: return(0);
2148: }
2152: PetscErrorCode MatGetSubMatrix_MPIBAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
2153: {
2155: IS iscol_local;
2156: PetscInt csize;
2159: ISGetLocalSize(iscol,&csize);
2160: if (call == MAT_REUSE_MATRIX) {
2161: PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
2162: if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
2163: } else {
2164: ISAllGather(iscol,&iscol_local);
2165: }
2166: MatGetSubMatrix_MPIBAIJ_Private(mat,isrow,iscol_local,csize,call,newmat);
2167: if (call == MAT_INITIAL_MATRIX) {
2168: PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
2169: ISDestroy(&iscol_local);
2170: }
2171: return(0);
2172: }
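/*
   Illustrative usage sketch (editorial example, not part of mpibaij.c): the routine above is
   reached through MatGetSubMatrix(). For simplicity each process selects all of its own rows
   and the matching columns of a square matrix A, so the extracted matrix equals A; real uses
   would pass smaller (preferably block-aligned) index sets:

      IS       is;
      PetscInt rstart,rend;
      Mat      Asub;
      MatGetOwnershipRange(A,&rstart,&rend);
      ISCreateStride(PETSC_COMM_WORLD,rend-rstart,rstart,1,&is);
      MatGetSubMatrix(A,is,is,MAT_INITIAL_MATRIX,&Asub);
      ISDestroy(&is);
*/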
2173: extern PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool*,PetscBool*,Mat*);
2176: /*
2177: Not great since it makes two copies of the submatrix: first a SeqBAIJ
2178: on each process, and then the end result by concatenating the local matrices.
2179: Writing it directly would be much like MatGetSubMatrices_MPIBAIJ().
2180: This routine is used for BAIJ and SBAIJ matrices (unfortunate dependency).
2181: */
2182: PetscErrorCode MatGetSubMatrix_MPIBAIJ_Private(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
2183: {
2185: PetscMPIInt rank,size;
2186: PetscInt i,m,n,rstart,row,rend,nz,*cwork,j,bs;
2187: PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal,ncol,nrow;
2188: Mat M,Mreuse;
2189: MatScalar *vwork,*aa;
2190: MPI_Comm comm;
2191: IS isrow_new, iscol_new;
2192: PetscBool idflag,allrows, allcols;
2193: Mat_SeqBAIJ *aij;
2196: PetscObjectGetComm((PetscObject)mat,&comm);
2197: MPI_Comm_rank(comm,&rank);
2198: MPI_Comm_size(comm,&size);
2199: /* The compression and expansion should be avoided. It does not flag
2200: errors and might change the indices, hence it is buggy */
2201: ISCompressIndicesGeneral(mat->rmap->N,mat->rmap->n,mat->rmap->bs,1,&isrow,&isrow_new);
2202: ISCompressIndicesGeneral(mat->cmap->N,mat->cmap->n,mat->cmap->bs,1,&iscol,&iscol_new);
2204: /* Check for special case: each processor gets entire matrix columns */
2205: ISIdentity(iscol,&idflag);
2206: ISGetLocalSize(iscol,&ncol);
2207: if (idflag && ncol == mat->cmap->N) allcols = PETSC_TRUE;
2208: else allcols = PETSC_FALSE;
2210: ISIdentity(isrow,&idflag);
2211: ISGetLocalSize(isrow,&nrow);
2212: if (idflag && nrow == mat->rmap->N) allrows = PETSC_TRUE;
2213: else allrows = PETSC_FALSE;
2215: if (call == MAT_REUSE_MATRIX) {
2216: PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
2217: if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
2218: MatGetSubMatrices_MPIBAIJ_local(mat,1,&isrow_new,&iscol_new,MAT_REUSE_MATRIX,&allrows,&allcols,&Mreuse);
2219: } else {
2220: MatGetSubMatrices_MPIBAIJ_local(mat,1,&isrow_new,&iscol_new,MAT_INITIAL_MATRIX,&allrows,&allcols,&Mreuse);
2221: }
2222: ISDestroy(&isrow_new);
2223: ISDestroy(&iscol_new);
2224: /*
2225: m - number of local rows
2226: n - number of columns (same on all processors)
2227: rstart - first row in new global matrix generated
2228: */
2229: MatGetBlockSize(mat,&bs);
2230: MatGetSize(Mreuse,&m,&n);
2231: m = m/bs;
2232: n = n/bs;
2234: if (call == MAT_INITIAL_MATRIX) {
2235: aij = (Mat_SeqBAIJ*)(Mreuse)->data;
2236: ii = aij->i;
2237: jj = aij->j;
2239: /*
2240: Determine the number of non-zeros in the diagonal and off-diagonal
2241: portions of the matrix in order to do correct preallocation
2242: */
2244: /* first get start and end of "diagonal" columns */
2245: if (csize == PETSC_DECIDE) {
2246: ISGetSize(isrow,&mglobal);
2247: if (mglobal == n*bs) { /* square matrix */
2248: nlocal = m;
2249: } else {
2250: nlocal = n/size + ((n % size) > rank);
2251: }
2252: } else {
2253: nlocal = csize/bs;
2254: }
2255: MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
2256: rstart = rend - nlocal;
2257: if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
2259: /* next, compute all the lengths */
2260: PetscMalloc2(m+1,&dlens,m+1,&olens);
2261: for (i=0; i<m; i++) {
2262: jend = ii[i+1] - ii[i];
2263: olen = 0;
2264: dlen = 0;
2265: for (j=0; j<jend; j++) {
2266: if (*jj < rstart || *jj >= rend) olen++;
2267: else dlen++;
2268: jj++;
2269: }
2270: olens[i] = olen;
2271: dlens[i] = dlen;
2272: }
2273: MatCreate(comm,&M);
2274: MatSetSizes(M,bs*m,bs*nlocal,PETSC_DECIDE,bs*n);
2275: MatSetType(M,((PetscObject)mat)->type_name);
2276: MatMPIBAIJSetPreallocation(M,bs,0,dlens,0,olens);
2277: MatMPISBAIJSetPreallocation(M,bs,0,dlens,0,olens);
2278: PetscFree2(dlens,olens);
2279: } else {
2280: PetscInt ml,nl;
2282: M = *newmat;
2283: MatGetLocalSize(M,&ml,&nl);
2284: if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
2285: MatZeroEntries(M);
2286: /*
2287: The next two lines are needed so we may call MatSetValuesBlocked_MPIBAIJ() below directly,
2288: rather than the slower generic MatSetValuesBlocked().
2289: */
2290: M->was_assembled = PETSC_TRUE;
2291: M->assembled = PETSC_FALSE;
2292: }
2293: MatSetOption(M,MAT_ROW_ORIENTED,PETSC_FALSE);
2294: MatGetOwnershipRange(M,&rstart,&rend);
2295: aij = (Mat_SeqBAIJ*)(Mreuse)->data;
2296: ii = aij->i;
2297: jj = aij->j;
2298: aa = aij->a;
2299: for (i=0; i<m; i++) {
2300: row = rstart/bs + i;
2301: nz = ii[i+1] - ii[i];
2302: cwork = jj; jj += nz;
2303: vwork = aa; aa += nz*bs*bs;
2304: MatSetValuesBlocked_MPIBAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
2305: }
2307: MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
2308: MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
2309: *newmat = M;
2311: /* save submatrix used in processor for next request */
2312: if (call == MAT_INITIAL_MATRIX) {
2313: PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
2314: PetscObjectDereference((PetscObject)Mreuse);
2315: }
2316: return(0);
2317: }
2321: PetscErrorCode MatPermute_MPIBAIJ(Mat A,IS rowp,IS colp,Mat *B)
2322: {
2323: MPI_Comm comm,pcomm;
2324: PetscInt clocal_size,nrows;
2325: const PetscInt *rows;
2326: PetscMPIInt size;
2327: IS crowp,lcolp;
2331: PetscObjectGetComm((PetscObject)A,&comm);
2332: /* make a collective version of 'rowp' */
2333: PetscObjectGetComm((PetscObject)rowp,&pcomm);
2334: if (pcomm==comm) {
2335: crowp = rowp;
2336: } else {
2337: ISGetSize(rowp,&nrows);
2338: ISGetIndices(rowp,&rows);
2339: ISCreateGeneral(comm,nrows,rows,PETSC_COPY_VALUES,&crowp);
2340: ISRestoreIndices(rowp,&rows);
2341: }
2342: ISSetPermutation(crowp);
2343: /* make a local version of 'colp' */
2344: PetscObjectGetComm((PetscObject)colp,&pcomm);
2345: MPI_Comm_size(pcomm,&size);
2346: if (size==1) {
2347: lcolp = colp;
2348: } else {
2349: ISAllGather(colp,&lcolp);
2350: }
2351: ISSetPermutation(lcolp);
2352: /* now we just get the submatrix */
2353: MatGetLocalSize(A,NULL,&clocal_size);
2354: MatGetSubMatrix_MPIBAIJ_Private(A,crowp,lcolp,clocal_size,MAT_INITIAL_MATRIX,B);
2355: /* clean up */
2356: if (pcomm!=comm) {
2357: ISDestroy(&crowp);
2358: }
2359: if (size>1) {
2360: ISDestroy(&lcolp);
2361: }
2362: return(0);
2363: }
2367: PetscErrorCode MatGetGhosts_MPIBAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
2368: {
2369: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*) mat->data;
2370: Mat_SeqBAIJ *B = (Mat_SeqBAIJ*)baij->B->data;
2373: if (nghosts) *nghosts = B->nbs;
2374: if (ghosts) *ghosts = baij->garray;
2375: return(0);
2376: }
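/*
   Illustrative usage sketch (editorial example, not part of mpibaij.c): MatGetGhosts() exposes
   the global block indices (baij->garray) of the off-diagonal columns this process needs from
   other processes; the array is owned by the matrix and must not be freed by the caller:

      PetscInt       nghosts;
      const PetscInt *ghosts;
      MatGetGhosts(A,&nghosts,&ghosts);
*/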
2380: PetscErrorCode MatGetSeqNonzeroStructure_MPIBAIJ(Mat A,Mat *newmat)
2381: {
2382: Mat B;
2383: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
2384: Mat_SeqBAIJ *ad = (Mat_SeqBAIJ*)a->A->data,*bd = (Mat_SeqBAIJ*)a->B->data;
2385: Mat_SeqAIJ *b;
2387: PetscMPIInt size,rank,*recvcounts = 0,*displs = 0;
2388: PetscInt sendcount,i,*rstarts = A->rmap->range,n,cnt,j,bs = A->rmap->bs;
2389: PetscInt m,*garray = a->garray,*lens,*jsendbuf,*a_jsendbuf,*b_jsendbuf;
2392: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
2393: MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
2395: /* ----------------------------------------------------------------
2396: Tell every processor the number of nonzeros per row
2397: */
2398: PetscMalloc1(A->rmap->N/bs,&lens);
2399: for (i=A->rmap->rstart/bs; i<A->rmap->rend/bs; i++) {
2400: lens[i] = ad->i[i-A->rmap->rstart/bs+1] - ad->i[i-A->rmap->rstart/bs] + bd->i[i-A->rmap->rstart/bs+1] - bd->i[i-A->rmap->rstart/bs];
2401: }
2402: PetscMalloc1(2*size,&recvcounts);
2403: displs = recvcounts + size;
2404: for (i=0; i<size; i++) {
2405: recvcounts[i] = A->rmap->range[i+1]/bs - A->rmap->range[i]/bs;
2406: displs[i] = A->rmap->range[i]/bs;
2407: }
2408: #if defined(PETSC_HAVE_MPI_IN_PLACE)
2409: MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,lens,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));
2410: #else
2411: sendcount = A->rmap->rend/bs - A->rmap->rstart/bs;
2412: MPI_Allgatherv(lens+A->rmap->rstart/bs,sendcount,MPIU_INT,lens,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));
2413: #endif
2414: /* ---------------------------------------------------------------
2415: Create the sequential matrix of the same type as the local block diagonal
2416: */
2417: MatCreate(PETSC_COMM_SELF,&B);
2418: MatSetSizes(B,A->rmap->N/bs,A->cmap->N/bs,PETSC_DETERMINE,PETSC_DETERMINE);
2419: MatSetType(B,MATSEQAIJ);
2420: MatSeqAIJSetPreallocation(B,0,lens);
2421: b = (Mat_SeqAIJ*)B->data;
2423: /*--------------------------------------------------------------------
2424: Copy my part of matrix column indices over
2425: */
2426: sendcount = ad->nz + bd->nz;
2427: jsendbuf = b->j + b->i[rstarts[rank]/bs];
2428: a_jsendbuf = ad->j;
2429: b_jsendbuf = bd->j;
2430: n = A->rmap->rend/bs - A->rmap->rstart/bs;
2431: cnt = 0;
2432: for (i=0; i<n; i++) {
2434: /* put in lower diagonal portion */
2435: m = bd->i[i+1] - bd->i[i];
2436: while (m > 0) {
2437: /* is it above diagonal (in bd (compressed) numbering) */
2438: if (garray[*b_jsendbuf] > A->rmap->rstart/bs + i) break;
2439: jsendbuf[cnt++] = garray[*b_jsendbuf++];
2440: m--;
2441: }
2443: /* put in diagonal portion */
2444: for (j=ad->i[i]; j<ad->i[i+1]; j++) {
2445: jsendbuf[cnt++] = A->rmap->rstart/bs + *a_jsendbuf++;
2446: }
2448: /* put in upper diagonal portion */
2449: while (m-- > 0) {
2450: jsendbuf[cnt++] = garray[*b_jsendbuf++];
2451: }
2452: }
2453: if (cnt != sendcount) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt);
2455: /*--------------------------------------------------------------------
2456: Gather all column indices to all processors
2457: */
2458: for (i=0; i<size; i++) {
2459: recvcounts[i] = 0;
2460: for (j=A->rmap->range[i]/bs; j<A->rmap->range[i+1]/bs; j++) {
2461: recvcounts[i] += lens[j];
2462: }
2463: }
2464: displs[0] = 0;
2465: for (i=1; i<size; i++) {
2466: displs[i] = displs[i-1] + recvcounts[i-1];
2467: }
2468: #if defined(PETSC_HAVE_MPI_IN_PLACE)
2469: MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,b->j,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));
2470: #else
2471: MPI_Allgatherv(jsendbuf,sendcount,MPIU_INT,b->j,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));
2472: #endif
2473: /*--------------------------------------------------------------------
2474: Assemble the matrix into usable form (note that the numerical values are not yet set)
2475: */
2476: /* set the b->ilen (length of each row) values */
2477: PetscMemcpy(b->ilen,lens,(A->rmap->N/bs)*sizeof(PetscInt));
2478: /* set the b->i indices */
2479: b->i[0] = 0;
2480: for (i=1; i<=A->rmap->N/bs; i++) {
2481: b->i[i] = b->i[i-1] + lens[i-1];
2482: }
2483: PetscFree(lens);
2484: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2485: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2486: PetscFree(recvcounts);
2488: if (A->symmetric) {
2489: MatSetOption(B,MAT_SYMMETRIC,PETSC_TRUE);
2490: } else if (A->hermitian) {
2491: MatSetOption(B,MAT_HERMITIAN,PETSC_TRUE);
2492: } else if (A->structurally_symmetric) {
2493: MatSetOption(B,MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
2494: }
2495: *newmat = B;
2496: return(0);
2497: }
2501: PetscErrorCode MatSOR_MPIBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2502: {
2503: Mat_MPIBAIJ *mat = (Mat_MPIBAIJ*)matin->data;
2505: Vec bb1 = 0;
2508: if (flag == SOR_APPLY_UPPER) {
2509: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2510: return(0);
2511: }
2513: if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS) {
2514: VecDuplicate(bb,&bb1);
2515: }
2517: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
2518: if (flag & SOR_ZERO_INITIAL_GUESS) {
2519: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2520: its--;
2521: }
2523: while (its--) {
2524: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2525: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2527: /* update rhs: bb1 = bb - B*x */
2528: VecScale(mat->lvec,-1.0);
2529: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
2531: /* local sweep */
2532: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
2533: }
2534: } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
2535: if (flag & SOR_ZERO_INITIAL_GUESS) {
2536: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2537: its--;
2538: }
2539: while (its--) {
2540: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2541: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2543: /* update rhs: bb1 = bb - B*x */
2544: VecScale(mat->lvec,-1.0);
2545: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
2547: /* local sweep */
2548: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
2549: }
2550: } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
2551: if (flag & SOR_ZERO_INITIAL_GUESS) {
2552: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2553: its--;
2554: }
2555: while (its--) {
2556: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2557: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2559: /* update rhs: bb1 = bb - B*x */
2560: VecScale(mat->lvec,-1.0);
2561: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
2563: /* local sweep */
2564: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
2565: }
2566: } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Requested parallel SOR variant is not supported");
2568: VecDestroy(&bb1);
2569: return(0);
2570: }
2574: PetscErrorCode MatGetColumnNorms_MPIBAIJ(Mat A,NormType type,PetscReal *norms)
2575: {
2577: Mat_MPIBAIJ *aij = (Mat_MPIBAIJ*)A->data;
2578: PetscInt N,i,*garray = aij->garray;
2579: PetscInt ib,jb,bs = A->rmap->bs;
2580: Mat_SeqBAIJ *a_aij = (Mat_SeqBAIJ*) aij->A->data;
2581: MatScalar *a_val = a_aij->a;
2582: Mat_SeqBAIJ *b_aij = (Mat_SeqBAIJ*) aij->B->data;
2583: MatScalar *b_val = b_aij->a;
2584: PetscReal *work;
2587: MatGetSize(A,NULL,&N);
2588: PetscCalloc1(N,&work);
2589: if (type == NORM_2) {
2590: for (i=a_aij->i[0]; i<a_aij->i[aij->A->rmap->n/bs]; i++) {
2591: for (jb=0; jb<bs; jb++) {
2592: for (ib=0; ib<bs; ib++) {
2593: work[A->cmap->rstart + a_aij->j[i] * bs + jb] += PetscAbsScalar(*a_val * *a_val);
2594: a_val++;
2595: }
2596: }
2597: }
2598: for (i=b_aij->i[0]; i<b_aij->i[aij->B->rmap->n/bs]; i++) {
2599: for (jb=0; jb<bs; jb++) {
2600: for (ib=0; ib<bs; ib++) {
2601: work[garray[b_aij->j[i]] * bs + jb] += PetscAbsScalar(*b_val * *b_val);
2602: b_val++;
2603: }
2604: }
2605: }
2606: } else if (type == NORM_1) {
2607: for (i=a_aij->i[0]; i<a_aij->i[aij->A->rmap->n/bs]; i++) {
2608: for (jb=0; jb<bs; jb++) {
2609: for (ib=0; ib<bs; ib++) {
2610: work[A->cmap->rstart + a_aij->j[i] * bs + jb] += PetscAbsScalar(*a_val);
2611: a_val++;
2612: }
2613: }
2614: }
2615: for (i=b_aij->i[0]; i<b_aij->i[aij->B->rmap->n/bs]; i++) {
2616: for (jb=0; jb<bs; jb++) {
2617: for (ib=0; ib<bs; ib++) {
2618: work[garray[b_aij->j[i]] * bs + jb] += PetscAbsScalar(*b_val);
2619: b_val++;
2620: }
2621: }
2622: }
2623: } else if (type == NORM_INFINITY) {
2624: for (i=a_aij->i[0]; i<a_aij->i[aij->A->rmap->n/bs]; i++) {
2625: for (jb=0; jb<bs; jb++) {
2626: for (ib=0; ib<bs; ib++) {
2627: int col = A->cmap->rstart + a_aij->j[i] * bs + jb;
2628: work[col] = PetscMax(PetscAbsScalar(*a_val), work[col]);
2629: a_val++;
2630: }
2631: }
2632: }
2633: for (i=b_aij->i[0]; i<b_aij->i[aij->B->rmap->n/bs]; i++) {
2634: for (jb=0; jb<bs; jb++) {
2635: for (ib=0; ib<bs; ib++) {
2636: int col = garray[b_aij->j[i]] * bs + jb;
2637: work[col] = PetscMax(PetscAbsScalar(*b_val), work[col]);
2638: b_val++;
2639: }
2640: }
2641: }
2642: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
2643: if (type == NORM_INFINITY) {
2644: MPIU_Allreduce(work,norms,N,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
2645: } else {
2646: MPIU_Allreduce(work,norms,N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
2647: }
2648: PetscFree(work);
2649: if (type == NORM_2) {
2650: for (i=0; i<N; i++) norms[i] = PetscSqrtReal(norms[i]);
2651: }
2652: return(0);
2653: }
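/*
   Illustrative usage sketch (editorial example, not part of mpibaij.c): the routine above is
   reached through MatGetColumnNorms(). Every process receives the norms of all N global
   columns, so the output array must have global length:

      PetscReal *norms;
      PetscInt  N;
      MatGetSize(A,NULL,&N);
      PetscMalloc1(N,&norms);
      MatGetColumnNorms(A,NORM_2,norms);
      PetscFree(norms);
*/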
2657: PetscErrorCode MatInvertBlockDiagonal_MPIBAIJ(Mat A,const PetscScalar **values)
2658: {
2659: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*) A->data;
2663: MatInvertBlockDiagonal(a->A,values);
2664: A->errortype = a->A->errortype;
2665: return(0);
2666: }
2670: PetscErrorCode MatShift_MPIBAIJ(Mat Y,PetscScalar a)
2671: {
2673: Mat_MPIBAIJ *maij = (Mat_MPIBAIJ*)Y->data;
2674: Mat_SeqBAIJ *aij = (Mat_SeqBAIJ*)maij->A->data;
2677: if (!Y->preallocated) {
2678: MatMPIBAIJSetPreallocation(Y,Y->rmap->bs,1,NULL,0,NULL);
2679: } else if (!aij->nz) {
2680: PetscInt nonew = aij->nonew;
2681: MatSeqBAIJSetPreallocation(maij->A,Y->rmap->bs,1,NULL);
2682: aij->nonew = nonew;
2683: }
2684: MatShift_Basic(Y,a);
2685: return(0);
2686: }
2690: PetscErrorCode MatMissingDiagonal_MPIBAIJ(Mat A,PetscBool *missing,PetscInt *d)
2691: {
2692: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
2696: if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2697: MatMissingDiagonal(a->A,missing,d);
2698: if (d) {
2699: PetscInt rstart;
2700: MatGetOwnershipRange(A,&rstart,NULL);
2701: *d += rstart/A->rmap->bs;
2703: }
2704: return(0);
2705: }
2707: /* -------------------------------------------------------------------*/
2708: static struct _MatOps MatOps_Values = {MatSetValues_MPIBAIJ,
2709: MatGetRow_MPIBAIJ,
2710: MatRestoreRow_MPIBAIJ,
2711: MatMult_MPIBAIJ,
2712: /* 4*/ MatMultAdd_MPIBAIJ,
2713: MatMultTranspose_MPIBAIJ,
2714: MatMultTransposeAdd_MPIBAIJ,
2715: 0,
2716: 0,
2717: 0,
2718: /*10*/ 0,
2719: 0,
2720: 0,
2721: MatSOR_MPIBAIJ,
2722: MatTranspose_MPIBAIJ,
2723: /*15*/ MatGetInfo_MPIBAIJ,
2724: MatEqual_MPIBAIJ,
2725: MatGetDiagonal_MPIBAIJ,
2726: MatDiagonalScale_MPIBAIJ,
2727: MatNorm_MPIBAIJ,
2728: /*20*/ MatAssemblyBegin_MPIBAIJ,
2729: MatAssemblyEnd_MPIBAIJ,
2730: MatSetOption_MPIBAIJ,
2731: MatZeroEntries_MPIBAIJ,
2732: /*24*/ MatZeroRows_MPIBAIJ,
2733: 0,
2734: 0,
2735: 0,
2736: 0,
2737: /*29*/ MatSetUp_MPIBAIJ,
2738: 0,
2739: 0,
2740: 0,
2741: 0,
2742: /*34*/ MatDuplicate_MPIBAIJ,
2743: 0,
2744: 0,
2745: 0,
2746: 0,
2747: /*39*/ MatAXPY_MPIBAIJ,
2748: MatGetSubMatrices_MPIBAIJ,
2749: MatIncreaseOverlap_MPIBAIJ,
2750: MatGetValues_MPIBAIJ,
2751: MatCopy_MPIBAIJ,
2752: /*44*/ 0,
2753: MatScale_MPIBAIJ,
2754: MatShift_MPIBAIJ,
2755: 0,
2756: MatZeroRowsColumns_MPIBAIJ,
2757: /*49*/ 0,
2758: 0,
2759: 0,
2760: 0,
2761: 0,
2762: /*54*/ MatFDColoringCreate_MPIXAIJ,
2763: 0,
2764: MatSetUnfactored_MPIBAIJ,
2765: MatPermute_MPIBAIJ,
2766: MatSetValuesBlocked_MPIBAIJ,
2767: /*59*/ MatGetSubMatrix_MPIBAIJ,
2768: MatDestroy_MPIBAIJ,
2769: MatView_MPIBAIJ,
2770: 0,
2771: 0,
2772: /*64*/ 0,
2773: 0,
2774: 0,
2775: 0,
2776: 0,
2777: /*69*/ MatGetRowMaxAbs_MPIBAIJ,
2778: 0,
2779: 0,
2780: 0,
2781: 0,
2782: /*74*/ 0,
2783: MatFDColoringApply_BAIJ,
2784: 0,
2785: 0,
2786: 0,
2787: /*79*/ 0,
2788: 0,
2789: 0,
2790: 0,
2791: MatLoad_MPIBAIJ,
2792: /*84*/ 0,
2793: 0,
2794: 0,
2795: 0,
2796: 0,
2797: /*89*/ 0,
2798: 0,
2799: 0,
2800: 0,
2801: 0,
2802: /*94*/ 0,
2803: 0,
2804: 0,
2805: 0,
2806: 0,
2807: /*99*/ 0,
2808: 0,
2809: 0,
2810: 0,
2811: 0,
2812: /*104*/0,
2813: MatRealPart_MPIBAIJ,
2814: MatImaginaryPart_MPIBAIJ,
2815: 0,
2816: 0,
2817: /*109*/0,
2818: 0,
2819: 0,
2820: 0,
2821: MatMissingDiagonal_MPIBAIJ,
2822: /*114*/MatGetSeqNonzeroStructure_MPIBAIJ,
2823: 0,
2824: MatGetGhosts_MPIBAIJ,
2825: 0,
2826: 0,
2827: /*119*/0,
2828: 0,
2829: 0,
2830: 0,
2831: MatGetMultiProcBlock_MPIBAIJ,
2832: /*124*/0,
2833: MatGetColumnNorms_MPIBAIJ,
2834: MatInvertBlockDiagonal_MPIBAIJ,
2835: 0,
2836: 0,
2837: /*129*/ 0,
2838: 0,
2839: 0,
2840: 0,
2841: 0,
2842: /*134*/ 0,
2843: 0,
2844: 0,
2845: 0,
2846: 0,
2847: /*139*/ 0,
2848: 0,
2849: 0,
2850: MatFDColoringSetUp_MPIXAIJ,
2851: 0,
2852: /*144*/MatCreateMPIMatConcatenateSeqMat_MPIBAIJ
2853: };
2857: PetscErrorCode MatGetDiagonalBlock_MPIBAIJ(Mat A,Mat *a)
2858: {
2860: *a = ((Mat_MPIBAIJ*)A->data)->A;
2861: return(0);
2862: }
2864: PETSC_INTERN PetscErrorCode MatConvert_MPIBAIJ_MPISBAIJ(Mat, MatType,MatReuse,Mat*);
2868: PetscErrorCode MatMPIBAIJSetPreallocationCSR_MPIBAIJ(Mat B,PetscInt bs,const PetscInt ii[],const PetscInt jj[],const PetscScalar V[])
2869: {
2870: PetscInt m,rstart,cstart,cend;
2871: PetscInt i,j,d,nz,nz_max=0,*d_nnz=0,*o_nnz=0;
2872: const PetscInt *JJ =0;
2873: PetscScalar *values=0;
2874: PetscBool roworiented = ((Mat_MPIBAIJ*)B->data)->roworiented;
2878: PetscLayoutSetBlockSize(B->rmap,bs);
2879: PetscLayoutSetBlockSize(B->cmap,bs);
2880: PetscLayoutSetUp(B->rmap);
2881: PetscLayoutSetUp(B->cmap);
2882: PetscLayoutGetBlockSize(B->rmap,&bs);
2883: m = B->rmap->n/bs;
2884: rstart = B->rmap->rstart/bs;
2885: cstart = B->cmap->rstart/bs;
2886: cend = B->cmap->rend/bs;
2888: if (ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"ii[0] must be 0 but it is %D",ii[0]);
2889: PetscMalloc2(m,&d_nnz,m,&o_nnz);
2890: for (i=0; i<m; i++) {
2891: nz = ii[i+1] - ii[i];
2892: if (nz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nz);
2893: nz_max = PetscMax(nz_max,nz);
2894: JJ = jj + ii[i];
2895: for (j=0; j<nz; j++) {
2896: if (*JJ >= cstart) break;
2897: JJ++;
2898: }
2899: d = 0;
2900: for (; j<nz; j++) {
2901: if (*JJ++ >= cend) break;
2902: d++;
2903: }
2904: d_nnz[i] = d;
2905: o_nnz[i] = nz - d;
2906: }
2907: MatMPIBAIJSetPreallocation(B,bs,0,d_nnz,0,o_nnz);
2908: PetscFree2(d_nnz,o_nnz);
2910: values = (PetscScalar*)V;
2911: if (!values) {
2912: PetscMalloc1(bs*bs*nz_max,&values);
2913: PetscMemzero(values,bs*bs*nz_max*sizeof(PetscScalar));
2914: }
2915: for (i=0; i<m; i++) {
2916: PetscInt row = i + rstart;
2917: PetscInt ncols = ii[i+1] - ii[i];
2918: const PetscInt *icols = jj + ii[i];
2919: if (!roworiented) { /* block ordering matches the non-nested layout of MatSetValues so we can insert entire rows */
2920: const PetscScalar *svals = values + (V ? (bs*bs*ii[i]) : 0);
2921: MatSetValuesBlocked_MPIBAIJ(B,1,&row,ncols,icols,svals,INSERT_VALUES);
2922: } else { /* block ordering does not match so we can only insert one block at a time. */
2923: PetscInt j;
2924: for (j=0; j<ncols; j++) {
2925: const PetscScalar *svals = values + (V ? (bs*bs*(ii[i]+j)) : 0);
2926: MatSetValuesBlocked_MPIBAIJ(B,1,&row,1,&icols[j],svals,INSERT_VALUES);
2927: }
2928: }
2929: }
2931: if (!V) { PetscFree(values); }
2932: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2933: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2934: MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
2935: return(0);
2936: }
2940: /*@C
2941: MatMPIBAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in BAIJ format
2942: (the default parallel PETSc format).
2944: Collective on MPI_Comm
2946: Input Parameters:
2947: + B - the matrix
2948: . bs - the block size
2949: . i - the indices into j for the start of each local row (starts with zero)
2950: . j - the column indices for each local row (starts with zero); these must be sorted for each row
2951: - v - optional values in the matrix
2953: Level: developer
2955: Notes: The order of the entries in values is specified by the MatOption MAT_ROW_ORIENTED. For example, C programs
2956: may want to use the default MAT_ROW_ORIENTED=PETSC_TRUE and use an array v[nnz][bs][bs] where the second index is
2957: over rows within a block and the last index is over columns within a block. Fortran programs will likely set
2958: MAT_ROW_ORIENTED=PETSC_FALSE and use a Fortran array v(bs,bs,nnz) in which the first index is over rows within a
2959: block and the second index is over columns within a block.
2961: .keywords: matrix, aij, compressed row, sparse, parallel
2963: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIBAIJSetPreallocation(), MatCreateAIJ(), MPIAIJ, MatCreateMPIBAIJWithArrays(), MPIBAIJ
2964: @*/
2965: PetscErrorCode MatMPIBAIJSetPreallocationCSR(Mat B,PetscInt bs,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
2966: {
2973: PetscTryMethod(B,"MatMPIBAIJSetPreallocationCSR_C",(Mat,PetscInt,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,bs,i,j,v));
2974: return(0);
2975: }
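/*
   Illustrative usage sketch (editorial example, not part of mpibaij.c): using the CSR-style
   interface documented above on a matrix B already created with MatCreate()/MatSetSizes()/
   MatSetType(B,MATMPIBAIJ). The block rows and indices below are made up and must match B's
   local row count; ii and jj are in block units, and passing NULL for v preallocates only:

      PetscInt bs    = 2;
      PetscInt ii[3] = {0,1,3};
      PetscInt jj[3] = {0,0,1};
      MatMPIBAIJSetPreallocationCSR(B,bs,ii,jj,NULL);
*/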
2979: PetscErrorCode MatMPIBAIJSetPreallocation_MPIBAIJ(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt *d_nnz,PetscInt o_nz,const PetscInt *o_nnz)
2980: {
2981: Mat_MPIBAIJ *b;
2983: PetscInt i;
2986: MatSetBlockSize(B,PetscAbs(bs));
2987: PetscLayoutSetUp(B->rmap);
2988: PetscLayoutSetUp(B->cmap);
2989: PetscLayoutGetBlockSize(B->rmap,&bs);
2991: if (d_nnz) {
2992: for (i=0; i<B->rmap->n/bs; i++) {
2993: if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
2994: }
2995: }
2996: if (o_nnz) {
2997: for (i=0; i<B->rmap->n/bs; i++) {
2998: if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
2999: }
3000: }
3002: b = (Mat_MPIBAIJ*)B->data;
3003: b->bs2 = bs*bs;
3004: b->mbs = B->rmap->n/bs;
3005: b->nbs = B->cmap->n/bs;
3006: b->Mbs = B->rmap->N/bs;
3007: b->Nbs = B->cmap->N/bs;
3009: for (i=0; i<=b->size; i++) {
3010: b->rangebs[i] = B->rmap->range[i]/bs;
3011: }
3012: b->rstartbs = B->rmap->rstart/bs;
3013: b->rendbs = B->rmap->rend/bs;
3014: b->cstartbs = B->cmap->rstart/bs;
3015: b->cendbs = B->cmap->rend/bs;
3017: if (!B->preallocated) {
3018: MatCreate(PETSC_COMM_SELF,&b->A);
3019: MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
3020: MatSetType(b->A,MATSEQBAIJ);
3021: PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
3022: MatCreate(PETSC_COMM_SELF,&b->B);
3023: MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
3024: MatSetType(b->B,MATSEQBAIJ);
3025: PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);
3026: MatStashCreate_Private(PetscObjectComm((PetscObject)B),bs,&B->bstash);
3027: }
3029: MatSeqBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);
3030: MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);
3031: B->preallocated = PETSC_TRUE;
3032: return(0);
3033: }
3035: extern PetscErrorCode MatDiagonalScaleLocal_MPIBAIJ(Mat,Vec);
3036: extern PetscErrorCode MatSetHashTableFactor_MPIBAIJ(Mat,PetscReal);
3040: PETSC_INTERN PetscErrorCode MatConvert_MPIBAIJ_MPIAdj(Mat B, MatType newtype,MatReuse reuse,Mat *adj)
3041: {
3042: Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)B->data;
3044: Mat_SeqBAIJ *d = (Mat_SeqBAIJ*) b->A->data,*o = (Mat_SeqBAIJ*) b->B->data;
3045: PetscInt M = B->rmap->n/B->rmap->bs,i,*ii,*jj,cnt,j,k,rstart = B->rmap->rstart/B->rmap->bs;
3046: const PetscInt *id = d->i, *jd = d->j, *io = o->i, *jo = o->j, *garray = b->garray;
3049: PetscMalloc1(M+1,&ii);
3050: ii[0] = 0;
3051: for (i=0; i<M; i++) {
3052: if ((id[i+1] - id[i]) < 0) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Indices wrong %D %D %D",i,id[i],id[i+1]);
3053: if ((io[i+1] - io[i]) < 0) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Indices wrong %D %D %D",i,io[i],io[i+1]);
3054: ii[i+1] = ii[i] + id[i+1] - id[i] + io[i+1] - io[i];
3055: /* remove one from the count if this row of the matrix has a diagonal entry */
3056: for (j=id[i]; j<id[i+1]; j++) {
3057: if (jd[j] == i) {ii[i+1]--;break;}
3058: }
3059: }
3060: PetscMalloc1(ii[M],&jj);
3061: cnt = 0;
3062: for (i=0; i<M; i++) {
3063: for (j=io[i]; j<io[i+1]; j++) {
3064: if (garray[jo[j]] > rstart) break;
3065: jj[cnt++] = garray[jo[j]];
3066: }
3067: for (k=id[i]; k<id[i+1]; k++) {
3068: if (jd[k] != i) {
3069: jj[cnt++] = rstart + jd[k];
3070: }
3071: }
3072: for (; j<io[i+1]; j++) {
3073: jj[cnt++] = garray[jo[j]];
3074: }
3075: }
3076: MatCreateMPIAdj(PetscObjectComm((PetscObject)B),M,B->cmap->N/B->rmap->bs,ii,jj,NULL,adj);
3077: return(0);
3078: }
3080: #include <../src/mat/impls/aij/mpi/mpiaij.h>
3082: PETSC_INTERN PetscErrorCode MatConvert_SeqBAIJ_SeqAIJ(Mat,MatType,MatReuse,Mat*);
3086: PETSC_INTERN PetscErrorCode MatConvert_MPIBAIJ_MPIAIJ(Mat A,MatType newtype,MatReuse reuse,Mat *newmat)
3087: {
3089: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
3090: Mat B;
3091: Mat_MPIAIJ *b;
3094: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix must be assembled");
3096: MatCreate(PetscObjectComm((PetscObject)A),&B);
3097: MatSetType(B,MATMPIAIJ);
3098: MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
3099: MatSetBlockSizes(B,A->rmap->bs,A->cmap->bs);
3100: MatSeqAIJSetPreallocation(B,0,NULL);
3101: MatMPIAIJSetPreallocation(B,0,NULL,0,NULL);
3102: b = (Mat_MPIAIJ*) B->data;
3104: MatDestroy(&b->A);
3105: MatDestroy(&b->B);
3106: MatDisAssemble_MPIBAIJ(A);
3107: MatConvert_SeqBAIJ_SeqAIJ(a->A, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->A);
3108: MatConvert_SeqBAIJ_SeqAIJ(a->B, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->B);
3109: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3110: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3111: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
3112: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
3113: if (reuse == MAT_INPLACE_MATRIX) {
3114: MatHeaderReplace(A,&B);
3115: } else {
3116: *newmat = B;
3117: }
3118: return(0);
3119: }
3121: /*MC
3122: MATMPIBAIJ - MATMPIBAIJ = "mpibaij" - A matrix type to be used for distributed block sparse matrices.
3124: Options Database Keys:
3125: + -mat_type mpibaij - sets the matrix type to "mpibaij" during a call to MatSetFromOptions()
3126: . -mat_block_size <bs> - set the blocksize used to store the matrix
3127: - -mat_use_hash_table <fact> - use a hash table during matrix assembly, with hash table factor <fact>
3129: Level: beginner
3131: .seealso: MatCreateMPIBAIJ
3132: M*/
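/*
   Illustrative usage sketch (editorial example, not part of mpibaij.c): selecting this type at
   run time through the options database keys listed above, e.g. with
   -mat_type mpibaij -mat_block_size 3 on the command line; A, M and N are assumed to be
   declared by the caller:

      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
      MatSetFromOptions(A);
      MatSetUp(A);
*/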
3134: PETSC_INTERN PetscErrorCode MatConvert_MPIBAIJ_MPIBSTRM(Mat,MatType,MatReuse,Mat*);
3138: PETSC_EXTERN PetscErrorCode MatCreate_MPIBAIJ(Mat B)
3139: {
3140: Mat_MPIBAIJ *b;
3142: PetscBool flg = PETSC_FALSE;
3145: PetscNewLog(B,&b);
3146: B->data = (void*)b;
3148: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
3149: B->assembled = PETSC_FALSE;
3151: B->insertmode = NOT_SET_VALUES;
3152: MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);
3153: MPI_Comm_size(PetscObjectComm((PetscObject)B),&b->size);
3155: /* build local table of row and column ownerships */
3156: PetscMalloc1(b->size+1,&b->rangebs);
3158: /* build cache for off array entries formed */
3159: MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);
3161: b->donotstash = PETSC_FALSE;
3162: b->colmap = NULL;
3163: b->garray = NULL;
3164: b->roworiented = PETSC_TRUE;
3166: /* stuff used in block assembly */
3167: b->barray = 0;
3169: /* stuff used for matrix vector multiply */
3170: b->lvec = 0;
3171: b->Mvctx = 0;
3173: /* stuff for MatGetRow() */
3174: b->rowindices = 0;
3175: b->rowvalues = 0;
3176: b->getrowactive = PETSC_FALSE;
3178: /* hash table stuff */
3179: b->ht = 0;
3180: b->hd = 0;
3181: b->ht_size = 0;
3182: b->ht_flag = PETSC_FALSE;
3183: b->ht_fact = 0;
3184: b->ht_total_ct = 0;
3185: b->ht_insert_ct = 0;
3187: /* stuff for MatGetSubMatrices_MPIBAIJ_local() */
3188: b->ijonly = PETSC_FALSE;
3191: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpibaij_mpiadj_C",MatConvert_MPIBAIJ_MPIAdj);
3192: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpibaij_mpiaij_C",MatConvert_MPIBAIJ_MPIAIJ);
3193: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpibaij_mpisbaij_C",MatConvert_MPIBAIJ_MPISBAIJ);
3194: PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIBAIJ);
3195: PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIBAIJ);
3196: PetscObjectComposeFunction((PetscObject)B,"MatGetDiagonalBlock_C",MatGetDiagonalBlock_MPIBAIJ);
3197: PetscObjectComposeFunction((PetscObject)B,"MatMPIBAIJSetPreallocation_C",MatMPIBAIJSetPreallocation_MPIBAIJ);
3198: PetscObjectComposeFunction((PetscObject)B,"MatMPIBAIJSetPreallocationCSR_C",MatMPIBAIJSetPreallocationCSR_MPIBAIJ);
3199: PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIBAIJ);
3200: PetscObjectComposeFunction((PetscObject)B,"MatSetHashTableFactor_C",MatSetHashTableFactor_MPIBAIJ);
3201: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpibaij_mpibstrm_C",MatConvert_MPIBAIJ_MPIBSTRM);
3202: PetscObjectChangeTypeName((PetscObject)B,MATMPIBAIJ);
3204: PetscOptionsBegin(PetscObjectComm((PetscObject)B),NULL,"Options for loading MPIBAIJ matrix 1","Mat");
3205: PetscOptionsBool("-mat_use_hash_table","Use hash table to save memory in constructing matrix","MatSetOption",flg,&flg,NULL);
3206: if (flg) {
3207: PetscReal fact = 1.39;
3208: MatSetOption(B,MAT_USE_HASH_TABLE,PETSC_TRUE);
3209: PetscOptionsReal("-mat_use_hash_table","Use hash table factor","MatMPIBAIJSetHashTableFactor",fact,&fact,NULL);
3210: if (fact <= 1.0) fact = 1.39;
3211: MatMPIBAIJSetHashTableFactor(B,fact);
3212: PetscInfo1(B,"Hash table Factor used %5.2f\n",fact);
3213: }
3214: PetscOptionsEnd();
3215: return(0);
3216: }
3218: /*MC
3219: MATBAIJ - MATBAIJ = "baij" - A matrix type to be used for block sparse matrices.
3221: This matrix type is identical to MATSEQBAIJ when constructed with a single process communicator,
3222: and MATMPIBAIJ otherwise.
3224: Options Database Keys:
3225: . -mat_type baij - sets the matrix type to "baij" during a call to MatSetFromOptions()
3227: Level: beginner
3229: .seealso: MatCreateBAIJ(),MATSEQBAIJ,MATMPIBAIJ, MatMPIBAIJSetPreallocation(), MatMPIBAIJSetPreallocationCSR()
3230: M*/
3234: /*@C
3235: MatMPIBAIJSetPreallocation - Allocates memory for a sparse parallel matrix in block AIJ format
3236: (block compressed row). For good matrix assembly performance
3237: the user should preallocate the matrix storage by setting the parameters
3238: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3239: performance can be increased by more than a factor of 50.
3241: Collective on Mat
3243: Input Parameters:
3244: + B - the matrix
3245: . bs - size of block, the blocks are ALWAYS square. One can use MatSetBlockSizes() to set a different row and column blocksize but the row
3246: blocksize always defines the size of the blocks. The column blocksize sets the blocksize of the vectors obtained with MatCreateVecs()
3247: . d_nz - number of block nonzeros per block row in diagonal portion of local
3248: submatrix (same for all local rows)
3249: . d_nnz - array containing the number of block nonzeros in the various block rows
3250: in the diagonal portion of the local submatrix (possibly different for each block
3251: row) or NULL. If you plan to factor the matrix you must leave room for the diagonal entry and
3252: set it even if it is zero.
3253: . o_nz - number of block nonzeros per block row in the off-diagonal portion of local
3254: submatrix (same for all local rows).
3255: - o_nnz - array containing the number of nonzeros in the various block rows of the
3256: off-diagonal portion of the local submatrix (possibly different for
3257: each block row) or NULL.
3259: If the *_nnz parameter is given then the *_nz parameter is ignored
3261: Options Database Keys:
3262: + -mat_block_size - size of the blocks to use
3263: - -mat_use_hash_table <fact> - use a hash table during matrix assembly, with hash table factor <fact>
3265: Notes:
3266: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
3267: then it must be used on all processors that share the object for that argument.
3269: Storage Information:
3270: For a square global matrix we define each processor's diagonal portion
3271: to be its local rows and the corresponding columns (a square submatrix);
3272: each processor's off-diagonal portion encompasses the remainder of the
3273: local matrix (a rectangular submatrix).
3275: The user can specify preallocated storage for the diagonal part of
3276: the local submatrix with either d_nz or d_nnz (not both). Set
3277: d_nz=PETSC_DEFAULT and d_nnz=NULL for PETSc to control dynamic
3278: memory allocation. Likewise, specify preallocated storage for the
3279: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
3281: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
3282: the figure below we depict these three local rows and all columns (0-11).
3284: .vb
3285: 0 1 2 3 4 5 6 7 8 9 10 11
3286: --------------------------
3287: row 3 |o o o d d d o o o o o o
3288: row 4 |o o o d d d o o o o o o
3289: row 5 |o o o d d d o o o o o o
3290: --------------------------
3291: .ve
3293: Thus, any entries in the d locations are stored in the d (diagonal)
3294: submatrix, and any entries in the o locations are stored in the
3295: o (off-diagonal) submatrix. Note that the d and the o submatrices are
3296: stored simply in the MATSEQBAIJ format for compressed row storage.
3298: Now d_nz should indicate the number of block nonzeros per row in the d matrix,
3299: and o_nz should indicate the number of block nonzeros per row in the o matrix.
3300: In general, for PDE problems in which most nonzeros are near the diagonal,
3301: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
3302: or you will get TERRIBLE performance; see the users' manual chapter on
3303: matrices.
3305: You can call MatGetInfo() to get information on how effective the preallocation was;
3306: for example the fields mallocs, nz_allocated, nz_used, and nz_unneeded.
3307: You can also run with the option -info and look for messages with the string
3308: malloc in them to see if additional memory allocation was needed.
3310: Level: intermediate
3312: .keywords: matrix, block, aij, compressed row, sparse, parallel
3314: .seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateBAIJ(), MatMPIBAIJSetPreallocationCSR(), PetscSplitOwnership()
3315: @*/
3316: PetscErrorCode MatMPIBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
3317: {
3324: PetscTryMethod(B,"MatMPIBAIJSetPreallocation_C",(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,bs,d_nz,d_nnz,o_nz,o_nnz));
3325: return(0);
3326: }
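/*
   Illustrative usage sketch (editorial example, not part of mpibaij.c): preallocating as
   documented above and then inserting one diagonal block with blocked insertion. All sizes
   and nonzero counts are made up: each process owns 10 block rows of 3x3 blocks, with at most
   5 blocks per block row in the diagonal portion and 2 in the off-diagonal portion:

      Mat         A;
      PetscInt    bs = 3,mlocal = 10*bs,rstart,brow;
      PetscScalar vals[9] = {1,0,0, 0,1,0, 0,0,1};
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,mlocal,mlocal,PETSC_DETERMINE,PETSC_DETERMINE);
      MatSetType(A,MATMPIBAIJ);
      MatMPIBAIJSetPreallocation(A,bs,5,NULL,2,NULL);
      MatGetOwnershipRange(A,&rstart,NULL);
      brow = rstart/bs;
      MatSetValuesBlocked(A,1,&brow,1,&brow,vals,INSERT_VALUES);
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/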
3330: /*@C
3331: MatCreateBAIJ - Creates a sparse parallel matrix in block AIJ format
3332: (block compressed row). For good matrix assembly performance
3333: the user should preallocate the matrix storage by setting the parameters
3334: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3335: performance can be increased by more than a factor of 50.
3337: Collective on MPI_Comm
3339: Input Parameters:
3340: + comm - MPI communicator
3341: . bs - size of block, the blocks are ALWAYS square. One can use MatSetBlockSizes() to set a different row and column blocksize but the row
3342: blocksize always defines the size of the blocks. The column blocksize sets the blocksize of the vectors obtained with MatCreateVecs()
3343: . m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
3344: This value should be the same as the local size used in creating the
3345: y vector for the matrix-vector product y = Ax.
3346: . n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
3347: This value should be the same as the local size used in creating the
3348: x vector for the matrix-vector product y = Ax.
3349: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3350: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3351: . d_nz - number of nonzero blocks per block row in diagonal portion of local
3352: submatrix (same for all local rows)
3353: . d_nnz - array containing the number of nonzero blocks in the various block rows
3354: in the diagonal portion of the local submatrix (possibly different for each block
3355: row) or NULL. If you plan to factor the matrix you must leave room for the diagonal entry
3356: and set it even if it is zero.
3357: . o_nz - number of nonzero blocks per block row in the off-diagonal portion of local
3358: submatrix (same for all local rows).
3359: - o_nnz - array containing the number of nonzero blocks in the various block rows of the
3360: off-diagonal portion of the local submatrix (possibly different for
3361: each block row) or NULL.
3363: Output Parameter:
3364: . A - the matrix
3366: Options Database Keys:
3367: + -mat_block_size - size of the blocks to use
3368: - -mat_use_hash_table <fact> - use a hash table during matrix assembly, with hash table factor <fact>
3370: It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
3371: MatXXXXSetPreallocation() paradigm instead of this routine directly.
3372: [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
3374: Notes:
3375: If the *_nnz parameter is given then the *_nz parameter is ignored
3377: A nonzero block is any block that has 1 or more nonzeros in it
3379: The user MUST specify either the local or global matrix dimensions
3380: (possibly both).
3382: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
3383: then it must be used on all processors that share the object for that argument.
3385: Storage Information:
3386: For a square global matrix we define each processor's diagonal portion
3387: to be its local rows and the corresponding columns (a square submatrix);
3388: each processor's off-diagonal portion encompasses the remainder of the
3389: local matrix (a rectangular submatrix).
3391: The user can specify preallocated storage for the diagonal part of
3392: the local submatrix with either d_nz or d_nnz (not both). Set
3393: d_nz=PETSC_DEFAULT and d_nnz=NULL for PETSc to control dynamic
3394: memory allocation. Likewise, specify preallocated storage for the
3395: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
3397: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
3398: the figure below we depict these three local rows and all columns (0-11).
3400: .vb
3401: 0 1 2 3 4 5 6 7 8 9 10 11
3402: --------------------------
3403: row 3 |o o o d d d o o o o o o
3404: row 4 |o o o d d d o o o o o o
3405: row 5 |o o o d d d o o o o o o
3406: --------------------------
3407: .ve
3409: Thus, any entries in the d locations are stored in the d (diagonal)
3410: submatrix, and any entries in the o locations are stored in the
3411: o (off-diagonal) submatrix. Note that the d and the o submatrices are
3412: stored simply in the MATSEQBAIJ format for compressed row storage.
3414: Now d_nz should indicate the number of block nonzeros per row in the d matrix,
3415: and o_nz should indicate the number of block nonzeros per row in the o matrix.
3416: In general, for PDE problems in which most nonzeros are near the diagonal,
3417: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
3418: or you will get TERRIBLE performance; see the users' manual chapter on
3419: matrices.
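   Example (an illustrative sketch; the sizes and counts below are arbitrary): with bs=2,
   20 local rows and columns per process, and at most 5 diagonal and 2 off-diagonal nonzero
   blocks per block row, one might create the matrix with

.vb
   Mat A;
   MatCreateBAIJ(comm,2,20,20,PETSC_DETERMINE,PETSC_DETERMINE,5,NULL,2,NULL,&A);
   /* set entries with MatSetValuesBlocked() or MatSetValues(), then assemble */
   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve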
3421: Level: intermediate
3423: .keywords: matrix, block, aij, compressed row, sparse, parallel
3425: .seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateBAIJ(), MatMPIBAIJSetPreallocation(), MatMPIBAIJSetPreallocationCSR()
3426: @*/
3427: PetscErrorCode MatCreateBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
3428: {
3430: PetscMPIInt size;
3433: MatCreate(comm,A);
3434: MatSetSizes(*A,m,n,M,N);
3435: MPI_Comm_size(comm,&size);
3436: if (size > 1) {
3437: MatSetType(*A,MATMPIBAIJ);
3438: MatMPIBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);
3439: } else {
3440: MatSetType(*A,MATSEQBAIJ);
3441: MatSeqBAIJSetPreallocation(*A,bs,d_nz,d_nnz);
3442: }
3443: return(0);
3444: }
3448: static PetscErrorCode MatDuplicate_MPIBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
3449: {
3450: Mat mat;
3451: Mat_MPIBAIJ *a,*oldmat = (Mat_MPIBAIJ*)matin->data;
3453: PetscInt len=0;
3456: *newmat = 0;
3457: MatCreate(PetscObjectComm((PetscObject)matin),&mat);
3458: MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
3459: MatSetType(mat,((PetscObject)matin)->type_name);
3460: PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
3462: mat->factortype = matin->factortype;
3463: mat->preallocated = PETSC_TRUE;
3464: mat->assembled = PETSC_TRUE;
3465: mat->insertmode = NOT_SET_VALUES;
3467: a = (Mat_MPIBAIJ*)mat->data;
3468: mat->rmap->bs = matin->rmap->bs;
3469: a->bs2 = oldmat->bs2;
3470: a->mbs = oldmat->mbs;
3471: a->nbs = oldmat->nbs;
3472: a->Mbs = oldmat->Mbs;
3473: a->Nbs = oldmat->Nbs;
3475: PetscLayoutReference(matin->rmap,&mat->rmap);
3476: PetscLayoutReference(matin->cmap,&mat->cmap);
3478: a->size = oldmat->size;
3479: a->rank = oldmat->rank;
3480: a->donotstash = oldmat->donotstash;
3481: a->roworiented = oldmat->roworiented;
3482: a->rowindices = 0;
3483: a->rowvalues = 0;
3484: a->getrowactive = PETSC_FALSE;
3485: a->barray = 0;
3486: a->rstartbs = oldmat->rstartbs;
3487: a->rendbs = oldmat->rendbs;
3488: a->cstartbs = oldmat->cstartbs;
3489: a->cendbs = oldmat->cendbs;
3491: /* hash table stuff */
3492: a->ht = 0;
3493: a->hd = 0;
3494: a->ht_size = 0;
3495: a->ht_flag = oldmat->ht_flag;
3496: a->ht_fact = oldmat->ht_fact;
3497: a->ht_total_ct = 0;
3498: a->ht_insert_ct = 0;
3500: PetscMemcpy(a->rangebs,oldmat->rangebs,(a->size+1)*sizeof(PetscInt));
3501: if (oldmat->colmap) {
3502: #if defined(PETSC_USE_CTABLE)
3503: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
3504: #else
3505: PetscMalloc1(a->Nbs,&a->colmap);
3506: PetscLogObjectMemory((PetscObject)mat,(a->Nbs)*sizeof(PetscInt));
3507: PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));
3508: #endif
3509: } else a->colmap = 0;
3511: if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
3512: PetscMalloc1(len,&a->garray);
3513: PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
3514: PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));
3515: } else a->garray = 0;
3517: MatStashCreate_Private(PetscObjectComm((PetscObject)matin),matin->rmap->bs,&mat->bstash);
3518: VecDuplicate(oldmat->lvec,&a->lvec);
3519: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
3520: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
3521: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);
3523: MatDuplicate(oldmat->A,cpvalues,&a->A);
3524: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
3525: MatDuplicate(oldmat->B,cpvalues,&a->B);
3526: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
3527: PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
3528: *newmat = mat;
3529: return(0);
3530: }
3534: PetscErrorCode MatLoad_MPIBAIJ(Mat newmat,PetscViewer viewer)
3535: {
3537: int fd;
3538: PetscInt i,nz,j,rstart,rend;
3539: PetscScalar *vals,*buf;
3540: MPI_Comm comm;
3541: MPI_Status status;
3542: PetscMPIInt rank,size,maxnz;
3543: PetscInt header[4],*rowlengths = 0,M,N,m,*rowners,*cols;
3544: PetscInt *locrowlens = NULL,*procsnz = NULL,*browners = NULL;
3545: PetscInt jj,*mycols,*ibuf,bs = newmat->rmap->bs,Mbs,mbs,extra_rows,mmax;
3546: PetscMPIInt tag = ((PetscObject)viewer)->tag;
3547: PetscInt *dlens = NULL,*odlens = NULL,*mask = NULL,*masked1 = NULL,*masked2 = NULL,rowcount,odcount;
3548: PetscInt dcount,kmax,k,nzcount,tmp,mend;
3551: /* force binary viewer to load .info file if it has not yet done so */
3552: PetscViewerSetUp(viewer);
3553: PetscObjectGetComm((PetscObject)viewer,&comm);
3554: PetscOptionsBegin(comm,NULL,"Options for loading MPIBAIJ matrix 2","Mat");
3555: PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,NULL);
3556: PetscOptionsEnd();
3557: if (bs < 0) bs = 1;
3559: MPI_Comm_size(comm,&size);
3560: MPI_Comm_rank(comm,&rank);
3561: PetscViewerBinaryGetDescriptor(viewer,&fd);
3562: if (!rank) {
3563: PetscBinaryRead(fd,(char*)header,4,PETSC_INT);
3564: if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
3565:     if (header[3] < 0) SETERRQ(PetscObjectComm((PetscObject)newmat),PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MPIBAIJ");
3566: }
3567: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
3568: M = header[1]; N = header[2];
3570: /* If global sizes are set, check if they are consistent with that given in the file */
3571: if (newmat->rmap->N >= 0 && newmat->rmap->N != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows:Matrix in file has (%D) and input matrix has (%D)",newmat->rmap->N,M);
3572: if (newmat->cmap->N >= 0 && newmat->cmap->N != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of cols:Matrix in file has (%D) and input matrix has (%D)",newmat->cmap->N,N);
3574: if (M != N) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"Can only do square matrices");
3576: /*
3577: This code adds extra rows to make sure the number of rows is
3578: divisible by the blocksize
3579: */
3580: Mbs = M/bs;
3581: extra_rows = bs - M + bs*Mbs;
3582: if (extra_rows == bs) extra_rows = 0;
3583: else Mbs++;
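  /* For example, with M = 10 and bs = 4: Mbs = 10/4 = 2, extra_rows = 4 - 10 + 4*2 = 2, and Mbs
     becomes 3; the padded rows are given a unit diagonal entry further below so that the padded
     matrix remains nonsingular. */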
3584: if (extra_rows && !rank) {
3585: PetscInfo(viewer,"Padding loaded matrix to match blocksize\n");
3586: }
3588: /* determine ownership of all rows */
3589: if (newmat->rmap->n < 0) { /* PETSC_DECIDE */
3590: mbs = Mbs/size + ((Mbs % size) > rank);
3591: m = mbs*bs;
3592: } else { /* User set */
3593: m = newmat->rmap->n;
3594: mbs = m/bs;
3595: }
3596: PetscMalloc2(size+1,&rowners,size+1,&browners);
3597: MPI_Allgather(&mbs,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
3599: /* process 0 needs enough room for process with most rows */
3600: if (!rank) {
3601: mmax = rowners[1];
3602: for (i=2; i<=size; i++) {
3603: mmax = PetscMax(mmax,rowners[i]);
3604: }
3605: mmax*=bs;
3606: } else mmax = -1; /* unused, but compiler warns anyway */
3608: rowners[0] = 0;
3609: for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
3610: for (i=0; i<=size; i++) browners[i] = rowners[i]*bs;
3611: rstart = rowners[rank];
3612: rend = rowners[rank+1];
3614: /* distribute row lengths to all processors */
3615: PetscMalloc1(m,&locrowlens);
3616: if (!rank) {
3617: mend = m;
3618: if (size == 1) mend = mend - extra_rows;
3619: PetscBinaryRead(fd,locrowlens,mend,PETSC_INT);
3620: for (j=mend; j<m; j++) locrowlens[j] = 1;
3621: PetscMalloc1(mmax,&rowlengths);
3622: PetscCalloc1(size,&procsnz);
3623: for (j=0; j<m; j++) {
3624: procsnz[0] += locrowlens[j];
3625: }
3626: for (i=1; i<size; i++) {
3627: mend = browners[i+1] - browners[i];
3628: if (i == size-1) mend = mend - extra_rows;
3629: PetscBinaryRead(fd,rowlengths,mend,PETSC_INT);
3630: for (j=mend; j<browners[i+1] - browners[i]; j++) rowlengths[j] = 1;
3631: /* calculate the number of nonzeros on each processor */
3632: for (j=0; j<browners[i+1]-browners[i]; j++) {
3633: procsnz[i] += rowlengths[j];
3634: }
3635: MPI_Send(rowlengths,browners[i+1]-browners[i],MPIU_INT,i,tag,comm);
3636: }
3637: PetscFree(rowlengths);
3638: } else {
3639: MPI_Recv(locrowlens,m,MPIU_INT,0,tag,comm,&status);
3640: }
3642: if (!rank) {
3643: /* determine max buffer needed and allocate it */
3644: maxnz = procsnz[0];
3645: for (i=1; i<size; i++) {
3646: maxnz = PetscMax(maxnz,procsnz[i]);
3647: }
3648: PetscMalloc1(maxnz,&cols);
3650: /* read in my part of the matrix column indices */
3651: nz = procsnz[0];
3652: PetscMalloc1(nz+1,&ibuf);
3653: mycols = ibuf;
3654: if (size == 1) nz -= extra_rows;
3655: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
3656: if (size == 1) {
3657: for (i=0; i< extra_rows; i++) mycols[nz+i] = M+i;
3658: }
3660:     /* read in the column indices for the other processes (except the last one) and ship them off */
3661: for (i=1; i<size-1; i++) {
3662: nz = procsnz[i];
3663: PetscBinaryRead(fd,cols,nz,PETSC_INT);
3664: MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
3665: }
3666:     /* read in the column indices for the last process */
3667: if (size != 1) {
3668: nz = procsnz[size-1] - extra_rows; /* the extra rows are not on the disk */
3669: PetscBinaryRead(fd,cols,nz,PETSC_INT);
3670: for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
3671: MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);
3672: }
3673: PetscFree(cols);
3674: } else {
3675: /* determine buffer space needed for message */
3676: nz = 0;
3677: for (i=0; i<m; i++) {
3678: nz += locrowlens[i];
3679: }
3680: PetscMalloc1(nz+1,&ibuf);
3681: mycols = ibuf;
3682:     /* receive message of column indices */
3683: MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
3684: MPI_Get_count(&status,MPIU_INT,&maxnz);
3685: if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
3686: }
3688:   /* loop over local rows, determining the number of off-diagonal entries */
3689: PetscMalloc2(rend-rstart,&dlens,rend-rstart,&odlens);
3690: PetscCalloc3(Mbs,&mask,Mbs,&masked1,Mbs,&masked2);
3691: rowcount = 0; nzcount = 0;
3692: for (i=0; i<mbs; i++) {
3693: dcount = 0;
3694: odcount = 0;
3695: for (j=0; j<bs; j++) {
3696: kmax = locrowlens[rowcount];
3697: for (k=0; k<kmax; k++) {
3698: tmp = mycols[nzcount++]/bs;
3699: if (!mask[tmp]) {
3700: mask[tmp] = 1;
3701: if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp;
3702: else masked1[dcount++] = tmp;
3703: }
3704: }
3705: rowcount++;
3706: }
3708: dlens[i] = dcount;
3709: odlens[i] = odcount;
3711: /* zero out the mask elements we set */
3712: for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
3713: for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
3714: }
3716: MatSetSizes(newmat,m,m,M+extra_rows,N+extra_rows);
3717: MatMPIBAIJSetPreallocation(newmat,bs,0,dlens,0,odlens);
3719: if (!rank) {
3720: PetscMalloc1(maxnz+1,&buf);
3721: /* read in my part of the matrix numerical values */
3722: nz = procsnz[0];
3723: vals = buf;
3724: mycols = ibuf;
3725: if (size == 1) nz -= extra_rows;
3726: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3727: if (size == 1) {
3728: for (i=0; i< extra_rows; i++) vals[nz+i] = 1.0;
3729: }
3731: /* insert into matrix */
3732: jj = rstart*bs;
3733: for (i=0; i<m; i++) {
3734: MatSetValues_MPIBAIJ(newmat,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
3735: mycols += locrowlens[i];
3736: vals += locrowlens[i];
3737: jj++;
3738: }
3739:     /* read in the other processes' values (except the last one) and ship them out */
3740: for (i=1; i<size-1; i++) {
3741: nz = procsnz[i];
3742: vals = buf;
3743: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3744: MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newmat)->tag,comm);
3745: }
3746: /* the last proc */
3747: if (size != 1) {
3748: nz = procsnz[i] - extra_rows;
3749: vals = buf;
3750: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3751: for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
3752: MPIULong_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,((PetscObject)newmat)->tag,comm);
3753: }
3754: PetscFree(procsnz);
3755: } else {
3756: /* receive numeric values */
3757: PetscMalloc1(nz+1,&buf);
3759:     /* receive message of values */
3760: vals = buf;
3761: mycols = ibuf;
3762: MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newmat)->tag,comm);
3764: /* insert into matrix */
3765: jj = rstart*bs;
3766: for (i=0; i<m; i++) {
3767: MatSetValues_MPIBAIJ(newmat,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
3768: mycols += locrowlens[i];
3769: vals += locrowlens[i];
3770: jj++;
3771: }
3772: }
3773: PetscFree(locrowlens);
3774: PetscFree(buf);
3775: PetscFree(ibuf);
3776: PetscFree2(rowners,browners);
3777: PetscFree2(dlens,odlens);
3778: PetscFree3(mask,masked1,masked2);
3779: MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
3780: MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);
3781: return(0);
3782: }
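/*
   A minimal usage sketch (not taken from this file; the file name and block size are
   illustrative): loading a matrix previously written with MatView() into an MPIBAIJ matrix.

     Mat         A;
     PetscViewer viewer;
     PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetType(A,MATMPIBAIJ);
     MatSetBlockSize(A,2);
     MatLoad(A,viewer);
     PetscViewerDestroy(&viewer);

   The block size can also be supplied at run time with the -matload_block_size option read above.
*/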
3786: /*@
3787: MatMPIBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
3789: Input Parameters:
3790: +  mat - the matrix
3791: -  fact - factor
3793: Not Collective, each process can use a different factor
3795: Level: advanced
3797: Notes:
3798: This can also be set by the command line option: -mat_use_hash_table <fact>
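   For example (an illustrative sketch; the factor value is arbitrary):

.vb
   MatSetOption(mat,MAT_USE_HASH_TABLE,PETSC_TRUE);
   MatMPIBAIJSetHashTableFactor(mat,1.99);
.ve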
3800: .keywords: matrix, hashtable, factor, HT
3802: .seealso: MatSetOption()
3803: @*/
3804: PetscErrorCode MatMPIBAIJSetHashTableFactor(Mat mat,PetscReal fact)
3805: {
3809: PetscTryMethod(mat,"MatSetHashTableFactor_C",(Mat,PetscReal),(mat,fact));
3810: return(0);
3811: }
3815: PetscErrorCode MatSetHashTableFactor_MPIBAIJ(Mat mat,PetscReal fact)
3816: {
3817: Mat_MPIBAIJ *baij;
3820: baij = (Mat_MPIBAIJ*)mat->data;
3821: baij->ht_fact = fact;
3822: return(0);
3823: }
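/*
   MatMPIBAIJGetSeqBAIJ gives access to the two sequential matrices that make up the parallel one:
   Ad is the diagonal portion, Ao is the off-diagonal portion, and colmap maps the local block
   columns of Ao to global block column numbers.
*/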
3827: PetscErrorCode MatMPIBAIJGetSeqBAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
3828: {
3829: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
3832: if (Ad) *Ad = a->A;
3833: if (Ao) *Ao = a->B;
3834: if (colmap) *colmap = a->garray;
3835: return(0);
3836: }
3838: /*
3839:     Special version for direct calls from Fortran (to eliminate two function call overheads)
3840: */
3841: #if defined(PETSC_HAVE_FORTRAN_CAPS)
3842: #define matmpibaijsetvaluesblocked_ MATMPIBAIJSETVALUESBLOCKED
3843: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
3844: #define matmpibaijsetvaluesblocked_ matmpibaijsetvaluesblocked
3845: #endif
3849: /*@C
3850: MatMPIBAIJSetValuesBlocked - Direct Fortran call to replace call to MatSetValuesBlocked()
3852: Collective on Mat
3854: Input Parameters:
3855: + mat - the matrix
3856: . min - number of input rows
3857: . im - input rows
3858: . nin - number of input columns
3859: . in - input columns
3860: . v - numerical values input
3861: - addvin - INSERT_VALUES or ADD_VALUES
3863: Notes: This has a complete copy of MatSetValuesBlocked_MPIBAIJ() which is terrible code un-reuse.
3865: Level: advanced
3867: .seealso: MatSetValuesBlocked()
3868: @*/
3869: PetscErrorCode matmpibaijsetvaluesblocked_(Mat *matin,PetscInt *min,const PetscInt im[],PetscInt *nin,const PetscInt in[],const MatScalar v[],InsertMode *addvin)
3870: {
3871: /* convert input arguments to C version */
3872: Mat mat = *matin;
3873: PetscInt m = *min, n = *nin;
3874: InsertMode addv = *addvin;
3876: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
3877: const MatScalar *value;
3878: MatScalar *barray = baij->barray;
3879: PetscBool roworiented = baij->roworiented;
3880: PetscErrorCode ierr;
3881: PetscInt i,j,ii,jj,row,col,rstart=baij->rstartbs;
3882: PetscInt rend=baij->rendbs,cstart=baij->cstartbs,stepval;
3883: PetscInt cend=baij->cendbs,bs=mat->rmap->bs,bs2=baij->bs2;
3886: /* tasks normally handled by MatSetValuesBlocked() */
3887: if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
3888: #if defined(PETSC_USE_DEBUG)
3889: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
3890: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3891: #endif
3892: if (mat->assembled) {
3893: mat->was_assembled = PETSC_TRUE;
3894: mat->assembled = PETSC_FALSE;
3895: }
3896: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
3899: if (!barray) {
3900: PetscMalloc1(bs2,&barray);
3901: baij->barray = barray;
3902: }
3904: if (roworiented) stepval = (n-1)*bs;
3905: else stepval = (m-1)*bs;
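  /* stepval is the stride (in scalars) between successive rows (row-oriented input) or columns
     (column-oriented input) of one bs x bs block inside v: after copying bs values of a block,
     the next row/column of the same block starts stepval entries further on. */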
3907: for (i=0; i<m; i++) {
3908: if (im[i] < 0) continue;
3909: #if defined(PETSC_USE_DEBUG)
3910: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
3911: #endif
3912: if (im[i] >= rstart && im[i] < rend) {
3913: row = im[i] - rstart;
3914: for (j=0; j<n; j++) {
3915: /* If NumCol = 1 then a copy is not required */
3916: if ((roworiented) && (n == 1)) {
3917: barray = (MatScalar*)v + i*bs2;
3918: } else if ((!roworiented) && (m == 1)) {
3919: barray = (MatScalar*)v + j*bs2;
3920: } else { /* Here a copy is required */
3921: if (roworiented) {
3922: value = v + i*(stepval+bs)*bs + j*bs;
3923: } else {
3924: value = v + j*(stepval+bs)*bs + i*bs;
3925: }
3926: for (ii=0; ii<bs; ii++,value+=stepval) {
3927: for (jj=0; jj<bs; jj++) {
3928: *barray++ = *value++;
3929: }
3930: }
3931: barray -=bs2;
3932: }
3934: if (in[j] >= cstart && in[j] < cend) {
3935: col = in[j] - cstart;
3936: MatSetValuesBlocked_SeqBAIJ_Inlined(baij->A,row,col,barray,addv,im[i],in[j]);
3937: } else if (in[j] < 0) continue;
3938: #if defined(PETSC_USE_DEBUG)
3939: else if (in[j] >= baij->Nbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);
3940: #endif
3941: else {
3942: if (mat->was_assembled) {
3943: if (!baij->colmap) {
3944: MatCreateColmap_MPIBAIJ_Private(mat);
3945: }
3947: #if defined(PETSC_USE_DEBUG)
3948: #if defined(PETSC_USE_CTABLE)
3949: { PetscInt data;
3950: PetscTableFind(baij->colmap,in[j]+1,&data);
3951: if ((data - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
3952: }
3953: #else
3954: if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
3955: #endif
3956: #endif
3957: #if defined(PETSC_USE_CTABLE)
3958: PetscTableFind(baij->colmap,in[j]+1,&col);
3959: col = (col - 1)/bs;
3960: #else
3961: col = (baij->colmap[in[j]] - 1)/bs;
3962: #endif
3963: if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
3964: MatDisAssemble_MPIBAIJ(mat);
3965: col = in[j];
3966: }
3967: } else col = in[j];
3968: MatSetValuesBlocked_SeqBAIJ_Inlined(baij->B,row,col,barray,addv,im[i],in[j]);
3969: }
3970: }
3971: } else {
3972: if (!baij->donotstash) {
3973: if (roworiented) {
3974: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
3975: } else {
3976: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
3977: }
3978: }
3979: }
3980: }
3982: /* task normally handled by MatSetValuesBlocked() */
3983: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
3984: return(0);
3985: }
3989: /*@
3990:    MatCreateMPIBAIJWithArrays - creates an MPI BAIJ matrix using arrays that contain the local
3991:    rows in standard CSR format.
3993: Collective on MPI_Comm
3995: Input Parameters:
3996: + comm - MPI communicator
3997: . bs - the block size, only a block size of 1 is supported
3998: . m - number of local rows (Cannot be PETSC_DECIDE)
3999: .  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
4000:        This value should be the same as the local size used in creating the
4001:        x vector for the matrix-vector product y = Ax. For square matrices n is almost always m.
4002: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4003: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4004: . i - row indices
4005: . j - column indices
4006: - a - matrix values
4008: Output Parameter:
4009: . mat - the matrix
4011: Level: intermediate
4013: Notes:
4014: The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
4015: thus you CANNOT change the matrix entries by changing the values of a[] after you have
4016: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
4018: The order of the entries in values is the same as the block compressed sparse row storage format; that is, it is
4019: the same as a three dimensional array in Fortran values(bs,bs,nnz) that contains the first column of the first
4020:        block, followed by the second column of the first block, and so on. That is, the blocks are contiguous in memory
4021: with column-major ordering within blocks.
4023:      The i and j indices are 0 based, and the entries of i are offsets into the local j array.
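   As a small illustration (one process, bs = 1, and a 3 x 3 matrix whose nonzeros are
   (0,0)=1, (0,1)=2, (1,1)=3, (2,0)=4, (2,1)=5, (2,2)=6), the arrays would be

.vb
   Mat         A;
   PetscInt    i[] = {0,2,3,6};
   PetscInt    j[] = {0,1,1,0,1,2};
   PetscScalar a[] = {1,2,3,4,5,6};
   MatCreateMPIBAIJWithArrays(PETSC_COMM_WORLD,1,3,3,3,3,i,j,a,&A);
.ve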
4025: .keywords: matrix, aij, compressed row, sparse, parallel
4027: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4028: MPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays()
4029: @*/
4030: PetscErrorCode MatCreateMPIBAIJWithArrays(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4031: {
4035: if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4036: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4037: MatCreate(comm,mat);
4038: MatSetSizes(*mat,m,n,M,N);
4039:   MatSetType(*mat,MATMPIBAIJ);
4040: MatSetOption(*mat,MAT_ROW_ORIENTED,PETSC_FALSE);
4041: MatMPIBAIJSetPreallocationCSR(*mat,bs,i,j,a);
4042: MatSetOption(*mat,MAT_ROW_ORIENTED,PETSC_TRUE);
4043: return(0);
4044: }
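/*
   Concatenates the sequential BAIJ matrices supplied by the processes of comm (stacked by rows)
   into one parallel MPIBAIJ matrix: a symbolic pass computes the block preallocation with the
   MatPreallocate macros, and a numeric pass copies the rows over with MatSetValues().
*/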
4048: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIBAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4049: {
4051: PetscInt m,N,i,rstart,nnz,Ii,bs,cbs;
4052: PetscInt *indx;
4053: PetscScalar *values;
4056: MatGetSize(inmat,&m,&N);
4057: if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4058: Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)inmat->data;
4059: PetscInt *dnz,*onz,sum,mbs,Nbs;
4060: PetscInt *bindx,rmax=a->rmax,j;
4061:
4062: MatGetBlockSizes(inmat,&bs,&cbs);
4063: mbs = m/bs; Nbs = N/cbs;
4064: if (n == PETSC_DECIDE) {
4065: PetscSplitOwnership(comm,&n,&Nbs);
4066: }
4067: /* Check sum(n) = Nbs */
4068: MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
4069:     if (sum != Nbs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns != global columns %D",Nbs);
4071: MPI_Scan(&mbs, &rstart,1,MPIU_INT,MPI_SUM,comm);
4072: rstart -= mbs;
4074: PetscMalloc1(rmax,&bindx);
4075: MatPreallocateInitialize(comm,mbs,n,dnz,onz);
4076: for (i=0; i<mbs; i++) {
4077: MatGetRow_SeqBAIJ(inmat,i*bs,&nnz,&indx,NULL); /* non-blocked nnz and indx */
4078: nnz = nnz/bs;
4079: for (j=0; j<nnz; j++) bindx[j] = indx[j*bs]/bs;
4080: MatPreallocateSet(i+rstart,nnz,bindx,dnz,onz);
4081: MatRestoreRow_SeqBAIJ(inmat,i*bs,&nnz,&indx,NULL);
4082: }
4083: PetscFree(bindx);
4085: MatCreate(comm,outmat);
4086: MatSetSizes(*outmat,m,n*bs,PETSC_DETERMINE,PETSC_DETERMINE);
4087: MatSetBlockSizes(*outmat,bs,cbs);
4088: MatSetType(*outmat,MATMPIBAIJ);
4089: MatMPIBAIJSetPreallocation(*outmat,bs,0,dnz,0,onz);
4090: MatPreallocateFinalize(dnz,onz);
4091: }
4092:
4093: /* numeric phase */
4094: MatGetBlockSizes(inmat,&bs,&cbs);
4095: MatGetOwnershipRange(*outmat,&rstart,NULL);
4097: for (i=0; i<m; i++) {
4098: MatGetRow_SeqBAIJ(inmat,i,&nnz,&indx,&values);
4099: Ii = i + rstart;
4100: MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4101: MatRestoreRow_SeqBAIJ(inmat,i,&nnz,&indx,&values);
4102: }
4103: MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4104: MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4105: return(0);
4106: }