Actual source code: mpibaij.c
petsc-3.3-p7 2013-05-11
2: #include <../src/mat/impls/baij/mpi/mpibaij.h> /*I "petscmat.h" I*/
3: #include <petscblaslapack.h>
5: extern PetscErrorCode MatSetUpMultiply_MPIBAIJ(Mat);
6: extern PetscErrorCode MatDisAssemble_MPIBAIJ(Mat);
7: extern PetscErrorCode MatGetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt [],PetscScalar []);
8: extern PetscErrorCode MatSetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt [],const PetscScalar [],InsertMode);
9: extern PetscErrorCode MatSetValuesBlocked_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
10: extern PetscErrorCode MatGetRow_SeqBAIJ(Mat,PetscInt,PetscInt*,PetscInt*[],PetscScalar*[]);
11: extern PetscErrorCode MatRestoreRow_SeqBAIJ(Mat,PetscInt,PetscInt*,PetscInt*[],PetscScalar*[]);
12: extern PetscErrorCode MatZeroRows_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscScalar,Vec,Vec);
16: PetscErrorCode MatGetRowMaxAbs_MPIBAIJ(Mat A,Vec v,PetscInt idx[])
17: {
18: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
20: PetscInt i,*idxb = 0;
21: PetscScalar *va,*vb;
22: Vec vtmp;
25: MatGetRowMaxAbs(a->A,v,idx);
26: VecGetArray(v,&va);
27: if (idx) {
28: for (i=0; i<A->rmap->n; i++) {if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;}
29: }
31: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
32: if (idx) {PetscMalloc(A->rmap->n*sizeof(PetscInt),&idxb);}
33: MatGetRowMaxAbs(a->B,vtmp,idxb);
34: VecGetArray(vtmp,&vb);
36: for (i=0; i<A->rmap->n; i++){
37: if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {va[i] = vb[i]; if (idx) idx[i] = A->cmap->bs*a->garray[idxb[i]/A->cmap->bs] + (idxb[i] % A->cmap->bs);}
38: }
40: VecRestoreArray(v,&va);
41: VecRestoreArray(vtmp,&vb);
42: PetscFree(idxb);
43: VecDestroy(&vtmp);
44: return(0);
45: }
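/*
   A minimal caller-side sketch of the routine above; the communicator,
   sizes, and variable names are illustrative, not part of this file:

       Vec      rmax;
       PetscInt *cols;
       VecCreateMPI(PETSC_COMM_WORLD,m,PETSC_DETERMINE,&rmax);
       PetscMalloc(m*sizeof(PetscInt),&cols);
       MatGetRowMaxAbs(mat,rmax,cols);
       ...  cols[i] now holds the global column of the max in local row i
       PetscFree(cols);
       VecDestroy(&rmax);
*/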
47: EXTERN_C_BEGIN
50: PetscErrorCode MatStoreValues_MPIBAIJ(Mat mat)
51: {
52: Mat_MPIBAIJ *aij = (Mat_MPIBAIJ *)mat->data;
56: MatStoreValues(aij->A);
57: MatStoreValues(aij->B);
58: return(0);
59: }
60: EXTERN_C_END
62: EXTERN_C_BEGIN
65: PetscErrorCode MatRetrieveValues_MPIBAIJ(Mat mat)
66: {
67: Mat_MPIBAIJ *aij = (Mat_MPIBAIJ *)mat->data;
71: MatRetrieveValues(aij->A);
72: MatRetrieveValues(aij->B);
73: return(0);
74: }
75: EXTERN_C_END
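/*
   MatStoreValues()/MatRetrieveValues() support repeatedly refilling a
   matrix with a fixed nonzero pattern (e.g. inside a Newton loop).  A
   sketch, assuming the matrix has already been assembled once:

       MatSetOption(mat,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);
       MatStoreValues(mat);
       ...  modify entries, reassemble  ...
       MatRetrieveValues(mat);
*/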
77: /*
78: Local utility routine that creates a mapping from the global column
79: number to the local number in the off-diagonal part of the local
80: storage of the matrix. This is done in a non-scalable way since the
81: length of colmap equals the global number of block columns.
82: */
85: PetscErrorCode MatCreateColmap_MPIBAIJ_Private(Mat mat)
86: {
87: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
88: Mat_SeqBAIJ *B = (Mat_SeqBAIJ*)baij->B->data;
90: PetscInt nbs = B->nbs,i,bs=mat->rmap->bs;
93: #if defined (PETSC_USE_CTABLE)
94: PetscTableCreate(baij->nbs,baij->Nbs+1,&baij->colmap);
95: for (i=0; i<nbs; i++){
96: PetscTableAdd(baij->colmap,baij->garray[i]+1,i*bs+1,INSERT_VALUES);
97: }
98: #else
99: PetscMalloc((baij->Nbs+1)*sizeof(PetscInt),&baij->colmap);
100: PetscLogObjectMemory(mat,baij->Nbs*sizeof(PetscInt));
101: PetscMemzero(baij->colmap,baij->Nbs*sizeof(PetscInt));
102: for (i=0; i<nbs; i++) baij->colmap[baij->garray[i]] = i*bs+1;
103: #endif
104: return(0);
105: }
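/*
   Note the +1 offset used above: colmap stores i*bs+1 so that a result of
   zero (an unset slot in the array version, or a failed PetscTableFind())
   is distinguishable from local block column 0.  Callers subtract 1, as in

       col = baij->colmap[in[j]/bs] - 1;

   and a negative col means the block column is not yet present in B.
*/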
107: #define MatSetValues_SeqBAIJ_A_Private(row,col,value,addv) \
108: { \
109: \
110: brow = row/bs; \
111: rp = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
112: rmax = aimax[brow]; nrow = ailen[brow]; \
113: bcol = col/bs; \
114: ridx = row % bs; cidx = col % bs; \
115: low = 0; high = nrow; \
116: while (high-low > 3) { \
117: t = (low+high)/2; \
118: if (rp[t] > bcol) high = t; \
119: else low = t; \
120: } \
121: for (_i=low; _i<high; _i++) { \
122: if (rp[_i] > bcol) break; \
123: if (rp[_i] == bcol) { \
124: bap = ap + bs2*_i + bs*cidx + ridx; \
125: if (addv == ADD_VALUES) *bap += value; \
126: else *bap = value; \
127: goto a_noinsert; \
128: } \
129: } \
130: if (a->nonew == 1) goto a_noinsert; \
131: if (a->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
132: MatSeqXAIJReallocateAIJ(A,a->mbs,bs2,nrow,brow,bcol,rmax,aa,ai,aj,rp,ap,aimax,a->nonew,MatScalar); \
133: N = nrow++ - 1; \
134: /* shift up all the later entries in this row */ \
135: for (ii=N; ii>=_i; ii--) { \
136: rp[ii+1] = rp[ii]; \
137: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
138: } \
139: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar)); } \
140: rp[_i] = bcol; \
141: ap[bs2*_i + bs*cidx + ridx] = value; \
142: a_noinsert:; \
143: ailen[brow] = nrow; \
144: }
146: #define MatSetValues_SeqBAIJ_B_Private(row,col,value,addv) \
147: { \
148: brow = row/bs; \
149: rp = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
150: rmax = bimax[brow]; nrow = bilen[brow]; \
151: bcol = col/bs; \
152: ridx = row % bs; cidx = col % bs; \
153: low = 0; high = nrow; \
154: while (high-low > 3) { \
155: t = (low+high)/2; \
156: if (rp[t] > bcol) high = t; \
157: else low = t; \
158: } \
159: for (_i=low; _i<high; _i++) { \
160: if (rp[_i] > bcol) break; \
161: if (rp[_i] == bcol) { \
162: bap = ap + bs2*_i + bs*cidx + ridx; \
163: if (addv == ADD_VALUES) *bap += value; \
164: else *bap = value; \
165: goto b_noinsert; \
166: } \
167: } \
168: if (b->nonew == 1) goto b_noinsert; \
169: if (b->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
170: MatSeqXAIJReallocateAIJ(B,b->mbs,bs2,nrow,brow,bcol,rmax,ba,bi,bj,rp,ap,bimax,b->nonew,MatScalar); \
171: CHKMEMQ;\
172: N = nrow++ - 1; \
173: /* shift up all the later entries in this row */ \
174: for (ii=N; ii>=_i; ii--) { \
175: rp[ii+1] = rp[ii]; \
176: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
177: } \
178: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));} \
179: rp[_i] = bcol; \
180: ap[bs2*_i + bs*cidx + ridx] = value; \
181: b_noinsert:; \
182: bilen[brow] = nrow; \
183: }
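/*
   Both macros above use the same scheme: a short bisection over the
   sorted block-column indices of block row brow narrows the window to a
   handful of entries, a linear scan locates the block, and the scalar
   lands at offset bs*cidx + ridx inside the bs x bs block (blocks are
   stored column major).  If the block is missing and new nonzeros are
   allowed, the later blocks of the row are shifted up one slot to make
   room for it.
*/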
187: PetscErrorCode MatSetValues_MPIBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
188: {
189: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
190: MatScalar value;
191: PetscBool roworiented = baij->roworiented;
193: PetscInt i,j,row,col;
194: PetscInt rstart_orig=mat->rmap->rstart;
195: PetscInt rend_orig=mat->rmap->rend,cstart_orig=mat->cmap->rstart;
196: PetscInt cend_orig=mat->cmap->rend,bs=mat->rmap->bs;
198: /* Some variables required by the macros */
199: Mat A = baij->A;
200: Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)(A)->data;
201: PetscInt *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
202: MatScalar *aa=a->a;
204: Mat B = baij->B;
205: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(B)->data;
206: PetscInt *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
207: MatScalar *ba=b->a;
209: PetscInt *rp,ii,nrow,_i,rmax,N,brow,bcol;
210: PetscInt low,high,t,ridx,cidx,bs2=a->bs2;
211: MatScalar *ap,*bap;
215: for (i=0; i<m; i++) {
216: if (im[i] < 0) continue;
217: #if defined(PETSC_USE_DEBUG)
218: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
219: #endif
220: if (im[i] >= rstart_orig && im[i] < rend_orig) {
221: row = im[i] - rstart_orig;
222: for (j=0; j<n; j++) {
223: if (in[j] >= cstart_orig && in[j] < cend_orig){
224: col = in[j] - cstart_orig;
225: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
226: MatSetValues_SeqBAIJ_A_Private(row,col,value,addv);
227: /* MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv); */
228: } else if (in[j] < 0) continue;
229: #if defined(PETSC_USE_DEBUG)
230: else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
231: #endif
232: else {
233: if (mat->was_assembled) {
234: if (!baij->colmap) {
235: MatCreateColmap_MPIBAIJ_Private(mat);
236: }
237: #if defined (PETSC_USE_CTABLE)
238: PetscTableFind(baij->colmap,in[j]/bs + 1,&col);
239: col = col - 1;
240: #else
241: col = baij->colmap[in[j]/bs] - 1;
242: #endif
243: if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
244: MatDisAssemble_MPIBAIJ(mat);
245: col = in[j];
246: /* Reinitialize the variables required by MatSetValues_SeqBAIJ_B_Private() */
247: B = baij->B;
248: b = (Mat_SeqBAIJ*)(B)->data;
249: bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
250: ba=b->a;
251: } else col += in[j]%bs;
252: } else col = in[j];
253: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
254: MatSetValues_SeqBAIJ_B_Private(row,col,value,addv);
255: /* MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv); */
256: }
257: }
258: } else {
259: if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
260: if (!baij->donotstash) {
261: mat->assembled = PETSC_FALSE;
262: if (roworiented) {
263: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,PETSC_FALSE);
264: } else {
265: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,PETSC_FALSE);
266: }
267: }
268: }
269: }
270: return(0);
271: }
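/*
   A sketch of scalar insertion with global indices (entries owned by
   other processes are stashed here and communicated during assembly);
   the indices and values are illustrative:

       PetscInt    row = 3, cols[2] = {3,7};
       PetscScalar vals[2] = {4.0,-1.0};
       MatSetValues(mat,1,&row,2,cols,vals,INSERT_VALUES);
       MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
       MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
*/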
275: PetscErrorCode MatSetValuesBlocked_MPIBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
276: {
277: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
278: const PetscScalar *value;
279: MatScalar *barray=baij->barray;
280: PetscBool roworiented = baij->roworiented;
281: PetscErrorCode ierr;
282: PetscInt i,j,ii,jj,row,col,rstart=baij->rstartbs;
283: PetscInt rend=baij->rendbs,cstart=baij->cstartbs,stepval;
284: PetscInt cend=baij->cendbs,bs=mat->rmap->bs,bs2=baij->bs2;
285:
287: if (!barray) {
288: PetscMalloc(bs2*sizeof(MatScalar),&barray);
289: baij->barray = barray;
290: }
292: if (roworiented) {
293: stepval = (n-1)*bs;
294: } else {
295: stepval = (m-1)*bs;
296: }
297: for (i=0; i<m; i++) {
298: if (im[i] < 0) continue;
299: #if defined(PETSC_USE_DEBUG)
300: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
301: #endif
302: if (im[i] >= rstart && im[i] < rend) {
303: row = im[i] - rstart;
304: for (j=0; j<n; j++) {
305: /* If n = 1 (row oriented) or m = 1 (column oriented) the block is contiguous in v; no copy is required */
306: if ((roworiented) && (n == 1)) {
307: barray = (MatScalar*)v + i*bs2;
308: } else if ((!roworiented) && (m == 1)) {
309: barray = (MatScalar*)v + j*bs2;
310: } else { /* Here a copy is required */
311: if (roworiented) {
312: value = v + (i*(stepval+bs) + j)*bs;
313: } else {
314: value = v + (j*(stepval+bs) + i)*bs;
315: }
316: for (ii=0; ii<bs; ii++,value+=bs+stepval) {
317: for (jj=0; jj<bs; jj++) {
318: barray[jj] = value[jj];
319: }
320: barray += bs;
321: }
322: barray -= bs2;
323: }
324:
325: if (in[j] >= cstart && in[j] < cend){
326: col = in[j] - cstart;
327: MatSetValuesBlocked_SeqBAIJ(baij->A,1,&row,1,&col,barray,addv);
328: }
329: else if (in[j] < 0) continue;
330: #if defined(PETSC_USE_DEBUG)
331: else if (in[j] >= baij->Nbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);
332: #endif
333: else {
334: if (mat->was_assembled) {
335: if (!baij->colmap) {
336: MatCreateColmap_MPIBAIJ_Private(mat);
337: }
339: #if defined(PETSC_USE_DEBUG)
340: #if defined (PETSC_USE_CTABLE)
341: { PetscInt data;
342: PetscTableFind(baij->colmap,in[j]+1,&data);
343: if ((data - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
344: }
345: #else
346: if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
347: #endif
348: #endif
349: #if defined (PETSC_USE_CTABLE)
350: PetscTableFind(baij->colmap,in[j]+1,&col);
351: col = (col - 1)/bs;
352: #else
353: col = (baij->colmap[in[j]] - 1)/bs;
354: #endif
355: if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
356: MatDisAssemble_MPIBAIJ(mat);
357: col = in[j];
358: }
359: }
360: else col = in[j];
361: MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);
362: }
363: }
364: } else {
365: if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
366: if (!baij->donotstash) {
367: if (roworiented) {
368: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
369: } else {
370: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
371: }
372: }
373: }
374: }
375: return(0);
376: }
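/*
   A sketch of blocked insertion; indices are in block units and v holds
   m*n dense bs*bs blocks.  With bs = 2 and one row-oriented block:

       PetscInt    brow = 0, bcol = 1;
       PetscScalar blk[4] = {1.0,2.0,
                             3.0,4.0};
       MatSetValuesBlocked(mat,1,&brow,1,&bcol,blk,INSERT_VALUES);
*/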
378: #define HASH_KEY 0.6180339887
379: #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(PetscInt)((size)*(tmp-(PetscInt)tmp)))
380: /* #define HASH(size,key) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
381: /* #define HASH(size,key,tmp) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
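/*
   HASH() is multiplicative hashing with the golden ratio conjugate: the
   key is multiplied by 0.6180339887, the integer part is discarded, and
   the fractional part is scaled to the table size.  For example, with
   size = 100 and key = 57: 57*0.6180339887 = 35.227..., so the slot is
   (PetscInt)(100*0.227...) = 22.
*/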
384: PetscErrorCode MatSetValues_MPIBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
385: {
386: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
387: PetscBool roworiented = baij->roworiented;
389: PetscInt i,j,row,col;
390: PetscInt rstart_orig=mat->rmap->rstart;
391: PetscInt rend_orig=mat->rmap->rend,Nbs=baij->Nbs;
392: PetscInt h1,key,size=baij->ht_size,bs=mat->rmap->bs,*HT=baij->ht,idx;
393: PetscReal tmp;
394: MatScalar **HD = baij->hd,value;
395: #if defined(PETSC_USE_DEBUG)
396: PetscInt total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
397: #endif
401: for (i=0; i<m; i++) {
402: #if defined(PETSC_USE_DEBUG)
403: if (im[i] < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
404: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
405: #endif
406: row = im[i];
407: if (row >= rstart_orig && row < rend_orig) {
408: for (j=0; j<n; j++) {
409: col = in[j];
410: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
411: /* Look up into the Hash Table */
412: key = (row/bs)*Nbs+(col/bs)+1;
413: h1 = HASH(size,key,tmp);
415:
416: idx = h1;
417: #if defined(PETSC_USE_DEBUG)
418: insert_ct++;
419: total_ct++;
420: if (HT[idx] != key) {
421: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++);
422: if (idx == size) {
423: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++);
424: if (idx == h1) {
425: SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
426: }
427: }
428: }
429: #else
430: if (HT[idx] != key) {
431: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++);
432: if (idx == size) {
433: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++);
434: if (idx == h1) {
435: SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
436: }
437: }
438: }
439: #endif
440: /* A HASH table entry is found, so insert the values at the correct address */
441: if (addv == ADD_VALUES) *(HD[idx]+ (col % bs)*bs + (row % bs)) += value;
442: else *(HD[idx]+ (col % bs)*bs + (row % bs)) = value;
443: }
444: } else {
445: if (!baij->donotstash) {
446: if (roworiented) {
447: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,PETSC_FALSE);
448: } else {
449: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,PETSC_FALSE);
450: }
451: }
452: }
453: }
454: #if defined(PETSC_USE_DEBUG)
455: baij->ht_total_ct = total_ct;
456: baij->ht_insert_ct = insert_ct;
457: #endif
458: return(0);
459: }
463: PetscErrorCode MatSetValuesBlocked_MPIBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
464: {
465: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
466: PetscBool roworiented = baij->roworiented;
467: PetscErrorCode ierr;
468: PetscInt i,j,ii,jj,row,col;
469: PetscInt rstart=baij->rstartbs;
470: PetscInt rend=mat->rmap->rend,stepval,bs=mat->rmap->bs,bs2=baij->bs2,nbs2=n*bs2;
471: PetscInt h1,key,size=baij->ht_size,idx,*HT=baij->ht,Nbs=baij->Nbs;
472: PetscReal tmp;
473: MatScalar **HD = baij->hd,*baij_a;
474: const PetscScalar *v_t,*value;
475: #if defined(PETSC_USE_DEBUG)
476: PetscInt total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
477: #endif
478:
481: if (roworiented) {
482: stepval = (n-1)*bs;
483: } else {
484: stepval = (m-1)*bs;
485: }
486: for (i=0; i<m; i++) {
487: #if defined(PETSC_USE_DEBUG)
488: if (im[i] < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",im[i]);
489: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],baij->Mbs-1);
490: #endif
491: row = im[i];
492: v_t = v + i*nbs2;
493: if (row >= rstart && row < rend) {
494: for (j=0; j<n; j++) {
495: col = in[j];
497: /* Look up into the Hash Table */
498: key = row*Nbs+col+1;
499: h1 = HASH(size,key,tmp);
500:
501: idx = h1;
502: #if defined(PETSC_USE_DEBUG)
503: total_ct++;
504: insert_ct++;
505: if (HT[idx] != key) {
506: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++);
507: if (idx == size) {
508: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++);
509: if (idx == h1) {
510: SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
511: }
512: }
513: }
514: #else
515: if (HT[idx] != key) {
516: for (idx=h1; (idx<size) && (HT[idx]!=key); idx++);
517: if (idx == size) {
518: for (idx=0; (idx<h1) && (HT[idx]!=key); idx++);
519: if (idx == h1) {
520: SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
521: }
522: }
523: }
524: #endif
525: baij_a = HD[idx];
526: if (roworiented) {
527: /*value = v + i*(stepval+bs)*bs + j*bs;*/
528: /* value = v + (i*(stepval+bs)+j)*bs; */
529: value = v_t;
530: v_t += bs;
531: if (addv == ADD_VALUES) {
532: for (ii=0; ii<bs; ii++,value+=stepval) {
533: for (jj=ii; jj<bs2; jj+=bs) {
534: baij_a[jj] += *value++;
535: }
536: }
537: } else {
538: for (ii=0; ii<bs; ii++,value+=stepval) {
539: for (jj=ii; jj<bs2; jj+=bs) {
540: baij_a[jj] = *value++;
541: }
542: }
543: }
544: } else {
545: value = v + j*(stepval+bs)*bs + i*bs;
546: if (addv == ADD_VALUES) {
547: for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
548: for (jj=0; jj<bs; jj++) {
549: baij_a[jj] += *value++;
550: }
551: }
552: } else {
553: for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
554: for (jj=0; jj<bs; jj++) {
555: baij_a[jj] = *value++;
556: }
557: }
558: }
559: }
560: }
561: } else {
562: if (!baij->donotstash) {
563: if (roworiented) {
564: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
565: } else {
566: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
567: }
568: }
569: }
570: }
571: #if defined(PETSC_USE_DEBUG)
572: baij->ht_total_ct = total_ct;
573: baij->ht_insert_ct = insert_ct;
574: #endif
575: return(0);
576: }
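/*
   These _HT variants are installed by MatAssemblyEnd_MPIBAIJ() after the
   first final assembly when the hash table is enabled, e.g.

       MatSetOption(mat,MAT_USE_HASH_TABLE,PETSC_TRUE);

   Subsequent MatSetValues()/MatSetValuesBlocked() calls then resolve
   locations by table lookup, but only for entries already present in the
   nonzero pattern.
*/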
580: PetscErrorCode MatGetValues_MPIBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
581: {
582: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
584: PetscInt bs=mat->rmap->bs,i,j,bsrstart = mat->rmap->rstart,bsrend = mat->rmap->rend;
585: PetscInt bscstart = mat->cmap->rstart,bscend = mat->cmap->rend,row,col,data;
588: for (i=0; i<m; i++) {
589: if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
590: if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
591: if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
592: row = idxm[i] - bsrstart;
593: for (j=0; j<n; j++) {
594: if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
595: if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
596: if (idxn[j] >= bscstart && idxn[j] < bscend){
597: col = idxn[j] - bscstart;
598: MatGetValues_SeqBAIJ(baij->A,1,&row,1,&col,v+i*n+j);
599: } else {
600: if (!baij->colmap) {
601: MatCreateColmap_MPIBAIJ_Private(mat);
602: }
603: #if defined (PETSC_USE_CTABLE)
604: PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);
605: data--;
606: #else
607: data = baij->colmap[idxn[j]/bs]-1;
608: #endif
609: if ((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
610: else {
611: col = data + idxn[j]%bs;
612: MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);
613: }
614: }
615: }
616: } else {
617: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
618: }
619: }
620: return(0);
621: }
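/*
   MatGetValues() can only read locally owned rows; a sketch with global
   indices, where rstart (from MatGetOwnershipRange(), an assumption of
   this example) is the first locally owned row:

       PetscInt    row = rstart, col = 0;
       PetscScalar val;
       MatGetValues(mat,1,&row,1,&col,&val);

   Off-process columns that are absent from B come back as 0.0.
*/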
625: PetscErrorCode MatNorm_MPIBAIJ(Mat mat,NormType type,PetscReal *nrm)
626: {
627: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
628: Mat_SeqBAIJ *amat = (Mat_SeqBAIJ*)baij->A->data,*bmat = (Mat_SeqBAIJ*)baij->B->data;
630: PetscInt i,j,bs2=baij->bs2,bs=baij->A->rmap->bs,nz,row,col;
631: PetscReal sum = 0.0;
632: MatScalar *v;
635: if (baij->size == 1) {
636: MatNorm(baij->A,type,nrm);
637: } else {
638: if (type == NORM_FROBENIUS) {
639: v = amat->a;
640: nz = amat->nz*bs2;
641: for (i=0; i<nz; i++) {
642: #if defined(PETSC_USE_COMPLEX)
643: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
644: #else
645: sum += (*v)*(*v); v++;
646: #endif
647: }
648: v = bmat->a;
649: nz = bmat->nz*bs2;
650: for (i=0; i<nz; i++) {
651: #if defined(PETSC_USE_COMPLEX)
652: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
653: #else
654: sum += (*v)*(*v); v++;
655: #endif
656: }
657: MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPIU_SUM,((PetscObject)mat)->comm);
658: *nrm = PetscSqrtReal(*nrm);
659: } else if (type == NORM_1) { /* max column sum */
660: PetscReal *tmp,*tmp2;
661: PetscInt *jj,*garray=baij->garray,cstart=baij->rstartbs;
662: PetscMalloc2(mat->cmap->N,PetscReal,&tmp,mat->cmap->N,PetscReal,&tmp2);
663: PetscMemzero(tmp,mat->cmap->N*sizeof(PetscReal));
664: v = amat->a; jj = amat->j;
665: for (i=0; i<amat->nz; i++) {
666: for (j=0; j<bs; j++){
667: col = bs*(cstart + *jj) + j; /* column index */
668: for (row=0; row<bs; row++){
669: tmp[col] += PetscAbsScalar(*v); v++;
670: }
671: }
672: jj++;
673: }
674: v = bmat->a; jj = bmat->j;
675: for (i=0; i<bmat->nz; i++) {
676: for (j=0; j<bs; j++){
677: col = bs*garray[*jj] + j;
678: for (row=0; row<bs; row++){
679: tmp[col] += PetscAbsScalar(*v); v++;
680: }
681: }
682: jj++;
683: }
684: MPI_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,((PetscObject)mat)->comm);
685: *nrm = 0.0;
686: for (j=0; j<mat->cmap->N; j++) {
687: if (tmp2[j] > *nrm) *nrm = tmp2[j];
688: }
689: PetscFree2(tmp,tmp2);
690: } else if (type == NORM_INFINITY) { /* max row sum */
691: PetscReal *sums;
692: PetscMalloc(bs*sizeof(PetscReal),&sums);
693: sum = 0.0;
694: for (j=0; j<amat->mbs; j++) {
695: for (row=0; row<bs; row++) sums[row] = 0.0;
696: v = amat->a + bs2*amat->i[j];
697: nz = amat->i[j+1]-amat->i[j];
698: for (i=0; i<nz; i++) {
699: for (col=0; col<bs; col++){
700: for (row=0; row<bs; row++){
701: sums[row] += PetscAbsScalar(*v); v++;
702: }
703: }
704: }
705: v = bmat->a + bs2*bmat->i[j];
706: nz = bmat->i[j+1]-bmat->i[j];
707: for (i=0; i<nz; i++) {
708: for (col=0; col<bs; col++){
709: for (row=0; row<bs; row++){
710: sums[row] += PetscAbsScalar(*v); v++;
711: }
712: }
713: }
714: for (row=0; row<bs; row++){
715: if (sums[row] > sum) sum = sums[row];
716: }
717: }
718: MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPIU_MAX,((PetscObject)mat)->comm);
719: PetscFree(sums);
720: } else SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_SUP,"No support for this norm yet");
721: }
722: return(0);
723: }
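/*
   Each supported norm combines local contributions with a single
   MPI_Allreduce(); usage sketch:

       PetscReal nrm;
       MatNorm(mat,NORM_FROBENIUS,&nrm);
       MatNorm(mat,NORM_1,&nrm);          max column sum
       MatNorm(mat,NORM_INFINITY,&nrm);   max row sum
*/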
725: /*
726: Creates and populates the hash table.
727: This table is created only once.
728: If new entries need to be added to the matrix
729: then the hash table has to be destroyed and
730: recreated.
731: */
734: PetscErrorCode MatCreateHashTable_MPIBAIJ_Private(Mat mat,PetscReal factor)
735: {
736: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
737: Mat A = baij->A,B=baij->B;
738: Mat_SeqBAIJ *a=(Mat_SeqBAIJ *)A->data,*b=(Mat_SeqBAIJ *)B->data;
739: PetscInt i,j,k,nz=a->nz+b->nz,h1,*ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
741: PetscInt ht_size,bs2=baij->bs2,rstart=baij->rstartbs;
742: PetscInt cstart=baij->cstartbs,*garray=baij->garray,row,col,Nbs=baij->Nbs;
743: PetscInt *HT,key;
744: MatScalar **HD;
745: PetscReal tmp;
746: #if defined(PETSC_USE_INFO)
747: PetscInt ct=0,max=0;
748: #endif
751: if (baij->ht) return(0);
753: baij->ht_size = (PetscInt)(factor*nz);
754: ht_size = baij->ht_size;
755:
756: /* Allocate Memory for Hash Table */
757: PetscMalloc2(ht_size,MatScalar*,&baij->hd,ht_size,PetscInt,&baij->ht);
758: PetscMemzero(baij->hd,ht_size*sizeof(MatScalar*));
759: PetscMemzero(baij->ht,ht_size*sizeof(PetscInt));
760: HD = baij->hd;
761: HT = baij->ht;
763: /* Loop Over A */
764: for (i=0; i<a->mbs; i++) {
765: for (j=ai[i]; j<ai[i+1]; j++) {
766: row = i+rstart;
767: col = aj[j]+cstart;
768:
769: key = row*Nbs + col + 1;
770: h1 = HASH(ht_size,key,tmp);
771: for (k=0; k<ht_size; k++){
772: if (!HT[(h1+k)%ht_size]) {
773: HT[(h1+k)%ht_size] = key;
774: HD[(h1+k)%ht_size] = a->a + j*bs2;
775: break;
776: #if defined(PETSC_USE_INFO)
777: } else {
778: ct++;
779: #endif
780: }
781: }
782: #if defined(PETSC_USE_INFO)
783: if (k> max) max = k;
784: #endif
785: }
786: }
787: /* Loop Over B */
788: for (i=0; i<b->mbs; i++) {
789: for (j=bi[i]; j<bi[i+1]; j++) {
790: row = i+rstart;
791: col = garray[bj[j]];
792: key = row*Nbs + col + 1;
793: h1 = HASH(ht_size,key,tmp);
794: for (k=0; k<ht_size; k++){
795: if (!HT[(h1+k)%ht_size]) {
796: HT[(h1+k)%ht_size] = key;
797: HD[(h1+k)%ht_size] = b->a + j*bs2;
798: break;
799: #if defined(PETSC_USE_INFO)
800: } else {
801: ct++;
802: #endif
803: }
804: }
805: #if defined(PETSC_USE_INFO)
806: if (k> max) max = k;
807: #endif
808: }
809: }
810:
811: /* Print Summary */
812: #if defined(PETSC_USE_INFO)
813: for (i=0,j=0; i<ht_size; i++) {
814: if (HT[i]) {j++;}
815: }
816: PetscInfo2(mat,"Average Search = %5.2f,max search = %D\n",(!j)? 0.0:((PetscReal)(ct+j))/j,max);
817: #endif
818: return(0);
819: }
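/*
   Collisions above are resolved by linear probing: starting at h1, the
   first empty slot HT[(h1+k)%ht_size] receives the key, and HD[] keeps
   the address of the matching bs2 block of values, so a later lookup can
   write directly into the matrix storage.
*/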
823: PetscErrorCode MatAssemblyBegin_MPIBAIJ(Mat mat,MatAssemblyType mode)
824: {
825: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
827: PetscInt nstash,reallocs;
828: InsertMode addv;
831: if (baij->donotstash || mat->nooffprocentries) {
832: return(0);
833: }
835: /* make sure all processors are either in INSERTMODE or ADDMODE */
836: MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,((PetscObject)mat)->comm);
837: if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
838: mat->insertmode = addv; /* in case this processor had no cache */
840: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
841: MatStashScatterBegin_Private(mat,&mat->bstash,baij->rangebs);
842: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
843: PetscInfo2(mat,"Stash has %D entries,uses %D mallocs.\n",nstash,reallocs);
844: MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);
845: PetscInfo2(mat,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
846: return(0);
847: }
851: PetscErrorCode MatAssemblyEnd_MPIBAIJ(Mat mat,MatAssemblyType mode)
852: {
853: Mat_MPIBAIJ *baij=(Mat_MPIBAIJ*)mat->data;
854: Mat_SeqBAIJ *a=(Mat_SeqBAIJ*)baij->A->data;
856: PetscInt i,j,rstart,ncols,flg,bs2=baij->bs2;
857: PetscInt *row,*col;
858: PetscBool r1,r2,r3,other_disassembled;
859: MatScalar *val;
860: InsertMode addv = mat->insertmode;
861: PetscMPIInt n;
863: /* do not use 'b=(Mat_SeqBAIJ*)baij->B->data' as B can be reset in disassembly */
865: if (!baij->donotstash && !mat->nooffprocentries) {
866: while (1) {
867: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
868: if (!flg) break;
870: for (i=0; i<n;) {
871: /* Now identify the consecutive vals belonging to the same row */
872: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
873: if (j < n) ncols = j-i;
874: else ncols = n-i;
875: /* Now assemble all these values with a single function call */
876: MatSetValues_MPIBAIJ(mat,1,row+i,ncols,col+i,val+i,addv);
877: i = j;
878: }
879: }
880: MatStashScatterEnd_Private(&mat->stash);
881: /* Now process the block-stash. Since the values are stashed column oriented,
882: temporarily clear the roworiented flags, and after MatSetValues()
883: restore the original flags */
884: r1 = baij->roworiented;
885: r2 = a->roworiented;
886: r3 = ((Mat_SeqBAIJ*)baij->B->data)->roworiented;
887: baij->roworiented = PETSC_FALSE;
888: a->roworiented = PETSC_FALSE;
889: (((Mat_SeqBAIJ*)baij->B->data))->roworiented = PETSC_FALSE; /* b->roworiented */
890: while (1) {
891: MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);
892: if (!flg) break;
893:
894: for (i=0; i<n;) {
895: /* Now identify the consecutive vals belonging to the same row */
896: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
897: if (j < n) ncols = j-i;
898: else ncols = n-i;
899: MatSetValuesBlocked_MPIBAIJ(mat,1,row+i,ncols,col+i,val+i*bs2,addv);
900: i = j;
901: }
902: }
903: MatStashScatterEnd_Private(&mat->bstash);
904: baij->roworiented = r1;
905: a->roworiented = r2;
906: ((Mat_SeqBAIJ*)baij->B->data)->roworiented = r3; /* b->roworiented */
907: }
908:
909: MatAssemblyBegin(baij->A,mode);
910: MatAssemblyEnd(baij->A,mode);
912: /* determine if any processor has disassembled; if so we must
913: also disassemble ourselves, in order that we may reassemble. */
914: /*
915: if the nonzero structure of submatrix B cannot change then we know that
916: no processor disassembled, thus we can skip this step
917: */
918: if (!((Mat_SeqBAIJ*)baij->B->data)->nonew) {
919: MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,((PetscObject)mat)->comm);
920: if (mat->was_assembled && !other_disassembled) {
921: MatDisAssemble_MPIBAIJ(mat);
922: }
923: }
925: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
926: MatSetUpMultiply_MPIBAIJ(mat);
927: }
928: MatSetOption(baij->B,MAT_CHECK_COMPRESSED_ROW,PETSC_FALSE);
929: MatAssemblyBegin(baij->B,mode);
930: MatAssemblyEnd(baij->B,mode);
931:
932: #if defined(PETSC_USE_INFO)
933: if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
934: PetscInfo1(mat,"Average Hash Table Search in MatSetValues = %5.2f\n",((PetscReal)baij->ht_total_ct)/baij->ht_insert_ct);
935: baij->ht_total_ct = 0;
936: baij->ht_insert_ct = 0;
937: }
938: #endif
939: if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
940: MatCreateHashTable_MPIBAIJ_Private(mat,baij->ht_fact);
941: mat->ops->setvalues = MatSetValues_MPIBAIJ_HT;
942: mat->ops->setvaluesblocked = MatSetValuesBlocked_MPIBAIJ_HT;
943: }
945: PetscFree2(baij->rowvalues,baij->rowindices);
946: baij->rowvalues = 0;
947: return(0);
948: }
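/*
   The usual assembly idiom drains both the scalar stash and the block
   stash filled by the MatSetValues variants above:

       MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
       ...  work that overlaps the stash communication may go here  ...
       MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
*/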
952: static PetscErrorCode MatView_MPIBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
953: {
954: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
955: PetscErrorCode ierr;
956: PetscMPIInt size = baij->size,rank = baij->rank;
957: PetscInt bs = mat->rmap->bs;
958: PetscBool iascii,isdraw;
959: PetscViewer sviewer;
960: PetscViewerFormat format;
963: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
964: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
965: if (iascii) {
966: PetscViewerGetFormat(viewer,&format);
967: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
968: MatInfo info;
969: MPI_Comm_rank(((PetscObject)mat)->comm,&rank);
970: MatGetInfo(mat,MAT_LOCAL,&info);
971: PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
972: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",
973: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,mat->rmap->bs,(PetscInt)info.memory);
974: MatGetInfo(baij->A,MAT_LOCAL,&info);
975: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
976: MatGetInfo(baij->B,MAT_LOCAL,&info);
977: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
978: PetscViewerFlush(viewer);
979: PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
980: PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
981: VecScatterView(baij->Mvctx,viewer);
982: return(0);
983: } else if (format == PETSC_VIEWER_ASCII_INFO) {
984: PetscViewerASCIIPrintf(viewer," block size is %D\n",bs);
985: return(0);
986: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
987: return(0);
988: }
989: }
991: if (isdraw) {
992: PetscDraw draw;
993: PetscBool isnull;
994: PetscViewerDrawGetDraw(viewer,0,&draw);
995: PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
996: }
998: if (size == 1) {
999: PetscObjectSetName((PetscObject)baij->A,((PetscObject)mat)->name);
1000: MatView(baij->A,viewer);
1001: } else {
1002: /* assemble the entire matrix onto first processor. */
1003: Mat A;
1004: Mat_SeqBAIJ *Aloc;
1005: PetscInt M = mat->rmap->N,N = mat->cmap->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
1006: MatScalar *a;
1008: /* Here we are creating a temporary matrix, so will assume MPIBAIJ is acceptable */
1009: /* Perhaps this should be the type of mat? */
1010: MatCreate(((PetscObject)mat)->comm,&A);
1011: if (!rank) {
1012: MatSetSizes(A,M,N,M,N);
1013: } else {
1014: MatSetSizes(A,0,0,M,N);
1015: }
1016: MatSetType(A,MATMPIBAIJ);
1017: MatMPIBAIJSetPreallocation(A,mat->rmap->bs,0,PETSC_NULL,0,PETSC_NULL);
1018: MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);
1019: PetscLogObjectParent(mat,A);
1021: /* copy over the A part */
1022: Aloc = (Mat_SeqBAIJ*)baij->A->data;
1023: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1024: PetscMalloc(bs*sizeof(PetscInt),&rvals);
1026: for (i=0; i<mbs; i++) {
1027: rvals[0] = bs*(baij->rstartbs + i);
1028: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1029: for (j=ai[i]; j<ai[i+1]; j++) {
1030: col = (baij->cstartbs+aj[j])*bs;
1031: for (k=0; k<bs; k++) {
1032: MatSetValues_MPIBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);
1033: col++; a += bs;
1034: }
1035: }
1036: }
1037: /* copy over the B part */
1038: Aloc = (Mat_SeqBAIJ*)baij->B->data;
1039: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1040: for (i=0; i<mbs; i++) {
1041: rvals[0] = bs*(baij->rstartbs + i);
1042: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1043: for (j=ai[i]; j<ai[i+1]; j++) {
1044: col = baij->garray[aj[j]]*bs;
1045: for (k=0; k<bs; k++) {
1046: MatSetValues_MPIBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);
1047: col++; a += bs;
1048: }
1049: }
1050: }
1051: PetscFree(rvals);
1052: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1053: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1054: /*
1055: Everyone has to participate in the viewing since the graphics waits are
1056: synchronized across all processors that share the PetscDraw object
1057: */
1058: PetscViewerGetSingleton(viewer,&sviewer);
1059: if (!rank) {
1060: PetscObjectSetName((PetscObject)((Mat_MPIBAIJ*)(A->data))->A,((PetscObject)mat)->name);
1061: /* Set the type name to MATMPIBAIJ so that the correct type can be printed out by PetscObjectPrintClassNamePrefixType() in MatView_SeqBAIJ_ASCII()*/
1062: PetscStrcpy(((PetscObject)((Mat_MPIBAIJ*)(A->data))->A)->type_name,MATMPIBAIJ);
1063: MatView(((Mat_MPIBAIJ*)(A->data))->A,sviewer);
1064: }
1065: PetscViewerRestoreSingleton(viewer,&sviewer);
1066: MatDestroy(&A);
1067: }
1068: return(0);
1069: }
1073: static PetscErrorCode MatView_MPIBAIJ_Binary(Mat mat,PetscViewer viewer)
1074: {
1075: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)mat->data;
1076: Mat_SeqBAIJ* A = (Mat_SeqBAIJ*)a->A->data;
1077: Mat_SeqBAIJ* B = (Mat_SeqBAIJ*)a->B->data;
1079: PetscInt i,*row_lens,*crow_lens,bs = mat->rmap->bs,j,k,bs2=a->bs2,header[4],nz,rlen;
1080: PetscInt *range=0,nzmax,*column_indices,cnt,col,*garray = a->garray,cstart = mat->cmap->rstart/bs,len,pcnt,l,ll;
1081: int fd;
1082: PetscScalar *column_values;
1083: FILE *file;
1084: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag;
1085: PetscInt message_count,flowcontrolcount;
1088: MPI_Comm_rank(((PetscObject)mat)->comm,&rank);
1089: MPI_Comm_size(((PetscObject)mat)->comm,&size);
1090: nz = bs2*(A->nz + B->nz);
1091: rlen = mat->rmap->n;
1092: if (!rank) {
1093: header[0] = MAT_FILE_CLASSID;
1094: header[1] = mat->rmap->N;
1095: header[2] = mat->cmap->N;
1096: MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,((PetscObject)mat)->comm);
1097: PetscViewerBinaryGetDescriptor(viewer,&fd);
1098: PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);
1099: /* get largest number of rows any processor has */
1100: range = mat->rmap->range;
1101: for (i=1; i<size; i++) {
1102: rlen = PetscMax(rlen,range[i+1] - range[i]);
1103: }
1104: } else {
1105: MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,((PetscObject)mat)->comm);
1106: }
1108: PetscMalloc((rlen/bs)*sizeof(PetscInt),&crow_lens);
1109: /* compute lengths of each row */
1110: for (i=0; i<a->mbs; i++) {
1111: crow_lens[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
1112: }
1113: /* store the row lengths to the file */
1114: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1115: if (!rank) {
1116: MPI_Status status;
1117: PetscMalloc(rlen*sizeof(PetscInt),&row_lens);
1118: rlen = (range[1] - range[0])/bs;
1119: for (i=0; i<rlen; i++) {
1120: for (j=0; j<bs; j++) {
1121: row_lens[i*bs+j] = bs*crow_lens[i];
1122: }
1123: }
1124: PetscBinaryWrite(fd,row_lens,bs*rlen,PETSC_INT,PETSC_TRUE);
1125: for (i=1; i<size; i++) {
1126: rlen = (range[i+1] - range[i])/bs;
1127: PetscViewerFlowControlStepMaster(viewer,i,message_count,flowcontrolcount);
1128: MPI_Recv(crow_lens,rlen,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);
1129: for (k=0; k<rlen; k++) {
1130: for (j=0; j<bs; j++) {
1131: row_lens[k*bs+j] = bs*crow_lens[k];
1132: }
1133: }
1134: PetscBinaryWrite(fd,row_lens,bs*rlen,PETSC_INT,PETSC_TRUE);
1135: }
1136: PetscViewerFlowControlEndMaster(viewer,message_count);
1137: PetscFree(row_lens);
1138: } else {
1139: PetscViewerFlowControlStepWorker(viewer,rank,message_count);
1140: MPI_Send(crow_lens,mat->rmap->n/bs,MPIU_INT,0,tag,((PetscObject)mat)->comm);
1141: PetscViewerFlowControlEndWorker(viewer,message_count);
1142: }
1143: PetscFree(crow_lens);
1145: /* load up the local column indices. Include them for all rows, not just once per block row, since process 0 does not have the
1146: information needed to expand them per row from a block row. This does require more communication, but still no more than
1147: the communication needed for the nonzero values */
1148: nzmax = nz; /* space the largest processor needs */
1149: MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,((PetscObject)mat)->comm);
1150: PetscMalloc(nzmax*sizeof(PetscInt),&column_indices);
1151: cnt = 0;
1152: for (i=0; i<a->mbs; i++) {
1153: pcnt = cnt;
1154: for (j=B->i[i]; j<B->i[i+1]; j++) {
1155: if ( (col = garray[B->j[j]]) > cstart) break;
1156: for (l=0; l<bs; l++) {
1157: column_indices[cnt++] = bs*col+l;
1158: }
1159: }
1160: for (k=A->i[i]; k<A->i[i+1]; k++) {
1161: for (l=0; l<bs; l++) {
1162: column_indices[cnt++] = bs*(A->j[k] + cstart)+l;
1163: }
1164: }
1165: for (; j<B->i[i+1]; j++) {
1166: for (l=0; l<bs; l++) {
1167: column_indices[cnt++] = bs*garray[B->j[j]]+l;
1168: }
1169: }
1170: len = cnt - pcnt;
1171: for (k=1; k<bs; k++) {
1172: PetscMemcpy(&column_indices[cnt],&column_indices[pcnt],len*sizeof(PetscInt));
1173: cnt += len;
1174: }
1175: }
1176: if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1178: /* store the columns to the file */
1179: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1180: if (!rank) {
1181: MPI_Status status;
1182: PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);
1183: for (i=1; i<size; i++) {
1184: PetscViewerFlowControlStepMaster(viewer,i,message_count,flowcontrolcount);
1185: MPI_Recv(&cnt,1,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);
1186: MPI_Recv(column_indices,cnt,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);
1187: PetscBinaryWrite(fd,column_indices,cnt,PETSC_INT,PETSC_TRUE);
1188: }
1189: PetscViewerFlowControlEndMaster(viewer,message_count);
1190: } else {
1191: PetscViewerFlowControlStepWorker(viewer,rank,message_count);
1192: MPI_Send(&cnt,1,MPIU_INT,0,tag,((PetscObject)mat)->comm);
1193: MPI_Send(column_indices,cnt,MPIU_INT,0,tag,((PetscObject)mat)->comm);
1194: PetscViewerFlowControlEndWorker(viewer,message_count);
1195: }
1196: PetscFree(column_indices);
1198: /* load up the numerical values */
1199: PetscMalloc(nzmax*sizeof(PetscScalar),&column_values);
1200: cnt = 0;
1201: for (i=0; i<a->mbs; i++) {
1202: rlen = bs*(B->i[i+1] - B->i[i] + A->i[i+1] - A->i[i]);
1203: for (j=B->i[i]; j<B->i[i+1]; j++) {
1204: if ( garray[B->j[j]] > cstart) break;
1205: for (l=0; l<bs; l++) {
1206: for (ll=0; ll<bs; ll++) {
1207: column_values[cnt + l*rlen + ll] = B->a[bs2*j+l+bs*ll];
1208: }
1209: }
1210: cnt += bs;
1211: }
1212: for (k=A->i[i]; k<A->i[i+1]; k++) {
1213: for (l=0; l<bs; l++) {
1214: for (ll=0; ll<bs; ll++) {
1215: column_values[cnt + l*rlen + ll] = A->a[bs2*k+l+bs*ll];
1216: }
1217: }
1218: cnt += bs;
1219: }
1220: for (; j<B->i[i+1]; j++) {
1221: for (l=0; l<bs; l++) {
1222: for (ll=0; ll<bs; ll++) {
1223: column_values[cnt + l*rlen + ll] = B->a[bs2*j+l+bs*ll];
1224: }
1225: }
1226: cnt += bs;
1227: }
1228: cnt += (bs-1)*rlen;
1229: }
1230: if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1232: /* store the column values to the file */
1233: PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);
1234: if (!rank) {
1235: MPI_Status status;
1236: PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);
1237: for (i=1; i<size; i++) {
1238: PetscViewerFlowControlStepMaster(viewer,i,message_count,flowcontrolcount);
1239: MPI_Recv(&cnt,1,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);
1240: MPI_Recv(column_values,cnt,MPIU_SCALAR,i,tag,((PetscObject)mat)->comm,&status);
1241: PetscBinaryWrite(fd,column_values,cnt,PETSC_SCALAR,PETSC_TRUE);
1242: }
1243: PetscViewerFlowControlEndMaster(viewer,message_count);
1244: } else {
1245: PetscViewerFlowControlStepWorker(viewer,rank,message_count);
1246: MPI_Send(&nz,1,MPIU_INT,0,tag,((PetscObject)mat)->comm);
1247: MPI_Send(column_values,nz,MPIU_SCALAR,0,tag,((PetscObject)mat)->comm);
1248: PetscViewerFlowControlEndWorker(viewer,message_count);
1249: }
1250: PetscFree(column_values);
1252: PetscViewerBinaryGetInfoPointer(viewer,&file);
1253: if (file) {
1254: fprintf(file,"-matload_block_size %d\n",(int)mat->rmap->bs);
1255: }
1256: return(0);
1257: }
1261: PetscErrorCode MatView_MPIBAIJ(Mat mat,PetscViewer viewer)
1262: {
1264: PetscBool iascii,isdraw,issocket,isbinary;
1267: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1268: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1269: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1270: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1271: if (iascii || isdraw || issocket) {
1272: MatView_MPIBAIJ_ASCIIorDraworSocket(mat,viewer);
1273: } else if (isbinary) {
1274: MatView_MPIBAIJ_Binary(mat,viewer);
1275: } else {
1276: SETERRQ1(((PetscObject)mat)->comm,PETSC_ERR_SUP,"Viewer type %s not supported by MPIBAIJ matrices",((PetscObject)viewer)->type_name);
1277: }
1278: return(0);
1279: }
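/*
   A sketch of the binary path (the file name is illustrative):

       PetscViewer viewer;
       PetscViewerBinaryOpen(((PetscObject)mat)->comm,"mat.dat",FILE_MODE_WRITE,&viewer);
       MatView(mat,viewer);
       PetscViewerDestroy(&viewer);
*/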
1283: PetscErrorCode MatDestroy_MPIBAIJ(Mat mat)
1284: {
1285: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1289: #if defined(PETSC_USE_LOG)
1290: PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->rmap->N,mat->cmap->N);
1291: #endif
1292: MatStashDestroy_Private(&mat->stash);
1293: MatStashDestroy_Private(&mat->bstash);
1294: MatDestroy(&baij->A);
1295: MatDestroy(&baij->B);
1296: #if defined (PETSC_USE_CTABLE)
1297: PetscTableDestroy(&baij->colmap);
1298: #else
1299: PetscFree(baij->colmap);
1300: #endif
1301: PetscFree(baij->garray);
1302: VecDestroy(&baij->lvec);
1303: VecScatterDestroy(&baij->Mvctx);
1304: PetscFree2(baij->rowvalues,baij->rowindices);
1305: PetscFree(baij->barray);
1306: PetscFree2(baij->hd,baij->ht);
1307: PetscFree(baij->rangebs);
1308: PetscFree(mat->data);
1310: PetscObjectChangeTypeName((PetscObject)mat,0);
1311: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);
1312: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);
1313: PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
1314: PetscObjectComposeFunction((PetscObject)mat,"MatMPIBAIJSetPreallocation_C","",PETSC_NULL);
1315: PetscObjectComposeFunction((PetscObject)mat,"MatMPIBAIJSetPreallocationCSR_C","",PETSC_NULL);
1316: PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C","",PETSC_NULL);
1317: PetscObjectComposeFunction((PetscObject)mat,"MatSetHashTableFactor_C","",PETSC_NULL);
1318: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpibaij_mpisbaij_C","",PETSC_NULL);
1319: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpibaij_mpibstrm_C","",PETSC_NULL);
1320: return(0);
1321: }
1325: PetscErrorCode MatMult_MPIBAIJ(Mat A,Vec xx,Vec yy)
1326: {
1327: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1329: PetscInt nt;
1332: VecGetLocalSize(xx,&nt);
1333: if (nt != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
1334: VecGetLocalSize(yy,&nt);
1335: if (nt != A->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
1336: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1337: (*a->A->ops->mult)(a->A,xx,yy);
1338: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1339: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1340: return(0);
1341: }
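/*
   MatMult above overlaps communication with computation: yy = A_d*xx is
   computed while the ghost values of xx scatter into lvec, then
   yy += B_o*lvec adds the off-diagonal contribution.
*/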
1345: PetscErrorCode MatMultAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1346: {
1347: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1351: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1352: (*a->A->ops->multadd)(a->A,xx,yy,zz);
1353: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1354: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1355: return(0);
1356: }
1360: PetscErrorCode MatMultTranspose_MPIBAIJ(Mat A,Vec xx,Vec yy)
1361: {
1362: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1364: PetscBool merged;
1367: VecScatterGetMerged(a->Mvctx,&merged);
1368: /* do nondiagonal part */
1369: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1370: if (!merged) {
1371: /* send it on its way */
1372: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1373: /* do local part */
1374: (*a->A->ops->multtranspose)(a->A,xx,yy);
1375: /* receive remote parts: note this assumes the values are not actually */
1376: /* inserted in yy until the next line */
1377: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1378: } else {
1379: /* do local part */
1380: (*a->A->ops->multtranspose)(a->A,xx,yy);
1381: /* send it on its way */
1382: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1383: /* values actually were received in the Begin() but we need to call this nop */
1384: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1385: }
1386: return(0);
1387: }
1391: PetscErrorCode MatMultTransposeAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1392: {
1393: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1397: /* do nondiagonal part */
1398: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1399: /* send it on its way */
1400: VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1401: /* do local part */
1402: (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1403: /* receive remote parts: note this assumes the values are not actually */
1404: /* inserted in zz until the next line, which is true for this implementation */
1405: /* but is perhaps not always true. */
1406: VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1407: return(0);
1408: }
1410: /*
1411: This only works correctly for square matrices where the subblock A->A is the
1412: diagonal block
1413: */
1416: PetscErrorCode MatGetDiagonal_MPIBAIJ(Mat A,Vec v)
1417: {
1418: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1422: if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1423: MatGetDiagonal(a->A,v);
1424: return(0);
1425: }
1429: PetscErrorCode MatScale_MPIBAIJ(Mat A,PetscScalar aa)
1430: {
1431: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1435: MatScale(a->A,aa);
1436: MatScale(a->B,aa);
1437: return(0);
1438: }
1442: PetscErrorCode MatGetRow_MPIBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1443: {
1444: Mat_MPIBAIJ *mat = (Mat_MPIBAIJ*)matin->data;
1445: PetscScalar *vworkA,*vworkB,**pvA,**pvB,*v_p;
1447: PetscInt bs = matin->rmap->bs,bs2 = mat->bs2,i,*cworkA,*cworkB,**pcA,**pcB;
1448: PetscInt nztot,nzA,nzB,lrow,brstart = matin->rmap->rstart,brend = matin->rmap->rend;
1449: PetscInt *cmap,*idx_p,cstart = mat->cstartbs;
1452: if (row < brstart || row >= brend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local rows");
1453: if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1454: mat->getrowactive = PETSC_TRUE;
1456: if (!mat->rowvalues && (idx || v)) {
1457: /*
1458: allocate enough space to hold information from the longest row.
1459: */
1460: Mat_SeqBAIJ *Aa = (Mat_SeqBAIJ*)mat->A->data,*Ba = (Mat_SeqBAIJ*)mat->B->data;
1461: PetscInt max = 1,mbs = mat->mbs,tmp;
1462: for (i=0; i<mbs; i++) {
1463: tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1464: if (max < tmp) { max = tmp; }
1465: }
1466: PetscMalloc2(max*bs2,PetscScalar,&mat->rowvalues,max*bs2,PetscInt,&mat->rowindices);
1467: }
1468: lrow = row - brstart;
1470: pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1471: if (!v) {pvA = 0; pvB = 0;}
1472: if (!idx) {pcA = 0; if (!v) pcB = 0;}
1473: (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1474: (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1475: nztot = nzA + nzB;
1477: cmap = mat->garray;
1478: if (v || idx) {
1479: if (nztot) {
1480: /* Sort by increasing column numbers, assuming A and B already sorted */
1481: PetscInt imark = -1;
1482: if (v) {
1483: *v = v_p = mat->rowvalues;
1484: for (i=0; i<nzB; i++) {
1485: if (cmap[cworkB[i]/bs] < cstart) v_p[i] = vworkB[i];
1486: else break;
1487: }
1488: imark = i;
1489: for (i=0; i<nzA; i++) v_p[imark+i] = vworkA[i];
1490: for (i=imark; i<nzB; i++) v_p[nzA+i] = vworkB[i];
1491: }
1492: if (idx) {
1493: *idx = idx_p = mat->rowindices;
1494: if (imark > -1) {
1495: for (i=0; i<imark; i++) {
1496: idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1497: }
1498: } else {
1499: for (i=0; i<nzB; i++) {
1500: if (cmap[cworkB[i]/bs] < cstart)
1501: idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1502: else break;
1503: }
1504: imark = i;
1505: }
1506: for (i=0; i<nzA; i++) idx_p[imark+i] = cstart*bs + cworkA[i];
1507: for (i=imark; i<nzB; i++) idx_p[nzA+i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1508: }
1509: } else {
1510: if (idx) *idx = 0;
1511: if (v) *v = 0;
1512: }
1513: }
1514: *nz = nztot;
1515: (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1516: (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1517: return(0);
1518: }
1522: PetscErrorCode MatRestoreRow_MPIBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1523: {
1524: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1527: if (!baij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1528: baij->getrowactive = PETSC_FALSE;
1529: return(0);
1530: }
1534: PetscErrorCode MatZeroEntries_MPIBAIJ(Mat A)
1535: {
1536: Mat_MPIBAIJ *l = (Mat_MPIBAIJ*)A->data;
1540: MatZeroEntries(l->A);
1541: MatZeroEntries(l->B);
1542: return(0);
1543: }
1547: PetscErrorCode MatGetInfo_MPIBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1548: {
1549: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)matin->data;
1550: Mat A = a->A,B = a->B;
1552: PetscReal isend[5],irecv[5];
1555: info->block_size = (PetscReal)matin->rmap->bs;
1556: MatGetInfo(A,MAT_LOCAL,info);
1557: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1558: isend[3] = info->memory; isend[4] = info->mallocs;
1559: MatGetInfo(B,MAT_LOCAL,info);
1560: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1561: isend[3] += info->memory; isend[4] += info->mallocs;
1562: if (flag == MAT_LOCAL) {
1563: info->nz_used = isend[0];
1564: info->nz_allocated = isend[1];
1565: info->nz_unneeded = isend[2];
1566: info->memory = isend[3];
1567: info->mallocs = isend[4];
1568: } else if (flag == MAT_GLOBAL_MAX) {
1569: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,((PetscObject)matin)->comm);
1570: info->nz_used = irecv[0];
1571: info->nz_allocated = irecv[1];
1572: info->nz_unneeded = irecv[2];
1573: info->memory = irecv[3];
1574: info->mallocs = irecv[4];
1575: } else if (flag == MAT_GLOBAL_SUM) {
1576: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,((PetscObject)matin)->comm);
1577: info->nz_used = irecv[0];
1578: info->nz_allocated = irecv[1];
1579: info->nz_unneeded = irecv[2];
1580: info->memory = irecv[3];
1581: info->mallocs = irecv[4];
1582: } else {
1583: SETERRQ1(((PetscObject)matin)->comm,PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag);
1584: }
1585: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1586: info->fill_ratio_needed = 0;
1587: info->factor_mallocs = 0;
1588: return(0);
1589: }
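/*
   Usage sketch (the communicator is that of the matrix):

       MatInfo info;
       MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
       PetscPrintf(((PetscObject)mat)->comm,"nz used %g\n",info.nz_used);
*/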
1593: PetscErrorCode MatSetOption_MPIBAIJ(Mat A,MatOption op,PetscBool flg)
1594: {
1595: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1599: switch (op) {
1600: case MAT_NEW_NONZERO_LOCATIONS:
1601: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1602: case MAT_UNUSED_NONZERO_LOCATION_ERR:
1603: case MAT_KEEP_NONZERO_PATTERN:
1604: case MAT_NEW_NONZERO_LOCATION_ERR:
1605: MatSetOption(a->A,op,flg);
1606: MatSetOption(a->B,op,flg);
1607: break;
1608: case MAT_ROW_ORIENTED:
1609: a->roworiented = flg;
1610: MatSetOption(a->A,op,flg);
1611: MatSetOption(a->B,op,flg);
1612: break;
1613: case MAT_NEW_DIAGONALS:
1614: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1615: break;
1616: case MAT_IGNORE_OFF_PROC_ENTRIES:
1617: a->donotstash = flg;
1618: break;
1619: case MAT_USE_HASH_TABLE:
1620: a->ht_flag = flg;
1621: break;
1622: case MAT_SYMMETRIC:
1623: case MAT_STRUCTURALLY_SYMMETRIC:
1624: case MAT_HERMITIAN:
1625: case MAT_SYMMETRY_ETERNAL:
1626: MatSetOption(a->A,op,flg);
1627: break;
1628: default:
1629: SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_SUP,"unknown option %d",op);
1630: }
1631: return(0);
1632: }
1636: PetscErrorCode MatTranspose_MPIBAIJ(Mat A,MatReuse reuse,Mat *matout)
1637: {
1638: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)A->data;
1639: Mat_SeqBAIJ *Aloc;
1640: Mat B;
1642: PetscInt M=A->rmap->N,N=A->cmap->N,*ai,*aj,i,*rvals,j,k,col;
1643: PetscInt bs=A->rmap->bs,mbs=baij->mbs;
1644: MatScalar *a;
1645:
1647: if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");
1648: if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1649: MatCreate(((PetscObject)A)->comm,&B);
1650: MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
1651: MatSetType(B,((PetscObject)A)->type_name);
1652: /* Do not know preallocation information, but must set block size */
1653: MatMPIBAIJSetPreallocation(B,A->rmap->bs,PETSC_DECIDE,PETSC_NULL,PETSC_DECIDE,PETSC_NULL);
1654: } else {
1655: B = *matout;
1656: }
1658: /* copy over the A part */
1659: Aloc = (Mat_SeqBAIJ*)baij->A->data;
1660: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1661: PetscMalloc(bs*sizeof(PetscInt),&rvals);
1662:
1663: for (i=0; i<mbs; i++) {
1664: rvals[0] = bs*(baij->rstartbs + i);
1665: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1666: for (j=ai[i]; j<ai[i+1]; j++) {
1667: col = (baij->cstartbs+aj[j])*bs;
1668: for (k=0; k<bs; k++) {
1669: MatSetValues_MPIBAIJ(B,1,&col,bs,rvals,a,INSERT_VALUES);
1670: col++; a += bs;
1671: }
1672: }
1673: }
1674: /* copy over the B part */
1675: Aloc = (Mat_SeqBAIJ*)baij->B->data;
1676: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1677: for (i=0; i<mbs; i++) {
1678: rvals[0] = bs*(baij->rstartbs + i);
1679: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1680: for (j=ai[i]; j<ai[i+1]; j++) {
1681: col = baij->garray[aj[j]]*bs;
1682: for (k=0; k<bs; k++) {
1683: MatSetValues_MPIBAIJ(B,1,&col,bs,rvals,a,INSERT_VALUES);
1684: col++; a += bs;
1685: }
1686: }
1687: }
1688: PetscFree(rvals);
1689: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1690: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1691:
1692: if (reuse == MAT_INITIAL_MATRIX || *matout != A) {
1693: *matout = B;
1694: } else {
1695: MatHeaderMerge(A,B);
1696: }
1697: return(0);
1698: }
1702: PetscErrorCode MatDiagonalScale_MPIBAIJ(Mat mat,Vec ll,Vec rr)
1703: {
1704: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1705: Mat a = baij->A,b = baij->B;
1707: PetscInt s1,s2,s3;
1710: MatGetLocalSize(mat,&s2,&s3);
1711: if (rr) {
1712: VecGetLocalSize(rr,&s1);
1713: if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1714: /* Overlap communication with computation. */
1715: VecScatterBegin(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1716: }
1717: if (ll) {
1718: VecGetLocalSize(ll,&s1);
1719: if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1720: (*b->ops->diagonalscale)(b,ll,PETSC_NULL);
1721: }
1722: /* scale the diagonal block */
1723: (*a->ops->diagonalscale)(a,ll,rr);
1725: if (rr) {
1726: /* Do a scatter end and then right scale the off-diagonal block */
1727: VecScatterEnd(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1728: (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);
1729: }
1730:
1731: return(0);
1732: }
1736: PetscErrorCode MatZeroRows_MPIBAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
1737: {
1738: Mat_MPIBAIJ *l = (Mat_MPIBAIJ*)A->data;
1739: PetscErrorCode ierr;
1740: PetscMPIInt imdex,size = l->size,n,rank = l->rank;
1741: PetscInt i,*owners = A->rmap->range;
1742: PetscInt *nprocs,j,idx,nsends,row;
1743: PetscInt nmax,*svalues,*starts,*owner,nrecvs;
1744: PetscInt *rvalues,tag = ((PetscObject)A)->tag,count,base,slen,*source,lastidx = -1;
1745: PetscInt *lens,*lrows,*values,rstart_bs=A->rmap->rstart;
1746: MPI_Comm comm = ((PetscObject)A)->comm;
1747: MPI_Request *send_waits,*recv_waits;
1748: MPI_Status recv_status,*send_status;
1749: const PetscScalar *xx;
1750: PetscScalar *bb;
1751: #if defined(PETSC_DEBUG)
1752: PetscBool found = PETSC_FALSE;
1753: #endif
1754:
1756: /* first count number of contributors to each processor */
1757: PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
1758: PetscMemzero(nprocs,2*size*sizeof(PetscInt));
1759: PetscMalloc((N+1)*sizeof(PetscInt),&owner); /* see note*/
1760: j = 0;
1761: for (i=0; i<N; i++) {
1762: if (lastidx > (idx = rows[i])) j = 0;
1763: lastidx = idx;
1764: for (; j<size; j++) {
1765: if (idx >= owners[j] && idx < owners[j+1]) {
1766: nprocs[2*j]++;
1767: nprocs[2*j+1] = 1;
1768: owner[i] = j;
1769: #if defined(PETSC_DEBUG)
1770: found = PETSC_TRUE;
1771: #endif
1772: break;
1773: }
1774: }
1775: #if defined(PETSC_DEBUG)
1776: if (!found) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
1777: found = PETSC_FALSE;
1778: #endif
1779: }
1780: nsends = 0; for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}
1781:
1782: if (A->nooffproczerorows) {
1783: if (nsends > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"You called MatSetOption(,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) but set an off process zero row");
1784: nrecvs = nsends;
1785: nmax = N;
1786: } else {
1787: /* inform other processors of number of messages and max length*/
1788: PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
1789: }
1790:
1791: /* post receives: */
1792: PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);
1793: PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
1794: for (i=0; i<nrecvs; i++) {
1795: MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);
1796: }
1797:
1798: /* do sends:
1799: 1) starts[i] gives the starting index in svalues for stuff going to
1800: the ith processor
1801: */
1802: PetscMalloc((N+1)*sizeof(PetscInt),&svalues);
1803: PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
1804: PetscMalloc((size+1)*sizeof(PetscInt),&starts);
1805: starts[0] = 0;
1806: for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
1807: for (i=0; i<N; i++) {
1808: svalues[starts[owner[i]]++] = rows[i];
1809: }
1810:
1811: starts[0] = 0;
1812: for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
1813: count = 0;
1814: for (i=0; i<size; i++) {
1815: if (nprocs[2*i+1]) {
1816: MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);
1817: }
1818: }
1819: PetscFree(starts);
1821: base = owners[rank];
1822:
1823: /* wait on receives */
1824: PetscMalloc2(nrecvs+1,PetscInt,&lens,nrecvs+1,PetscInt,&source);
1825: count = nrecvs;
1826: slen = 0;
1827: while (count) {
1828: MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
1829: /* unpack receives into our local space */
1830: MPI_Get_count(&recv_status,MPIU_INT,&n);
1831: source[imdex] = recv_status.MPI_SOURCE;
1832: lens[imdex] = n;
1833: slen += n;
1834: count--;
1835: }
1836: PetscFree(recv_waits);
1837:
1838: /* move the data into the send scatter */
1839: PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);
1840: count = 0;
1841: for (i=0; i<nrecvs; i++) {
1842: values = rvalues + i*nmax;
1843: for (j=0; j<lens[i]; j++) {
1844: lrows[count++] = values[j] - base;
1845: }
1846: }
1847: PetscFree(rvalues);
1848: PetscFree2(lens,source);
1849: PetscFree(owner);
1850: PetscFree(nprocs);
1851:
1852: /* fix right hand side if needed */
1853: if (x && b) {
1854: VecGetArrayRead(x,&xx);
1855: VecGetArray(b,&bb);
1856: for (i=0; i<slen; i++) {
1857: bb[lrows[i]] = diag*xx[lrows[i]];
1858: }
1859: VecRestoreArrayRead(x,&xx);
1860: VecRestoreArray(b,&bb);
1861: }
1863: /* actually zap the local rows */
1864: /*
1865: Zero the required rows. If the "diagonal block" of the matrix
1866: is square and the user wishes to set the diagonal, we use separate
1867: code so that MatSetValues() is not called for each diagonal entry,
1868: which would allocate new memory each time, triggering many mallocs and slowing things down.
1870: */
1871: /* must zero l->B before l->A because the (diag) case below may put values into l->B*/
1872: MatZeroRows_SeqBAIJ(l->B,slen,lrows,0.0,0,0);
1873: if ((diag != 0.0) && (l->A->rmap->N == l->A->cmap->N)) {
1874: MatZeroRows_SeqBAIJ(l->A,slen,lrows,diag,0,0);
1875: } else if (diag != 0.0) {
1876: MatZeroRows_SeqBAIJ(l->A,slen,lrows,0.0,0,0);
1877: if (((Mat_SeqBAIJ*)l->A->data)->nonew) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\
1878: MAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
1879: for (i=0; i<slen; i++) {
1880: row = lrows[i] + rstart_bs;
1881: MatSetValues(A,1,&row,1,&row,&diag,INSERT_VALUES);
1882: }
1883: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
1884: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
1885: } else {
1886: MatZeroRows_SeqBAIJ(l->A,slen,lrows,0.0,0,0);
1887: }
1889: PetscFree(lrows);
1891: /* wait on sends */
1892: if (nsends) {
1893: PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
1894: MPI_Waitall(nsends,send_waits,send_status);
1895: PetscFree(send_status);
1896: }
1897: PetscFree(send_waits);
1898: PetscFree(svalues);
1900: return(0);
1901: }
1905: PetscErrorCode MatSetUnfactored_MPIBAIJ(Mat A)
1906: {
1907: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1911: MatSetUnfactored(a->A);
1912: return(0);
1913: }
1915: static PetscErrorCode MatDuplicate_MPIBAIJ(Mat,MatDuplicateOption,Mat *);
1919: PetscErrorCode MatEqual_MPIBAIJ(Mat A,Mat B,PetscBool *flag)
1920: {
1921: Mat_MPIBAIJ *matB = (Mat_MPIBAIJ*)B->data,*matA = (Mat_MPIBAIJ*)A->data;
1922: Mat a,b,c,d;
1923: PetscBool flg;
1927: a = matA->A; b = matA->B;
1928: c = matB->A; d = matB->B;
1930: MatEqual(a,c,&flg);
1931: if (flg) {
1932: MatEqual(b,d,&flg);
1933: }
1934: MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);
1935: return(0);
1936: }
1940: PetscErrorCode MatCopy_MPIBAIJ(Mat A,Mat B,MatStructure str)
1941: {
1943: Mat_MPIBAIJ *a = (Mat_MPIBAIJ *)A->data;
1944: Mat_MPIBAIJ *b = (Mat_MPIBAIJ *)B->data;
1947: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
1948: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
1949: MatCopy_Basic(A,B,str);
1950: } else {
1951: MatCopy(a->A,b->A,str);
1952: MatCopy(a->B,b->B,str);
1953: }
1954: return(0);
1955: }
1959: PetscErrorCode MatSetUp_MPIBAIJ(Mat A)
1960: {
1964: MatMPIBAIJSetPreallocation(A,A->rmap->bs,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
1965: return(0);
1966: }
1970: PetscErrorCode MatAXPY_MPIBAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
1971: {
1973: Mat_MPIBAIJ *xx=(Mat_MPIBAIJ *)X->data,*yy=(Mat_MPIBAIJ *)Y->data;
1974: PetscBLASInt bnz,one=1;
1975: Mat_SeqBAIJ *x,*y;
1978: if (str == SAME_NONZERO_PATTERN) {
1979: PetscScalar alpha = a;
1980: x = (Mat_SeqBAIJ *)xx->A->data;
1981: y = (Mat_SeqBAIJ *)yy->A->data;
1982: bnz = PetscBLASIntCast(x->nz);
1983: BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one);
1984: x = (Mat_SeqBAIJ *)xx->B->data;
1985: y = (Mat_SeqBAIJ *)yy->B->data;
1986: bnz = PetscBLASIntCast(x->nz);
1987: BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one);
1988: } else {
1989: MatAXPY_Basic(Y,a,X,str);
1990: }
1991: return(0);
1992: }
1996: PetscErrorCode MatRealPart_MPIBAIJ(Mat A)
1997: {
1998: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
2002: MatRealPart(a->A);
2003: MatRealPart(a->B);
2004: return(0);
2005: }
2009: PetscErrorCode MatImaginaryPart_MPIBAIJ(Mat A)
2010: {
2011: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
2015: MatImaginaryPart(a->A);
2016: MatImaginaryPart(a->B);
2017: return(0);
2018: }
2022: PetscErrorCode MatGetSubMatrix_MPIBAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
2023: {
2025: IS iscol_local;
2026: PetscInt csize;
2029: ISGetLocalSize(iscol,&csize);
2030: if (call == MAT_REUSE_MATRIX) {
2031: PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
2032: if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
2033: } else {
2034: ISAllGather(iscol,&iscol_local);
2035: }
2036: MatGetSubMatrix_MPIBAIJ_Private(mat,isrow,iscol_local,csize,call,newmat);
2037: if (call == MAT_INITIAL_MATRIX) {
2038: PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
2039: ISDestroy(&iscol_local);
2040: }
2041: return(0);
2042: }
2043: extern PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool*,PetscBool*,Mat*);
2046: /*
2047: Not great since it makes two copies of the submatrix: first a SeqBAIJ
2048: held locally, and then the end result obtained by concatenating the local
2049: matrices. Writing it directly would be much like MatGetSubMatrices_MPIBAIJ().
2050: */
2051: PetscErrorCode MatGetSubMatrix_MPIBAIJ_Private(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
2052: {
2054: PetscMPIInt rank,size;
2055: PetscInt i,m,n,rstart,row,rend,nz,*cwork,j,bs;
2056: PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal,ncol,nrow;
2057: Mat M,Mreuse;
2058: MatScalar *vwork,*aa;
2059: MPI_Comm comm = ((PetscObject)mat)->comm;
2060: IS isrow_new, iscol_new;
2061: PetscBool idflag,allrows, allcols;
2062: Mat_SeqBAIJ *aij;
2066: MPI_Comm_rank(comm,&rank);
2067: MPI_Comm_size(comm,&size);
2068: /* The compression and expansion should be avoided. It does not flag
2069: errors and might change the indices, hence it is buggy. */
2070: ISCompressIndicesGeneral(mat->rmap->N,mat->rmap->n,mat->rmap->bs,1,&isrow,&isrow_new);
2071: ISCompressIndicesGeneral(mat->cmap->N,mat->cmap->n,mat->cmap->bs,1,&iscol,&iscol_new);
2073: /* Check for special case: each processor gets entire matrix columns */
2074: ISIdentity(iscol,&idflag);
2075: ISGetLocalSize(iscol,&ncol);
2076: if (idflag && ncol == mat->cmap->N){
2077: allcols = PETSC_TRUE;
2078: } else {
2079: allcols = PETSC_FALSE;
2080: }
2082: ISIdentity(isrow,&idflag);
2083: ISGetLocalSize(isrow,&nrow);
2084: if (idflag && nrow == mat->rmap->N){
2085: allrows = PETSC_TRUE;
2086: } else {
2087: allrows = PETSC_FALSE;
2088: }
2089: if (call == MAT_REUSE_MATRIX) {
2090: PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject *)&Mreuse);
2091: if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
2092: MatGetSubMatrices_MPIBAIJ_local(mat,1,&isrow_new,&iscol_new,MAT_REUSE_MATRIX,&allrows,&allcols,&Mreuse);
2093: } else {
2094: MatGetSubMatrices_MPIBAIJ_local(mat,1,&isrow_new,&iscol_new,MAT_INITIAL_MATRIX,&allrows,&allcols,&Mreuse);
2095: }
2096: ISDestroy(&isrow_new);
2097: ISDestroy(&iscol_new);
2098: /*
2099: m - number of local rows
2100: n - number of columns (same on all processors)
2101: rstart - first row in new global matrix generated
2102: */
2103: MatGetBlockSize(mat,&bs);
2104: MatGetSize(Mreuse,&m,&n);
2105: m = m/bs;
2106: n = n/bs;
2107:
2108: if (call == MAT_INITIAL_MATRIX) {
2109: aij = (Mat_SeqBAIJ*)(Mreuse)->data;
2110: ii = aij->i;
2111: jj = aij->j;
2113: /*
2114: Determine the number of non-zeros in the diagonal and off-diagonal
2115: portions of the matrix in order to do correct preallocation
2116: */
2118: /* first get start and end of "diagonal" columns */
2119: if (csize == PETSC_DECIDE) {
2120: ISGetSize(isrow,&mglobal);
2121: if (mglobal == n*bs) { /* square matrix */
2122: nlocal = m;
2123: } else {
2124: nlocal = n/size + ((n % size) > rank);
2125: }
2126: } else {
2127: nlocal = csize/bs;
2128: }
2129: MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
2130: rstart = rend - nlocal;
2131: if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
2133: /* next, compute all the lengths */
2134: PetscMalloc((2*m+1)*sizeof(PetscInt),&dlens);
2135: olens = dlens + m;
2136: for (i=0; i<m; i++) {
2137: jend = ii[i+1] - ii[i];
2138: olen = 0;
2139: dlen = 0;
2140: for (j=0; j<jend; j++) {
2141: if (*jj < rstart || *jj >= rend) olen++;
2142: else dlen++;
2143: jj++;
2144: }
2145: olens[i] = olen;
2146: dlens[i] = dlen;
2147: }
2148: MatCreate(comm,&M);
2149: MatSetSizes(M,bs*m,bs*nlocal,PETSC_DECIDE,bs*n);
2150: MatSetType(M,((PetscObject)mat)->type_name);
2151: MatMPIBAIJSetPreallocation(M,bs,0,dlens,0,olens);
2152: PetscFree(dlens);
2153: } else {
2154: PetscInt ml,nl;
2156: M = *newmat;
2157: MatGetLocalSize(M,&ml,&nl);
2158: if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
2159: MatZeroEntries(M);
2160: /*
2161: The next two lines are needed so we may call MatSetValuesBlocked_MPIBAIJ() below directly,
2162: rather than the slower MatSetValues().
2163: */
2164: M->was_assembled = PETSC_TRUE;
2165: M->assembled = PETSC_FALSE;
2166: }
2167: MatSetOption(M,MAT_ROW_ORIENTED,PETSC_FALSE);
2168: MatGetOwnershipRange(M,&rstart,&rend);
2169: aij = (Mat_SeqBAIJ*)(Mreuse)->data;
2170: ii = aij->i;
2171: jj = aij->j;
2172: aa = aij->a;
2173: for (i=0; i<m; i++) {
2174: row = rstart/bs + i;
2175: nz = ii[i+1] - ii[i];
2176: cwork = jj; jj += nz;
2177: vwork = aa; aa += nz*bs*bs;
2178: MatSetValuesBlocked_MPIBAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
2179: }
2181: MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
2182: MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
2183: *newmat = M;
2185: /* save submatrix used in processor for next request */
2186: if (call == MAT_INITIAL_MATRIX) {
2187: PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
2188: PetscObjectDereference((PetscObject)Mreuse);
2189: }
2191: return(0);
2192: }
2196: PetscErrorCode MatPermute_MPIBAIJ(Mat A,IS rowp,IS colp,Mat *B)
2197: {
2198: MPI_Comm comm,pcomm;
2199: PetscInt first,local_size,nrows;
2200: const PetscInt *rows;
2201: PetscMPIInt size;
2202: IS crowp,growp,irowp,lrowp,lcolp,icolp;
2206: PetscObjectGetComm((PetscObject)A,&comm);
2207: /* make a collective version of 'rowp' */
2208: PetscObjectGetComm((PetscObject)rowp,&pcomm);
2209: if (pcomm==comm) {
2210: crowp = rowp;
2211: } else {
2212: ISGetSize(rowp,&nrows);
2213: ISGetIndices(rowp,&rows);
2214: ISCreateGeneral(comm,nrows,rows,PETSC_COPY_VALUES,&crowp);
2215: ISRestoreIndices(rowp,&rows);
2216: }
2217: /* collect the global row permutation and invert it */
2218: ISAllGather(crowp,&growp);
2219: ISSetPermutation(growp);
2220: if (pcomm!=comm) {
2221: ISDestroy(&crowp);
2222: }
2223: ISInvertPermutation(growp,PETSC_DECIDE,&irowp);
2224: /* get the local target indices */
2225: MatGetOwnershipRange(A,&first,PETSC_NULL);
2226: MatGetLocalSize(A,&local_size,PETSC_NULL);
2227: ISGetIndices(irowp,&rows);
2228: ISCreateGeneral(MPI_COMM_SELF,local_size,rows+first,PETSC_COPY_VALUES,&lrowp);
2229: ISRestoreIndices(irowp,&rows);
2230: ISDestroy(&irowp);
2231: /* the column permutation is so much easier;
2232: make a local version of 'colp' and invert it */
2233: PetscObjectGetComm((PetscObject)colp,&pcomm);
2234: MPI_Comm_size(pcomm,&size);
2235: if (size==1) {
2236: lcolp = colp;
2237: } else {
2238: ISGetSize(colp,&nrows);
2239: ISGetIndices(colp,&rows);
2240: ISCreateGeneral(MPI_COMM_SELF,nrows,rows,PETSC_COPY_VALUES,&lcolp);
2241: }
2242: ISSetPermutation(lcolp);
2243: ISInvertPermutation(lcolp,PETSC_DECIDE,&icolp);
2244: ISSetPermutation(icolp);
2245: if (size>1) {
2246: ISRestoreIndices(colp,&rows);
2247: ISDestroy(&lcolp);
2248: }
2249: /* now we just get the submatrix */
2250: MatGetSubMatrix_MPIBAIJ_Private(A,lrowp,icolp,local_size,MAT_INITIAL_MATRIX,B);
2251: /* clean up */
2252: ISDestroy(&lrowp);
2253: ISDestroy(&icolp);
2254: return(0);
2255: }
2259: PetscErrorCode MatGetGhosts_MPIBAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
2260: {
2261: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*) mat->data;
2262: Mat_SeqBAIJ *B = (Mat_SeqBAIJ*)baij->B->data;
2265: if (nghosts) { *nghosts = B->nbs;}
2266: if (ghosts) {*ghosts = baij->garray;}
2267: return(0);
2268: }
2270: extern PetscErrorCode MatCreateColmap_MPIBAIJ_Private(Mat);
2274: /*
2275: This routine is almost identical to MatFDColoringCreate_MPIAIJ()!
2276: */
2277: PetscErrorCode MatFDColoringCreate_MPIBAIJ(Mat mat,ISColoring iscoloring,MatFDColoring c)
2278: {
2279: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
2280: PetscErrorCode ierr;
2281: PetscMPIInt size,*ncolsonproc,*disp,nn;
2282: PetscInt bs,i,n,nrows,j,k,m,*rows = 0,*A_ci,*A_cj,ncols,col;
2283: const PetscInt *is;
2284: PetscInt nis = iscoloring->n,nctot,*cols,*B_ci,*B_cj;
2285: PetscInt *rowhit,M,cstart,cend,colb;
2286: PetscInt *columnsforrow,l;
2287: IS *isa;
2288: PetscBool done,flg;
2289: ISLocalToGlobalMapping map = mat->cmap->bmapping;
2290: PetscInt *ltog = (map ? map->indices : (PetscInt*) PETSC_NULL) ,ctype=c->ctype;
2293: if (!mat->assembled) SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be assembled first; MatAssemblyBegin/End();");
2294: if (ctype == IS_COLORING_GHOSTED && !map) SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_ARG_INCOMP,"When using ghosted differencing matrix must have local to global mapping provided with MatSetLocalToGlobalMappingBlock");
2296: ISColoringGetIS(iscoloring,PETSC_IGNORE,&isa);
2297: MatGetBlockSize(mat,&bs);
2298: M = mat->rmap->n/bs;
2299: cstart = mat->cmap->rstart/bs;
2300: cend = mat->cmap->rend/bs;
2301: c->M = mat->rmap->N/bs; /* set the global rows and columns and local rows */
2302: c->N = mat->cmap->N/bs;
2303: c->m = mat->rmap->n/bs;
2304: c->rstart = mat->rmap->rstart/bs;
2306: c->ncolors = nis;
2307: PetscMalloc(nis*sizeof(PetscInt),&c->ncolumns);
2308: PetscMalloc(nis*sizeof(PetscInt*),&c->columns);
2309: PetscMalloc(nis*sizeof(PetscInt),&c->nrows);
2310: PetscMalloc(nis*sizeof(PetscInt*),&c->rows);
2311: PetscMalloc(nis*sizeof(PetscInt*),&c->columnsforrow);
2312: PetscLogObjectMemory(c,5*nis*sizeof(PetscInt));
2314: /* Allow access to data structures of local part of matrix */
2315: if (!baij->colmap) {
2316: MatCreateColmap_MPIBAIJ_Private(mat);
2317: }
2318: MatGetColumnIJ(baij->A,0,PETSC_FALSE,PETSC_FALSE,&ncols,&A_ci,&A_cj,&done);
2319: MatGetColumnIJ(baij->B,0,PETSC_FALSE,PETSC_FALSE,&ncols,&B_ci,&B_cj,&done);
2320:
2321: PetscMalloc((M+1)*sizeof(PetscInt),&rowhit);
2322: PetscMalloc((M+1)*sizeof(PetscInt),&columnsforrow);
2324: for (i=0; i<nis; i++) {
2325: ISGetLocalSize(isa[i],&n);
2326: ISGetIndices(isa[i],&is);
2327: c->ncolumns[i] = n;
2328: if (n) {
2329: PetscMalloc(n*sizeof(PetscInt),&c->columns[i]);
2330: PetscLogObjectMemory(c,n*sizeof(PetscInt));
2331: PetscMemcpy(c->columns[i],is,n*sizeof(PetscInt));
2332: } else {
2333: c->columns[i] = 0;
2334: }
2336: if (ctype == IS_COLORING_GLOBAL){
2337: /* Determine the total (parallel) number of columns of this color */
2338: MPI_Comm_size(((PetscObject)mat)->comm,&size);
2339: PetscMalloc2(size,PetscMPIInt,&ncolsonproc,size,PetscMPIInt,&disp);
2341: nn = PetscMPIIntCast(n);
2342: MPI_Allgather(&nn,1,MPI_INT,ncolsonproc,1,MPI_INT,((PetscObject)mat)->comm);
2343: nctot = 0; for (j=0; j<size; j++) {nctot += ncolsonproc[j];}
2344: if (!nctot) {
2345: PetscInfo(mat,"Coloring of matrix has some unneeded colors with no corresponding rows\n");
2346: }
2348: disp[0] = 0;
2349: for (j=1; j<size; j++) {
2350: disp[j] = disp[j-1] + ncolsonproc[j-1];
2351: }
2353: /* Get complete list of columns for color on each processor */
2354: PetscMalloc((nctot+1)*sizeof(PetscInt),&cols);
2355: MPI_Allgatherv((void*)is,n,MPIU_INT,cols,ncolsonproc,disp,MPIU_INT,((PetscObject)mat)->comm);
2356: PetscFree2(ncolsonproc,disp);
2357: } else if (ctype == IS_COLORING_GHOSTED){
2358: /* Determine local number of columns of this color on this process, including ghost points */
2359: nctot = n;
2360: PetscMalloc((nctot+1)*sizeof(PetscInt),&cols);
2361: PetscMemcpy(cols,is,n*sizeof(PetscInt));
2362: } else {
2363: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not provided for this MatFDColoring type");
2364: }
2366: /*
2367: Mark all rows affected by these columns
2368: */
2369: /* Temporary option to allow for debugging/testing */
2370: flg = PETSC_FALSE;
2371: PetscOptionsGetBool(PETSC_NULL,"-matfdcoloring_slow",&flg,PETSC_NULL);
2372: if (!flg) {/*-----------------------------------------------------------------------------*/
2373: /* crude, fast version */
2374: PetscMemzero(rowhit,M*sizeof(PetscInt));
2375: /* loop over columns*/
2376: for (j=0; j<nctot; j++) {
2377: if (ctype == IS_COLORING_GHOSTED) {
2378: col = ltog[cols[j]];
2379: } else {
2380: col = cols[j];
2381: }
2382: if (col >= cstart && col < cend) {
2383: /* column is in diagonal block of matrix */
2384: rows = A_cj + A_ci[col-cstart];
2385: m = A_ci[col-cstart+1] - A_ci[col-cstart];
2386: } else {
2387: #if defined (PETSC_USE_CTABLE)
2388: PetscTableFind(baij->colmap,col+1,&colb);
2389: colb --;
2390: #else
2391: colb = baij->colmap[col] - 1;
2392: #endif
2393: if (colb == -1) {
2394: m = 0;
2395: } else {
2396: colb = colb/bs;
2397: rows = B_cj + B_ci[colb];
2398: m = B_ci[colb+1] - B_ci[colb];
2399: }
2400: }
2401: /* loop over columns marking them in rowhit */
2402: for (k=0; k<m; k++) {
2403: rowhit[*rows++] = col + 1;
2404: }
2405: }
2407: /* count the number of hits */
2408: nrows = 0;
2409: for (j=0; j<M; j++) {
2410: if (rowhit[j]) nrows++;
2411: }
2412: c->nrows[i] = nrows;
2413: PetscMalloc((nrows+1)*sizeof(PetscInt),&c->rows[i]);
2414: PetscMalloc((nrows+1)*sizeof(PetscInt),&c->columnsforrow[i]);
2415: PetscLogObjectMemory(c,2*(nrows+1)*sizeof(PetscInt));
2416: nrows = 0;
2417: for (j=0; j<M; j++) {
2418: if (rowhit[j]) {
2419: c->rows[i][nrows] = j;
2420: c->columnsforrow[i][nrows] = rowhit[j] - 1;
2421: nrows++;
2422: }
2423: }
2424: } else {/*-------------------------------------------------------------------------------*/
2425: /* slow version, using rowhit as a linked list */
2426: PetscInt currentcol,fm,mfm;
2427: rowhit[M] = M;
2428: nrows = 0;
2429: /* loop over columns*/
2430: for (j=0; j<nctot; j++) {
2431: if (ctype == IS_COLORING_GHOSTED) {
2432: col = ltog[cols[j]];
2433: } else {
2434: col = cols[j];
2435: }
2436: if (col >= cstart && col < cend) {
2437: /* column is in diagonal block of matrix */
2438: rows = A_cj + A_ci[col-cstart];
2439: m = A_ci[col-cstart+1] - A_ci[col-cstart];
2440: } else {
2441: #if defined (PETSC_USE_CTABLE)
2442: PetscTableFind(baij->colmap,col+1,&colb);
2443: colb --;
2444: #else
2445: colb = baij->colmap[col] - 1;
2446: #endif
2447: if (colb == -1) {
2448: m = 0;
2449: } else {
2450: colb = colb/bs;
2451: rows = B_cj + B_ci[colb];
2452: m = B_ci[colb+1] - B_ci[colb];
2453: }
2454: }
2456: /* loop over columns marking them in rowhit */
2457: fm = M; /* fm points to first entry in linked list */
2458: for (k=0; k<m; k++) {
2459: currentcol = *rows++;
2460: /* is it already in the list? */
2461: do {
2462: mfm = fm;
2463: fm = rowhit[fm];
2464: } while (fm < currentcol);
2465: /* not in list so add it */
2466: if (fm != currentcol) {
2467: nrows++;
2468: columnsforrow[currentcol] = col;
2469: /* next three lines insert new entry into linked list */
2470: rowhit[mfm] = currentcol;
2471: rowhit[currentcol] = fm;
2472: fm = currentcol;
2473: /* fm points to present position in list since we know the columns are sorted */
2474: } else {
2475: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Invalid coloring of matrix detected");
2476: }
2477: }
2478: }
2479: c->nrows[i] = nrows;
2480: PetscMalloc((nrows+1)*sizeof(PetscInt),&c->rows[i]);
2481: PetscMalloc((nrows+1)*sizeof(PetscInt),&c->columnsforrow[i]);
2482: PetscLogObjectMemory(c,(nrows+1)*sizeof(PetscInt));
2483: /* now store the linked list of rows into c->rows[i] */
2484: nrows = 0;
2485: fm = rowhit[M];
2486: do {
2487: c->rows[i][nrows] = fm;
2488: c->columnsforrow[i][nrows++] = columnsforrow[fm];
2489: fm = rowhit[fm];
2490: } while (fm < M);
2491: } /* ---------------------------------------------------------------------------------------*/
2492: PetscFree(cols);
2493: }
2495: /* Optimize by adding the vscale, and scaleforrow[][] fields */
2496: /*
2497: vscale will contain the on-processor ("diagonal") scalings followed by the off-processor ones
2498: */
2499: if (ctype == IS_COLORING_GLOBAL) {
2500: PetscInt *garray;
2501: PetscMalloc(baij->B->cmap->n*sizeof(PetscInt),&garray);
2502: for (i=0; i<baij->B->cmap->n/bs; i++) {
2503: for (j=0; j<bs; j++) {
2504: garray[i*bs+j] = bs*baij->garray[i]+j;
2505: }
2506: }
2507: VecCreateGhost(((PetscObject)mat)->comm,baij->A->rmap->n,PETSC_DETERMINE,baij->B->cmap->n,garray,&c->vscale);
2508: PetscFree(garray);
2509: CHKMEMQ;
2510: PetscMalloc(c->ncolors*sizeof(PetscInt*),&c->vscaleforrow);
2511: for (k=0; k<c->ncolors; k++) {
2512: PetscMalloc((c->nrows[k]+1)*sizeof(PetscInt),&c->vscaleforrow[k]);
2513: for (l=0; l<c->nrows[k]; l++) {
2514: col = c->columnsforrow[k][l];
2515: if (col >= cstart && col < cend) {
2516: /* column is in diagonal block of matrix */
2517: colb = col - cstart;
2518: } else {
2519: /* column is in "off-processor" part */
2520: #if defined (PETSC_USE_CTABLE)
2521: PetscTableFind(baij->colmap,col+1,&colb);
2522: colb --;
2523: #else
2524: colb = baij->colmap[col] - 1;
2525: #endif
2526: colb = colb/bs;
2527: colb += cend - cstart;
2528: }
2529: c->vscaleforrow[k][l] = colb;
2530: }
2531: }
2532: } else if (ctype == IS_COLORING_GHOSTED) {
2533: /* Get gtol mapping */
2534: PetscInt N = mat->cmap->N, *gtol;
2535: PetscMalloc((N+1)*sizeof(PetscInt),&gtol);
2536: for (i=0; i<N; i++) gtol[i] = -1;
2537: for (i=0; i<map->n; i++) gtol[ltog[i]] = i;
2538:
2539: c->vscale = 0; /* will be created in MatFDColoringApply() */
2540: PetscMalloc(c->ncolors*sizeof(PetscInt*),&c->vscaleforrow);
2541: for (k=0; k<c->ncolors; k++) {
2542: PetscMalloc((c->nrows[k]+1)*sizeof(PetscInt),&c->vscaleforrow[k]);
2543: for (l=0; l<c->nrows[k]; l++) {
2544: col = c->columnsforrow[k][l]; /* global column index */
2545: c->vscaleforrow[k][l] = gtol[col]; /* local column index */
2546: }
2547: }
2548: PetscFree(gtol);
2549: }
2550: ISColoringRestoreIS(iscoloring,&isa);
2552: PetscFree(rowhit);
2553: PetscFree(columnsforrow);
2554: MatRestoreColumnIJ(baij->A,0,PETSC_FALSE,PETSC_FALSE,&ncols,&A_ci,&A_cj,&done);
2555: MatRestoreColumnIJ(baij->B,0,PETSC_FALSE,PETSC_FALSE,&ncols,&B_ci,&B_cj,&done);
2556: CHKMEMQ;
2557: return(0);
2558: }
2562: PetscErrorCode MatGetSeqNonzeroStructure_MPIBAIJ(Mat A,Mat *newmat)
2563: {
2564: Mat B;
2565: Mat_MPIBAIJ *a = (Mat_MPIBAIJ *)A->data;
2566: Mat_SeqBAIJ *ad = (Mat_SeqBAIJ*)a->A->data,*bd = (Mat_SeqBAIJ*)a->B->data;
2567: Mat_SeqAIJ *b;
2569: PetscMPIInt size,rank,*recvcounts = 0,*displs = 0;
2570: PetscInt sendcount,i,*rstarts = A->rmap->range,n,cnt,j,bs = A->rmap->bs;
2571: PetscInt m,*garray = a->garray,*lens,*jsendbuf,*a_jsendbuf,*b_jsendbuf;
2574: MPI_Comm_size(((PetscObject)A)->comm,&size);
2575: MPI_Comm_rank(((PetscObject)A)->comm,&rank);
2577: /* ----------------------------------------------------------------
2578: Tell every processor the number of nonzeros per row
2579: */
2580: PetscMalloc((A->rmap->N/bs)*sizeof(PetscInt),&lens);
2581: for (i=A->rmap->rstart/bs; i<A->rmap->rend/bs; i++) {
2582: lens[i] = ad->i[i-A->rmap->rstart/bs+1] - ad->i[i-A->rmap->rstart/bs] + bd->i[i-A->rmap->rstart/bs+1] - bd->i[i-A->rmap->rstart/bs];
2583: }
2584: sendcount = A->rmap->rend/bs - A->rmap->rstart/bs;
2585: PetscMalloc(2*size*sizeof(PetscMPIInt),&recvcounts);
2586: displs = recvcounts + size;
2587: for (i=0; i<size; i++) {
2588: recvcounts[i] = A->rmap->range[i+1]/bs - A->rmap->range[i]/bs;
2589: displs[i] = A->rmap->range[i]/bs;
2590: }
2591: #if defined(PETSC_HAVE_MPI_IN_PLACE)
2592: MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,lens,recvcounts,displs,MPIU_INT,((PetscObject)A)->comm);
2593: #else
2594: MPI_Allgatherv(lens+A->rmap->rstart/bs,sendcount,MPIU_INT,lens,recvcounts,displs,MPIU_INT,((PetscObject)A)->comm);
2595: #endif
2596: /* ---------------------------------------------------------------
2597: Create a sequential AIJ matrix that will hold the global nonzero structure at block granularity
2598: */
2599: MatCreate(PETSC_COMM_SELF,&B);
2600: MatSetSizes(B,A->rmap->N/bs,A->cmap->N/bs,PETSC_DETERMINE,PETSC_DETERMINE);
2601: MatSetType(B,MATSEQAIJ);
2602: MatSeqAIJSetPreallocation(B,0,lens);
2603: b = (Mat_SeqAIJ *)B->data;
2605: /*--------------------------------------------------------------------
2606: Copy my part of matrix column indices over
2607: */
2608: sendcount = ad->nz + bd->nz;
2609: jsendbuf = b->j + b->i[rstarts[rank]/bs];
2610: a_jsendbuf = ad->j;
2611: b_jsendbuf = bd->j;
2612: n = A->rmap->rend/bs - A->rmap->rstart/bs;
2613: cnt = 0;
2614: for (i=0; i<n; i++) {
2616: /* put in lower diagonal portion */
2617: m = bd->i[i+1] - bd->i[i];
2618: while (m > 0) {
2619: /* is it above diagonal (in bd (compressed) numbering) */
2620: if (garray[*b_jsendbuf] > A->rmap->rstart/bs + i) break;
2621: jsendbuf[cnt++] = garray[*b_jsendbuf++];
2622: m--;
2623: }
2625: /* put in diagonal portion */
2626: for (j=ad->i[i]; j<ad->i[i+1]; j++) {
2627: jsendbuf[cnt++] = A->rmap->rstart/bs + *a_jsendbuf++;
2628: }
2630: /* put in upper diagonal portion */
2631: while (m-- > 0) {
2632: jsendbuf[cnt++] = garray[*b_jsendbuf++];
2633: }
2634: }
2635: if (cnt != sendcount) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt);
2637: /*--------------------------------------------------------------------
2638: Gather all column indices to all processors
2639: */
2640: for (i=0; i<size; i++) {
2641: recvcounts[i] = 0;
2642: for (j=A->rmap->range[i]/bs; j<A->rmap->range[i+1]/bs; j++) {
2643: recvcounts[i] += lens[j];
2644: }
2645: }
2646: displs[0] = 0;
2647: for (i=1; i<size; i++) {
2648: displs[i] = displs[i-1] + recvcounts[i-1];
2649: }
2650: #if defined(PETSC_HAVE_MPI_IN_PLACE)
2651: MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,b->j,recvcounts,displs,MPIU_INT,((PetscObject)A)->comm);
2652: #else
2653: MPI_Allgatherv(jsendbuf,sendcount,MPIU_INT,b->j,recvcounts,displs,MPIU_INT,((PetscObject)A)->comm);
2654: #endif
2655: /*--------------------------------------------------------------------
2656: Assemble the matrix into usable form (note that the numerical values are not yet set)
2657: */
2658: /* set the b->ilen (length of each row) values */
2659: PetscMemcpy(b->ilen,lens,(A->rmap->N/bs)*sizeof(PetscInt));
2660: /* set the b->i indices */
2661: b->i[0] = 0;
2662: for (i=1; i<=A->rmap->N/bs; i++) {
2663: b->i[i] = b->i[i-1] + lens[i-1];
2664: }
2665: PetscFree(lens);
2666: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2667: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2668: PetscFree(recvcounts);
2670: if (A->symmetric){
2671: MatSetOption(B,MAT_SYMMETRIC,PETSC_TRUE);
2672: } else if (A->hermitian) {
2673: MatSetOption(B,MAT_HERMITIAN,PETSC_TRUE);
2674: } else if (A->structurally_symmetric) {
2675: MatSetOption(B,MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
2676: }
2677: *newmat = B;
2678: return(0);
2679: }
2683: PetscErrorCode MatSOR_MPIBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2684: {
2685: Mat_MPIBAIJ *mat = (Mat_MPIBAIJ*)matin->data;
2687: Vec bb1 = 0;
2690: if (flag == SOR_APPLY_UPPER) {
2691: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2692: return(0);
2693: }
2695: if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS) { /* a work vector is needed unless this is a single sweep with a zero initial guess */
2696: VecDuplicate(bb,&bb1);
2697: }
2699: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2700: if (flag & SOR_ZERO_INITIAL_GUESS) {
2701: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2702: its--;
2703: }
2704:
2705: while (its--) {
2706: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2707: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2709: /* update rhs: bb1 = bb - B*x */
2710: VecScale(mat->lvec,-1.0);
2711: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
2713: /* local sweep */
2714: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
2715: }
2716: } else if (flag & SOR_LOCAL_FORWARD_SWEEP){
2717: if (flag & SOR_ZERO_INITIAL_GUESS) {
2718: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2719: its--;
2720: }
2721: while (its--) {
2722: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2723: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2725: /* update rhs: bb1 = bb - B*x */
2726: VecScale(mat->lvec,-1.0);
2727: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
2729: /* local sweep */
2730: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
2731: }
2732: } else if (flag & SOR_LOCAL_BACKWARD_SWEEP){
2733: if (flag & SOR_ZERO_INITIAL_GUESS) {
2734: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2735: its--;
2736: }
2737: while (its--) {
2738: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2739: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2741: /* update rhs: bb1 = bb - B*x */
2742: VecScale(mat->lvec,-1.0);
2743: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
2745: /* local sweep */
2746: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
2747: }
2748: } else SETERRQ(((PetscObject)matin)->comm,PETSC_ERR_SUP,"Requested parallel SOR variant is not supported");
2750: VecDestroy(&bb1);
2751: return(0);
2752: }
2754: extern PetscErrorCode MatFDColoringApply_BAIJ(Mat,MatFDColoring,Vec,MatStructure*,void*);
2758: PetscErrorCode MatInvertBlockDiagonal_MPIBAIJ(Mat A,const PetscScalar **values)
2759: {
2760: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*) A->data;
2764: MatInvertBlockDiagonal(a->A,values);
2765: return(0);
2766: }
2769: /* -------------------------------------------------------------------*/
2770: static struct _MatOps MatOps_Values = {
2771: MatSetValues_MPIBAIJ,
2772: MatGetRow_MPIBAIJ,
2773: MatRestoreRow_MPIBAIJ,
2774: MatMult_MPIBAIJ,
2775: /* 4*/ MatMultAdd_MPIBAIJ,
2776: MatMultTranspose_MPIBAIJ,
2777: MatMultTransposeAdd_MPIBAIJ,
2778: 0,
2779: 0,
2780: 0,
2781: /*10*/ 0,
2782: 0,
2783: 0,
2784: MatSOR_MPIBAIJ,
2785: MatTranspose_MPIBAIJ,
2786: /*15*/ MatGetInfo_MPIBAIJ,
2787: MatEqual_MPIBAIJ,
2788: MatGetDiagonal_MPIBAIJ,
2789: MatDiagonalScale_MPIBAIJ,
2790: MatNorm_MPIBAIJ,
2791: /*20*/ MatAssemblyBegin_MPIBAIJ,
2792: MatAssemblyEnd_MPIBAIJ,
2793: MatSetOption_MPIBAIJ,
2794: MatZeroEntries_MPIBAIJ,
2795: /*24*/ MatZeroRows_MPIBAIJ,
2796: 0,
2797: 0,
2798: 0,
2799: 0,
2800: /*29*/ MatSetUp_MPIBAIJ,
2801: 0,
2802: 0,
2803: 0,
2804: 0,
2805: /*34*/ MatDuplicate_MPIBAIJ,
2806: 0,
2807: 0,
2808: 0,
2809: 0,
2810: /*39*/ MatAXPY_MPIBAIJ,
2811: MatGetSubMatrices_MPIBAIJ,
2812: MatIncreaseOverlap_MPIBAIJ,
2813: MatGetValues_MPIBAIJ,
2814: MatCopy_MPIBAIJ,
2815: /*44*/ 0,
2816: MatScale_MPIBAIJ,
2817: 0,
2818: 0,
2819: 0,
2820: /*49*/ 0,
2821: 0,
2822: 0,
2823: 0,
2824: 0,
2825: /*54*/ MatFDColoringCreate_MPIBAIJ,
2826: 0,
2827: MatSetUnfactored_MPIBAIJ,
2828: MatPermute_MPIBAIJ,
2829: MatSetValuesBlocked_MPIBAIJ,
2830: /*59*/ MatGetSubMatrix_MPIBAIJ,
2831: MatDestroy_MPIBAIJ,
2832: MatView_MPIBAIJ,
2833: 0,
2834: 0,
2835: /*64*/ 0,
2836: 0,
2837: 0,
2838: 0,
2839: 0,
2840: /*69*/ MatGetRowMaxAbs_MPIBAIJ,
2841: 0,
2842: 0,
2843: 0,
2844: 0,
2845: /*74*/ 0,
2846: MatFDColoringApply_BAIJ,
2847: 0,
2848: 0,
2849: 0,
2850: /*79*/ 0,
2851: 0,
2852: 0,
2853: 0,
2854: MatLoad_MPIBAIJ,
2855: /*84*/ 0,
2856: 0,
2857: 0,
2858: 0,
2859: 0,
2860: /*89*/ 0,
2861: 0,
2862: 0,
2863: 0,
2864: 0,
2865: /*94*/ 0,
2866: 0,
2867: 0,
2868: 0,
2869: 0,
2870: /*99*/ 0,
2871: 0,
2872: 0,
2873: 0,
2874: 0,
2875: /*104*/0,
2876: MatRealPart_MPIBAIJ,
2877: MatImaginaryPart_MPIBAIJ,
2878: 0,
2879: 0,
2880: /*109*/0,
2881: 0,
2882: 0,
2883: 0,
2884: 0,
2885: /*114*/MatGetSeqNonzeroStructure_MPIBAIJ,
2886: 0,
2887: MatGetGhosts_MPIBAIJ,
2888: 0,
2889: 0,
2890: /*119*/0,
2891: 0,
2892: 0,
2893: 0,
2894: 0,
2895: /*124*/0,
2896: 0,
2897: MatInvertBlockDiagonal_MPIBAIJ
2898: };
2900: EXTERN_C_BEGIN
2903: PetscErrorCode MatGetDiagonalBlock_MPIBAIJ(Mat A,Mat *a)
2904: {
2906: *a = ((Mat_MPIBAIJ *)A->data)->A;
2907: return(0);
2908: }
2909: EXTERN_C_END
2911: EXTERN_C_BEGIN
2912: extern PetscErrorCode MatConvert_MPIBAIJ_MPISBAIJ(Mat, MatType,MatReuse,Mat*);
2913: EXTERN_C_END
2915: EXTERN_C_BEGIN
2918: PetscErrorCode MatMPIBAIJSetPreallocationCSR_MPIBAIJ(Mat B,PetscInt bs,const PetscInt ii[],const PetscInt jj[],const PetscScalar V[])
2919: {
2920: PetscInt m,rstart,cstart,cend;
2921: PetscInt i,j,d,nz,nz_max=0,*d_nnz=0,*o_nnz=0;
2922: const PetscInt *JJ=0;
2923: PetscScalar *values=0;
2927: PetscLayoutSetBlockSize(B->rmap,bs);
2928: PetscLayoutSetBlockSize(B->cmap,bs);
2929: PetscLayoutSetUp(B->rmap);
2930: PetscLayoutSetUp(B->cmap);
2931: PetscLayoutGetBlockSize(B->rmap,&bs);
2932: m = B->rmap->n/bs;
2933: rstart = B->rmap->rstart/bs;
2934: cstart = B->cmap->rstart/bs;
2935: cend = B->cmap->rend/bs;
2937: if (ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"ii[0] must be 0 but it is %D",ii[0]);
2938: PetscMalloc2(m,PetscInt,&d_nnz,m,PetscInt,&o_nnz);
2939: for (i=0; i<m; i++) {
2940: nz = ii[i+1] - ii[i];
2941: if (nz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nz);
2942: nz_max = PetscMax(nz_max,nz);
2943: JJ = jj + ii[i];
2944: for (j=0; j<nz; j++) {
2945: if (*JJ >= cstart) break;
2946: JJ++;
2947: }
2948: d = 0;
2949: for (; j<nz; j++) {
2950: if (*JJ++ >= cend) break;
2951: d++;
2952: }
2953: d_nnz[i] = d;
2954: o_nnz[i] = nz - d;
2955: }
2956: MatMPIBAIJSetPreallocation(B,bs,0,d_nnz,0,o_nnz);
2957: PetscFree2(d_nnz,o_nnz);
2959: values = (PetscScalar*)V;
2960: if (!values) {
2961: PetscMalloc(bs*bs*nz_max*sizeof(PetscScalar),&values);
2962: PetscMemzero(values,bs*bs*nz_max*sizeof(PetscScalar));
2963: }
2964: for (i=0; i<m; i++) {
2965: PetscInt row = i + rstart;
2966: PetscInt ncols = ii[i+1] - ii[i];
2967: const PetscInt *icols = jj + ii[i];
2968: const PetscScalar *svals = values + (V ? (bs*bs*ii[i]) : 0);
2969: MatSetValuesBlocked_MPIBAIJ(B,1,&row,ncols,icols,svals,INSERT_VALUES);
2970: }
2972: if (!V) { PetscFree(values); }
2973: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2974: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2975: MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
2976: return(0);
2977: }
2978: EXTERN_C_END
2982: /*@C
2983: MatMPIBAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in BAIJ format
2984: (block compressed row), using a CSR-style description of the block nonzero structure.
2986: Collective on MPI_Comm
2988: Input Parameters:
2989: + A - the matrix
2990: . bs - the block size
2991: . i - the indices into j for the start of each local block row (starts with zero)
2992: . j - the block column indices for each local block row (starts with zero); these must be sorted within each row
2993: - v - optional values in the matrix
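
   Example of typical usage (a minimal sketch; bs=2, one local block row with
   two blocks, and a previously created MATMPIBAIJ matrix B are assumed):

.vb
   PetscInt    bs   = 2;
   PetscInt    ii[] = {0,2};   /* block row 0 holds two blocks                  */
   PetscInt    jj[] = {0,1};   /* sorted block column indices                   */
   PetscScalar vv[8];          /* bs*bs values per block, stored block by block;
                                  fill before use                               */
   MatMPIBAIJSetPreallocationCSR(B,bs,ii,jj,vv);
.ve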
2995: Level: developer
2997: .keywords: matrix, aij, compressed row, sparse, parallel
2999: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIBAIJSetPreallocation(), MatCreateAIJ(), MPIAIJ
3000: @*/
3001: PetscErrorCode MatMPIBAIJSetPreallocationCSR(Mat B,PetscInt bs,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3002: {
3009: PetscTryMethod(B,"MatMPIBAIJSetPreallocationCSR_C",(Mat,PetscInt,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,bs,i,j,v));
3010: return(0);
3011: }
3013: EXTERN_C_BEGIN
3016: PetscErrorCode MatMPIBAIJSetPreallocation_MPIBAIJ(Mat B,PetscInt bs,PetscInt d_nz,PetscInt *d_nnz,PetscInt o_nz,PetscInt *o_nnz)
3017: {
3018: Mat_MPIBAIJ *b;
3020: PetscInt i;
3021: PetscBool d_realalloc = PETSC_FALSE,o_realalloc = PETSC_FALSE;
3024: if (d_nz >= 0 || d_nnz) d_realalloc = PETSC_TRUE;
3025: if (o_nz >= 0 || o_nnz) o_realalloc = PETSC_TRUE;
3027: if (d_nz == PETSC_DEFAULT || d_nz == PETSC_DECIDE) d_nz = 5;
3028: if (o_nz == PETSC_DEFAULT || o_nz == PETSC_DECIDE) o_nz = 2;
3029: if (d_nz < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
3030: if (o_nz < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);
3031:
3032: PetscLayoutSetBlockSize(B->rmap,bs);
3033: PetscLayoutSetBlockSize(B->cmap,bs);
3034: PetscLayoutSetUp(B->rmap);
3035: PetscLayoutSetUp(B->cmap);
3036: PetscLayoutGetBlockSize(B->rmap,&bs);
3038: if (d_nnz) {
3039: for (i=0; i<B->rmap->n/bs; i++) {
3040: if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
3041: }
3042: }
3043: if (o_nnz) {
3044: for (i=0; i<B->rmap->n/bs; i++) {
3045: if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
3046: }
3047: }
3049: b = (Mat_MPIBAIJ*)B->data;
3050: b->bs2 = bs*bs;
3051: b->mbs = B->rmap->n/bs;
3052: b->nbs = B->cmap->n/bs;
3053: b->Mbs = B->rmap->N/bs;
3054: b->Nbs = B->cmap->N/bs;
3056: for (i=0; i<=b->size; i++) {
3057: b->rangebs[i] = B->rmap->range[i]/bs;
3058: }
3059: b->rstartbs = B->rmap->rstart/bs;
3060: b->rendbs = B->rmap->rend/bs;
3061: b->cstartbs = B->cmap->rstart/bs;
3062: b->cendbs = B->cmap->rend/bs;
3064: if (!B->preallocated) {
3065: MatCreate(PETSC_COMM_SELF,&b->A);
3066: MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
3067: MatSetType(b->A,MATSEQBAIJ);
3068: PetscLogObjectParent(B,b->A);
3069: MatCreate(PETSC_COMM_SELF,&b->B);
3070: MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
3071: MatSetType(b->B,MATSEQBAIJ);
3072: PetscLogObjectParent(B,b->B);
3073: MatStashCreate_Private(((PetscObject)B)->comm,bs,&B->bstash);
3074: }
3076: MatSeqBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);
3077: MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);
3078: /* Do not error if the user did not give real preallocation information. Ugly because this would overwrite a previous user call to MatSetOption(). */
3079: if (!d_realalloc) {MatSetOption(b->A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);}
3080: if (!o_realalloc) {MatSetOption(b->B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);}
3081: B->preallocated = PETSC_TRUE;
3082: return(0);
3083: }
3084: EXTERN_C_END
3086: EXTERN_C_BEGIN
3087: extern PetscErrorCode MatDiagonalScaleLocal_MPIBAIJ(Mat,Vec);
3088: extern PetscErrorCode MatSetHashTableFactor_MPIBAIJ(Mat,PetscReal);
3089: EXTERN_C_END
3092: EXTERN_C_BEGIN
3095: PetscErrorCode MatConvert_MPIBAIJ_MPIAdj(Mat B, const MatType newtype,MatReuse reuse,Mat *adj)
3096: {
3097: Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)B->data;
3099: Mat_SeqBAIJ *d = (Mat_SeqBAIJ*) b->A->data,*o = (Mat_SeqBAIJ*) b->B->data;
3100: PetscInt M = B->rmap->n/B->rmap->bs,i,*ii,*jj,cnt,j,k,rstart = B->rmap->rstart/B->rmap->bs;
3101: const PetscInt *id = d->i, *jd = d->j, *io = o->i, *jo = o->j, *garray = b->garray;
3104: PetscMalloc((M+1)*sizeof(PetscInt),&ii);
3105: ii[0] = 0;
3106: CHKMEMQ;
3107: for (i=0; i<M; i++) {
3108: if ((id[i+1] - id[i]) < 0) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Indices wrong %D %D %D",i,id[i],id[i+1]);
3109: if ((io[i+1] - io[i]) < 0) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Indices wrong %D %D %D",i,io[i],io[i+1]);
3110: ii[i+1] = ii[i] + id[i+1] - id[i] + io[i+1] - io[i];
3111: /* remove one from the count if the row has a diagonal entry */
3112: for (j=id[i]; j<id[i+1]; j++) {
3113: if (jd[j] == i) {ii[i+1]--;break;}
3114: }
3115: CHKMEMQ;
3116: }
3117: PetscMalloc(ii[M]*sizeof(PetscInt),&jj);
3118: cnt = 0;
3119: for (i=0; i<M; i++) {
3120: for (j=io[i]; j<io[i+1]; j++) {
3121: if (garray[jo[j]] > rstart) break;
3122: jj[cnt++] = garray[jo[j]];
3123: CHKMEMQ;
3124: }
3125: for (k=id[i]; k<id[i+1]; k++) {
3126: if (jd[k] != i) {
3127: jj[cnt++] = rstart + jd[k];
3128: CHKMEMQ;
3129: }
3130: }
3131: for (;j<io[i+1]; j++) {
3132: jj[cnt++] = garray[jo[j]];
3133: CHKMEMQ;
3134: }
3135: }
3136: MatCreateMPIAdj(((PetscObject)B)->comm,M,B->cmap->N/B->rmap->bs,ii,jj,PETSC_NULL,adj);
3137: return(0);
3138: }
3139: EXTERN_C_END
3141: #include <../src/mat/impls/aij/mpi/mpiaij.h>
3142: EXTERN_C_BEGIN
3143: PetscErrorCode MatConvert_SeqBAIJ_SeqAIJ(Mat,const MatType,MatReuse,Mat*);
3144: EXTERN_C_END
3146: EXTERN_C_BEGIN
3149: PetscErrorCode MatConvert_MPIBAIJ_MPIAIJ(Mat A,const MatType newtype,MatReuse reuse,Mat *newmat)
3150: {
3152: Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
3153: Mat B;
3154: Mat_MPIAIJ *b;
3157: if (!A->assembled) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_SUP,"Matrix must be assembled");
3159: MatCreate(((PetscObject)A)->comm,&B);
3160: MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
3161: MatSetType(B,MATMPIAIJ);
3162: MatSeqAIJSetPreallocation(B,0,PETSC_NULL);
3163: MatMPIAIJSetPreallocation(B,0,PETSC_NULL,0,PETSC_NULL);
3164: b = (Mat_MPIAIJ*) B->data;
3166: MatDestroy(&b->A);
3167: MatDestroy(&b->B);
3168: MatDisAssemble_MPIBAIJ(A);
3169: MatConvert_SeqBAIJ_SeqAIJ(a->A, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->A);
3170: MatConvert_SeqBAIJ_SeqAIJ(a->B, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->B);
3171: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3172: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3173: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
3174: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
3175: if (reuse == MAT_REUSE_MATRIX) {
3176: MatHeaderReplace(A,B);
3177: } else {
3178: *newmat = B;
3179: }
3180: return(0);
3181: }
3182: EXTERN_C_END
3183:
3184: EXTERN_C_BEGIN
3185: #if defined(PETSC_HAVE_MUMPS)
3186: extern PetscErrorCode MatGetFactor_baij_mumps(Mat,MatFactorType,Mat*);
3187: #endif
3188: EXTERN_C_END
3190: /*MC
3191: MATMPIBAIJ - MATMPIBAIJ = "mpibaij" - A matrix type to be used for distributed block sparse matrices.
3193: Options Database Keys:
3194: + -mat_type mpibaij - sets the matrix type to "mpibaij" during a call to MatSetFromOptions()
3195: . -mat_block_size <bs> - set the blocksize used to store the matrix
3196: - -mat_use_hash_table <fact> - set hash table factor
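
   A sketch of selecting this type at runtime (comm, m, n, M, N are assumed to
   be defined by the caller):

.vb
   MatCreate(comm,&A);
   MatSetSizes(A,m,n,M,N);
   MatSetFromOptions(A);  /* then run with -mat_type mpibaij -mat_block_size 2 */
.ve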
3198: Level: beginner
3200: .seealso: MatCreateMPIBAIJ
3201: M*/
3203: EXTERN_C_BEGIN
3204: extern PetscErrorCode MatConvert_MPIBAIJ_MPIBSTRM(Mat,const MatType,MatReuse,Mat*);
3205: EXTERN_C_END
3207: EXTERN_C_BEGIN
3210: PetscErrorCode MatCreate_MPIBAIJ(Mat B)
3211: {
3212: Mat_MPIBAIJ *b;
3214: PetscBool flg;
3217: PetscNewLog(B,Mat_MPIBAIJ,&b);
3218: B->data = (void*)b;
3220: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
3221: B->assembled = PETSC_FALSE;
3223: B->insertmode = NOT_SET_VALUES;
3224: MPI_Comm_rank(((PetscObject)B)->comm,&b->rank);
3225: MPI_Comm_size(((PetscObject)B)->comm,&b->size);
3227: /* build local table of row and column ownerships */
3228: PetscMalloc((b->size+1)*sizeof(PetscInt),&b->rangebs);
3230: /* build cache for off array entries formed */
3231: MatStashCreate_Private(((PetscObject)B)->comm,1,&B->stash);
3232: b->donotstash = PETSC_FALSE;
3233: b->colmap = PETSC_NULL;
3234: b->garray = PETSC_NULL;
3235: b->roworiented = PETSC_TRUE;
3237: /* stuff used in block assembly */
3238: b->barray = 0;
3240: /* stuff used for matrix vector multiply */
3241: b->lvec = 0;
3242: b->Mvctx = 0;
3244: /* stuff for MatGetRow() */
3245: b->rowindices = 0;
3246: b->rowvalues = 0;
3247: b->getrowactive = PETSC_FALSE;
3249: /* hash table stuff */
3250: b->ht = 0;
3251: b->hd = 0;
3252: b->ht_size = 0;
3253: b->ht_flag = PETSC_FALSE;
3254: b->ht_fact = 0;
3255: b->ht_total_ct = 0;
3256: b->ht_insert_ct = 0;
3258: /* stuff for MatGetSubMatrices_MPIBAIJ_local() */
3259: b->ijonly = PETSC_FALSE;
3261: PetscOptionsBegin(((PetscObject)B)->comm,PETSC_NULL,"Options for loading MPIBAIJ matrix 1","Mat");
3262: PetscOptionsBool("-mat_use_hash_table","Use hash table to save memory in constructing matrix","MatSetOption",PETSC_FALSE,&flg,PETSC_NULL);
3263: if (flg) {
3264: PetscReal fact = 1.39;
3265: MatSetOption(B,MAT_USE_HASH_TABLE,PETSC_TRUE);
3266: PetscOptionsReal("-mat_use_hash_table","Use hash table factor","MatMPIBAIJSetHashTableFactor",fact,&fact,PETSC_NULL);
3267: if (fact <= 1.0) fact = 1.39;
3268: MatMPIBAIJSetHashTableFactor(B,fact);
3269: PetscInfo1(B,"Hash table Factor used %5.2f\n",fact);
3270: }
3271: PetscOptionsEnd();
3273: #if defined(PETSC_HAVE_MUMPS)
3274: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mumps_C", "MatGetFactor_baij_mumps",MatGetFactor_baij_mumps);
3275: #endif
3276: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpibaij_mpiadj_C",
3277: "MatConvert_MPIBAIJ_MPIAdj",
3278: MatConvert_MPIBAIJ_MPIAdj);
3279: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpibaij_mpiaij_C",
3280: "MatConvert_MPIBAIJ_MPIAIJ",
3281: MatConvert_MPIBAIJ_MPIAIJ);
3282: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpibaij_mpisbaij_C",
3283: "MatConvert_MPIBAIJ_MPISBAIJ",
3284: MatConvert_MPIBAIJ_MPISBAIJ);
3285: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
3286: "MatStoreValues_MPIBAIJ",
3287: MatStoreValues_MPIBAIJ);
3288: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
3289: "MatRetrieveValues_MPIBAIJ",
3290: MatRetrieveValues_MPIBAIJ);
3291: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
3292: "MatGetDiagonalBlock_MPIBAIJ",
3293: MatGetDiagonalBlock_MPIBAIJ);
3294: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIBAIJSetPreallocation_C",
3295: "MatMPIBAIJSetPreallocation_MPIBAIJ",
3296: MatMPIBAIJSetPreallocation_MPIBAIJ);
3297: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIBAIJSetPreallocationCSR_C",
3298: "MatMPIBAIJSetPreallocationCSR_MPIBAIJ",
3299: MatMPIBAIJSetPreallocationCSR_MPIBAIJ);
3300: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatDiagonalScaleLocal_C",
3301: "MatDiagonalScaleLocal_MPIBAIJ",
3302: MatDiagonalScaleLocal_MPIBAIJ);
3303: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatSetHashTableFactor_C",
3304: "MatSetHashTableFactor_MPIBAIJ",
3305: MatSetHashTableFactor_MPIBAIJ);
3306: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpibaij_mpibstrm_C",
3307: "MatConvert_MPIBAIJ_MPIBSTRM",
3308: MatConvert_MPIBAIJ_MPIBSTRM);
3309: PetscObjectChangeTypeName((PetscObject)B,MATMPIBAIJ);
3310: return(0);
3311: }
3312: EXTERN_C_END
3314: /*MC
3315: MATBAIJ - MATBAIJ = "baij" - A matrix type to be used for block sparse matrices.
3317: This matrix type is identical to MATSEQBAIJ when constructed with a single process communicator,
3318: and MATMPIBAIJ otherwise.
3320: Options Database Keys:
3321: . -mat_type baij - sets the matrix type to "baij" during a call to MatSetFromOptions()
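
   A sketch (comm is assumed; on one process this yields MATSEQBAIJ, otherwise MATMPIBAIJ):

.vb
   MatCreate(comm,&A);
   MatSetType(A,MATBAIJ);
.ve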
3323: Level: beginner
3325: .seealso: MatCreateBAIJ(),MATSEQBAIJ,MATMPIBAIJ, MatMPIBAIJSetPreallocation(), MatMPIBAIJSetPreallocationCSR()
3326: M*/
3330: /*@C
3331: MatMPIBAIJSetPreallocation - Allocates memory for a sparse parallel matrix in block AIJ format
3332: (block compressed row). For good matrix assembly performance
3333: the user should preallocate the matrix storage by setting the parameters
3334: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3335: performance can be increased by more than a factor of 50.
3337: Collective on Mat
3339: Input Parameters:
3340: + A - the matrix
3341: . bs - size of block
3342: . d_nz - number of block nonzeros per block row in diagonal portion of local
3343: submatrix (same for all local rows)
3344: . d_nnz - array containing the number of block nonzeros in the various block rows
3345: of the diagonal portion of the local submatrix (possibly different for each block
3346: row) or PETSC_NULL. If you plan to factor the matrix you must leave room for the diagonal entry and
3347: set it even if it is zero.
3348: . o_nz - number of block nonzeros per block row in the off-diagonal portion of local
3349: submatrix (same for all local rows).
3350: - o_nnz - array containing the number of block nonzeros in the various block rows of the
3351: off-diagonal portion of the local submatrix (possibly different for
3352: each block row) or PETSC_NULL.
3354: If the *_nnz parameter is given then the *_nz parameter is ignored
3356: Options Database Keys:
3357: + -mat_block_size - size of the blocks to use
3358: - -mat_use_hash_table <fact> - set hash table factor
3360: Notes:
3361: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
3362: then it must be used on all processors that share the object for that argument.
3364: Storage Information:
3365: For a square global matrix we define each processor's diagonal portion
3366: to be its local rows and the corresponding columns (a square submatrix);
3367: each processor's off-diagonal portion encompasses the remainder of the
3368: local matrix (a rectangular submatrix).
3370: The user can specify preallocated storage for the diagonal part of
3371: the local submatrix with either d_nz or d_nnz (not both). Set
3372: d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
3373: memory allocation. Likewise, specify preallocated storage for the
3374: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
3376: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
3377: the figure below we depict these three local rows and all columns (0-11).
3379: .vb
3380: 0 1 2 3 4 5 6 7 8 9 10 11
3381: -------------------
3382: row 3 | o o o d d d o o o o o o
3383: row 4 | o o o d d d o o o o o o
3384: row 5 | o o o d d d o o o o o o
3385: -------------------
3386: .ve
3387:
3388: Thus, any entries in the d locations are stored in the d (diagonal)
3389: submatrix, and any entries in the o locations are stored in the
3390: o (off-diagonal) submatrix. Note that the d and the o submatrices are
3391: stored simply in the MATSEQBAIJ format for compressed row storage.
3393: Now d_nz should indicate the number of block nonzeros per row in the d matrix,
3394: and o_nz should indicate the number of block nonzeros per row in the o matrix.
3395: In general, for PDE problems in which most nonzeros are near the diagonal,
3396: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
3397: or you will get TERRIBLE performance; see the users' manual chapter on
3398: matrices.
3400: You can call MatGetInfo() to get information on how effective the preallocation was;
3401: for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
3402: You can also run with the option -info and look for messages with the string
3403: malloc in them to see if additional memory allocation was needed.
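
   Example of typical usage (a minimal sketch; block size 3, a 30x30 local
   piece, and at most 5 diagonal and 2 off-diagonal block nonzeros per block
   row are illustrative assumptions):

.vb
   MatCreate(comm,&A);
   MatSetSizes(A,30,30,PETSC_DETERMINE,PETSC_DETERMINE);
   MatSetType(A,MATMPIBAIJ);
   MatMPIBAIJSetPreallocation(A,3,5,PETSC_NULL,2,PETSC_NULL);
.ve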
3405: Level: intermediate
3407: .keywords: matrix, block, aij, compressed row, sparse, parallel
3409: .seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateBAIJ(), MatMPIBAIJSetPreallocationCSR()
3410: @*/
3411: PetscErrorCode MatMPIBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
3412: {
3419: PetscTryMethod(B,"MatMPIBAIJSetPreallocation_C",(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,bs,d_nz,d_nnz,o_nz,o_nnz));
3420: return(0);
3421: }
3425: /*@C
3426: MatCreateBAIJ - Creates a sparse parallel matrix in block AIJ format
3427: (block compressed row). For good matrix assembly performance
3428: the user should preallocate the matrix storage by setting the parameters
3429: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3430: performance can be increased by more than a factor of 50.
3432: Collective on MPI_Comm
3434: Input Parameters:
3435: + comm - MPI communicator
3436: . bs - size of block
3437: . m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
3438: This value should be the same as the local size used in creating the
3439: y vector for the matrix-vector product y = Ax.
3440: . n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
3441: This value should be the same as the local size used in creating the
3442: x vector for the matrix-vector product y = Ax.
3443: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3444: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3445: . d_nz - number of nonzero blocks per block row in diagonal portion of local
3446: submatrix (same for all local rows)
3447: .  d_nnz - array containing the number of nonzero blocks in the various block rows
3448:            in the diagonal portion of the local submatrix (possibly different for each block
3449: row) or PETSC_NULL. If you plan to factor the matrix you must leave room for the diagonal entry
3450: and set it even if it is zero.
3451: . o_nz - number of nonzero blocks per block row in the off-diagonal portion of local
3452: submatrix (same for all local rows).
3453: - o_nnz - array containing the number of nonzero blocks in the various block rows of the
3454: off-diagonal portion of the local submatrix (possibly different for
3455: each block row) or PETSC_NULL.
3457: Output Parameter:
3458: . A - the matrix
3460: Options Database Keys:
3461: + -mat_block_size - size of the blocks to use
3462: -   -mat_use_hash_table <fact> - use a hash table during assembly, sized by the factor fact (see MatMPIBAIJSetHashTableFactor())
3464:    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
3465:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
3466:    [MatXXXXSetPreallocation() is, for example, MatSeqBAIJSetPreallocation()]
3468: Notes:
3469: If the *_nnz parameter is given then the *_nz parameter is ignored
3471:    A nonzero block is any block that has 1 or more nonzeros in it
3473: The user MUST specify either the local or global matrix dimensions
3474: (possibly both).
3476:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
3477:    then it must be used on all processors that share the object for that argument.
3479: Storage Information:
3480: For a square global matrix we define each processor's diagonal portion
3481: to be its local rows and the corresponding columns (a square submatrix);
3482: each processor's off-diagonal portion encompasses the remainder of the
3483: local matrix (a rectangular submatrix).
3485: The user can specify preallocated storage for the diagonal part of
3486: the local submatrix with either d_nz or d_nnz (not both). Set
3487: d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
3488: memory allocation. Likewise, specify preallocated storage for the
3489: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
3491: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
3492: the figure below we depict these three local rows and all columns (0-11).
3494: .vb
3495: 0 1 2 3 4 5 6 7 8 9 10 11
3496: -------------------
3497: row 3 | o o o d d d o o o o o o
3498: row 4 | o o o d d d o o o o o o
3499: row 5 | o o o d d d o o o o o o
3500: -------------------
3501: .ve
3502:
3503: Thus, any entries in the d locations are stored in the d (diagonal)
3504: submatrix, and any entries in the o locations are stored in the
3505: o (off-diagonal) submatrix. Note that the d and the o submatrices are
3506: stored simply in the MATSEQBAIJ format for compressed row storage.
3508: Now d_nz should indicate the number of block nonzeros per row in the d matrix,
3509: and o_nz should indicate the number of block nonzeros per row in the o matrix.
3510: In general, for PDE problems in which most nonzeros are near the diagonal,
3511: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
3512: or you will get TERRIBLE performance; see the users' manual chapter on
3513: matrices.
3515: Level: intermediate
3517: .keywords: matrix, block, aij, compressed row, sparse, parallel
3519: .seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateBAIJ(), MatMPIBAIJSetPreallocation(), MatMPIBAIJSetPreallocationCSR()
3520: @*/
3521: PetscErrorCode MatCreateBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
3522: {
3524: PetscMPIInt size;
3527: MatCreate(comm,A);
3528: MatSetSizes(*A,m,n,M,N);
3529: MPI_Comm_size(comm,&size);
3530: if (size > 1) {
3531: MatSetType(*A,MATMPIBAIJ);
3532: MatMPIBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);
3533: } else {
3534: MatSetType(*A,MATSEQBAIJ);
3535: MatSeqBAIJSetPreallocation(*A,bs,d_nz,d_nnz);
3536: }
3537: return(0);
3538: }
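Editor's note: a hedged sketch of the direct creation route above (not part of the original source); the block size 2 and the global size 100 are illustrative, and the global size must be divisible by the block size.

.vb
   Mat A;
   /* bs = 2; PETSc decides the local row/column sizes from M = N = 100 */
   MatCreateBAIJ(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DECIDE,100,100,
                 5,PETSC_NULL,2,PETSC_NULL,&A);
.ve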
3542: static PetscErrorCode MatDuplicate_MPIBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
3543: {
3544: Mat mat;
3545: Mat_MPIBAIJ *a,*oldmat = (Mat_MPIBAIJ*)matin->data;
3547: PetscInt len=0;
3550: *newmat = 0;
3551: MatCreate(((PetscObject)matin)->comm,&mat);
3552: MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
3553: MatSetType(mat,((PetscObject)matin)->type_name);
3554: PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
3556: mat->factortype = matin->factortype;
3557: mat->preallocated = PETSC_TRUE;
3558: mat->assembled = PETSC_TRUE;
3559: mat->insertmode = NOT_SET_VALUES;
3561: a = (Mat_MPIBAIJ*)mat->data;
3562: mat->rmap->bs = matin->rmap->bs;
3563: a->bs2 = oldmat->bs2;
3564: a->mbs = oldmat->mbs;
3565: a->nbs = oldmat->nbs;
3566: a->Mbs = oldmat->Mbs;
3567: a->Nbs = oldmat->Nbs;
3568:
3569: PetscLayoutReference(matin->rmap,&mat->rmap);
3570: PetscLayoutReference(matin->cmap,&mat->cmap);
3572: a->size = oldmat->size;
3573: a->rank = oldmat->rank;
3574: a->donotstash = oldmat->donotstash;
3575: a->roworiented = oldmat->roworiented;
3576: a->rowindices = 0;
3577: a->rowvalues = 0;
3578: a->getrowactive = PETSC_FALSE;
3579: a->barray = 0;
3580: a->rstartbs = oldmat->rstartbs;
3581: a->rendbs = oldmat->rendbs;
3582: a->cstartbs = oldmat->cstartbs;
3583: a->cendbs = oldmat->cendbs;
3585: /* hash table stuff */
3586: a->ht = 0;
3587: a->hd = 0;
3588: a->ht_size = 0;
3589: a->ht_flag = oldmat->ht_flag;
3590: a->ht_fact = oldmat->ht_fact;
3591: a->ht_total_ct = 0;
3592: a->ht_insert_ct = 0;
3594: PetscMemcpy(a->rangebs,oldmat->rangebs,(a->size+1)*sizeof(PetscInt));
3595: if (oldmat->colmap) {
3596: #if defined (PETSC_USE_CTABLE)
3597: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
3598: #else
3599: PetscMalloc((a->Nbs)*sizeof(PetscInt),&a->colmap);
3600: PetscLogObjectMemory(mat,(a->Nbs)*sizeof(PetscInt));
3601: PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));
3602: #endif
3603: } else a->colmap = 0;
3605: if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
3606: PetscMalloc(len*sizeof(PetscInt),&a->garray);
3607: PetscLogObjectMemory(mat,len*sizeof(PetscInt));
3608: PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));
3609: } else a->garray = 0;
3610:
3611: MatStashCreate_Private(((PetscObject)matin)->comm,matin->rmap->bs,&mat->bstash);
3612: VecDuplicate(oldmat->lvec,&a->lvec);
3613: PetscLogObjectParent(mat,a->lvec);
3614: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
3615: PetscLogObjectParent(mat,a->Mvctx);
3617: MatDuplicate(oldmat->A,cpvalues,&a->A);
3618: PetscLogObjectParent(mat,a->A);
3619: MatDuplicate(oldmat->B,cpvalues,&a->B);
3620: PetscLogObjectParent(mat,a->B);
3621: PetscFListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
3622: *newmat = mat;
3624: return(0);
3625: }
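Editor's note: the routine above backs the public MatDuplicate() interface for MATMPIBAIJ matrices; a minimal usage sketch (not part of the original source):

.vb
   Mat B;
   MatDuplicate(A,MAT_COPY_VALUES,&B);  /* same layout and nonzero structure as A, values copied */
   /* ... use B independently of A ... */
   MatDestroy(&B);
.ve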
3629: PetscErrorCode MatLoad_MPIBAIJ(Mat newmat,PetscViewer viewer)
3630: {
3632: int fd;
3633: PetscInt i,nz,j,rstart,rend;
3634: PetscScalar *vals,*buf;
3635: MPI_Comm comm = ((PetscObject)viewer)->comm;
3636: MPI_Status status;
3637: PetscMPIInt rank,size,maxnz;
3638: PetscInt header[4],*rowlengths = 0,M,N,m,*rowners,*cols;
3639: PetscInt *locrowlens = PETSC_NULL,*procsnz = PETSC_NULL,*browners = PETSC_NULL;
3640: PetscInt jj,*mycols,*ibuf,bs=1,Mbs,mbs,extra_rows,mmax;
3641: PetscMPIInt tag = ((PetscObject)viewer)->tag;
3642: PetscInt *dlens = PETSC_NULL,*odlens = PETSC_NULL,*mask = PETSC_NULL,*masked1 = PETSC_NULL,*masked2 = PETSC_NULL,rowcount,odcount;
3643: PetscInt dcount,kmax,k,nzcount,tmp,mend,sizesset=1,grows,gcols;
3646: PetscOptionsBegin(comm,PETSC_NULL,"Options for loading MPIBAIJ matrix 2","Mat");
3647: PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,PETSC_NULL);
3648: PetscOptionsEnd();
3650: MPI_Comm_size(comm,&size);
3651: MPI_Comm_rank(comm,&rank);
3652: if (!rank) {
3653: PetscViewerBinaryGetDescriptor(viewer,&fd);
3654: PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
3655: if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
3656: }
3658: if (newmat->rmap->n < 0 && newmat->rmap->N < 0 && newmat->cmap->n < 0 && newmat->cmap->N < 0) sizesset = 0;
3660: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
3661: M = header[1]; N = header[2];
3663: /* If global rows/cols are set to PETSC_DECIDE, set it to the sizes given in the file */
3664: if (sizesset && newmat->rmap->N < 0) newmat->rmap->N = M;
3665: if (sizesset && newmat->cmap->N < 0) newmat->cmap->N = N;
3666:
3667: /* If global sizes are set, check if they are consistent with that given in the file */
3668: if (sizesset) {
3669: MatGetSize(newmat,&grows,&gcols);
3670: }
3671:   if (sizesset && M != grows) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows: Matrix in file has (%d) and input matrix has (%d)",M,grows);
3672:   if (sizesset && N != gcols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of cols: Matrix in file has (%d) and input matrix has (%d)",N,gcols);
3674: if (M != N) SETERRQ(((PetscObject)viewer)->comm,PETSC_ERR_SUP,"Can only do square matrices");
3676: /*
3677: This code adds extra rows to make sure the number of rows is
3678: divisible by the blocksize
3679: */
3680: Mbs = M/bs;
3681: extra_rows = bs - M + bs*Mbs;
3682: if (extra_rows == bs) extra_rows = 0;
3683: else Mbs++;
3684: if (extra_rows && !rank) {
3685: PetscInfo(viewer,"Padding loaded matrix to match blocksize\n");
3686: }
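  /* Editor's worked example (not in the original source): with M = 10 and bs = 4,
     Mbs = 10/4 = 2 and extra_rows = 4 - 10 + 4*2 = 2; since extra_rows != bs,
     Mbs is incremented to 3 and the matrix is padded to 12 rows. */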
3688: /* determine ownership of all rows */
3689: if (newmat->rmap->n < 0) { /* PETSC_DECIDE */
3690: mbs = Mbs/size + ((Mbs % size) > rank);
3691: m = mbs*bs;
3692: } else { /* User set */
3693: m = newmat->rmap->n;
3694: mbs = m/bs;
3695: }
3696: PetscMalloc2(size+1,PetscInt,&rowners,size+1,PetscInt,&browners);
3697: MPI_Allgather(&mbs,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
3699: /* process 0 needs enough room for process with most rows */
3700: if (!rank) {
3701: mmax = rowners[1];
3702: for (i=2; i<=size; i++) {
3703: mmax = PetscMax(mmax,rowners[i]);
3704: }
3705:     mmax *= bs;
3706:   }
3708: rowners[0] = 0;
3709: for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
3710: for (i=0; i<=size; i++) browners[i] = rowners[i]*bs;
3711: rstart = rowners[rank];
3712: rend = rowners[rank+1];
3714: /* distribute row lengths to all processors */
3715: PetscMalloc(m*sizeof(PetscInt),&locrowlens);
3716: if (!rank) {
3717: mend = m;
3718: if (size == 1) mend = mend - extra_rows;
3719: PetscBinaryRead(fd,locrowlens,mend,PETSC_INT);
3720: for (j=mend; j<m; j++) locrowlens[j] = 1;
3721: PetscMalloc(mmax*sizeof(PetscInt),&rowlengths);
3722: PetscMalloc(size*sizeof(PetscInt),&procsnz);
3723: PetscMemzero(procsnz,size*sizeof(PetscInt));
3724: for (j=0; j<m; j++) {
3725: procsnz[0] += locrowlens[j];
3726: }
3727: for (i=1; i<size; i++) {
3728: mend = browners[i+1] - browners[i];
3729: if (i == size-1) mend = mend - extra_rows;
3730: PetscBinaryRead(fd,rowlengths,mend,PETSC_INT);
3731: for (j=mend; j<browners[i+1] - browners[i]; j++) rowlengths[j] = 1;
3732: /* calculate the number of nonzeros on each processor */
3733: for (j=0; j<browners[i+1]-browners[i]; j++) {
3734: procsnz[i] += rowlengths[j];
3735: }
3736: MPI_Send(rowlengths,browners[i+1]-browners[i],MPIU_INT,i,tag,comm);
3737: }
3738: PetscFree(rowlengths);
3739: } else {
3740: MPI_Recv(locrowlens,m,MPIU_INT,0,tag,comm,&status);
3741: }
3743: if (!rank) {
3744: /* determine max buffer needed and allocate it */
3745: maxnz = procsnz[0];
3746: for (i=1; i<size; i++) {
3747: maxnz = PetscMax(maxnz,procsnz[i]);
3748: }
3749: PetscMalloc(maxnz*sizeof(PetscInt),&cols);
3751: /* read in my part of the matrix column indices */
3752: nz = procsnz[0];
3753: PetscMalloc((nz+1)*sizeof(PetscInt),&ibuf);
3754: mycols = ibuf;
3755: if (size == 1) nz -= extra_rows;
3756: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
3757: if (size == 1) for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
3759:     /* read in the column indices of every other processor (except the last) and ship them off */
3760: for (i=1; i<size-1; i++) {
3761: nz = procsnz[i];
3762: PetscBinaryRead(fd,cols,nz,PETSC_INT);
3763: MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
3764: }
3765: /* read in the stuff for the last proc */
3766: if (size != 1) {
3767: nz = procsnz[size-1] - extra_rows; /* the extra rows are not on the disk */
3768: PetscBinaryRead(fd,cols,nz,PETSC_INT);
3769: for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
3770: MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);
3771: }
3772: PetscFree(cols);
3773: } else {
3774: /* determine buffer space needed for message */
3775: nz = 0;
3776: for (i=0; i<m; i++) {
3777: nz += locrowlens[i];
3778: }
3779: PetscMalloc((nz+1)*sizeof(PetscInt),&ibuf);
3780: mycols = ibuf;
3781: /* receive message of column indices*/
3782: MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
3783: MPI_Get_count(&status,MPIU_INT,&maxnz);
3784: if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
3785: }
3786:
3787: /* loop over local rows, determining number of off diagonal entries */
3788: PetscMalloc2(rend-rstart,PetscInt,&dlens,rend-rstart,PetscInt,&odlens);
3789: PetscMalloc3(Mbs,PetscInt,&mask,Mbs,PetscInt,&masked1,Mbs,PetscInt,&masked2);
3790: PetscMemzero(mask,Mbs*sizeof(PetscInt));
3791: PetscMemzero(masked1,Mbs*sizeof(PetscInt));
3792: PetscMemzero(masked2,Mbs*sizeof(PetscInt));
3793: rowcount = 0; nzcount = 0;
3794: for (i=0; i<mbs; i++) {
3795: dcount = 0;
3796: odcount = 0;
3797: for (j=0; j<bs; j++) {
3798: kmax = locrowlens[rowcount];
3799: for (k=0; k<kmax; k++) {
3800: tmp = mycols[nzcount++]/bs;
3801: if (!mask[tmp]) {
3802: mask[tmp] = 1;
3803: if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp;
3804: else masked1[dcount++] = tmp;
3805: }
3806: }
3807: rowcount++;
3808: }
3809:
3810: dlens[i] = dcount;
3811: odlens[i] = odcount;
3813: /* zero out the mask elements we set */
3814: for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
3815: for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
3816: }
3818:
3819: if (!sizesset) {
3820: MatSetSizes(newmat,m,m,M+extra_rows,N+extra_rows);
3821: }
3822: MatMPIBAIJSetPreallocation(newmat,bs,0,dlens,0,odlens);
3824: if (!rank) {
3825: PetscMalloc((maxnz+1)*sizeof(PetscScalar),&buf);
3826: /* read in my part of the matrix numerical values */
3827: nz = procsnz[0];
3828: vals = buf;
3829: mycols = ibuf;
3830: if (size == 1) nz -= extra_rows;
3831: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3832: if (size == 1) for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
3834: /* insert into matrix */
3835: jj = rstart*bs;
3836: for (i=0; i<m; i++) {
3837: MatSetValues_MPIBAIJ(newmat,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
3838: mycols += locrowlens[i];
3839: vals += locrowlens[i];
3840: jj++;
3841: }
3842: /* read in other processors (except the last one) and ship out */
3843: for (i=1; i<size-1; i++) {
3844: nz = procsnz[i];
3845: vals = buf;
3846: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3847: MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newmat)->tag,comm);
3848: }
3849: /* the last proc */
3850: if (size != 1){
3851: nz = procsnz[i] - extra_rows;
3852: vals = buf;
3853: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
3854: for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
3855: MPIULong_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,((PetscObject)newmat)->tag,comm);
3856: }
3857: PetscFree(procsnz);
3858: } else {
3859: /* receive numeric values */
3860: PetscMalloc((nz+1)*sizeof(PetscScalar),&buf);
3862: /* receive message of values*/
3863: vals = buf;
3864: mycols = ibuf;
3865: MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newmat)->tag,comm);
3867: /* insert into matrix */
3868: jj = rstart*bs;
3869: for (i=0; i<m; i++) {
3870: MatSetValues_MPIBAIJ(newmat,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
3871: mycols += locrowlens[i];
3872: vals += locrowlens[i];
3873: jj++;
3874: }
3875: }
3876: PetscFree(locrowlens);
3877: PetscFree(buf);
3878: PetscFree(ibuf);
3879: PetscFree2(rowners,browners);
3880: PetscFree2(dlens,odlens);
3881: PetscFree3(mask,masked1,masked2);
3882: MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
3883: MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);
3885: return(0);
3886: }
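Editor's note: MatLoad_MPIBAIJ() is reached through the public MatLoad() interface; a hedged usage sketch (not part of the original source), where the file name matrix.dat is a placeholder:

.vb
   Mat         A;
   PetscViewer viewer;
   PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&viewer);
   MatCreate(PETSC_COMM_WORLD,&A);
   MatSetType(A,MATMPIBAIJ);
   MatLoad(A,viewer);   /* run with -matload_block_size <bs> to pick the block size */
   PetscViewerDestroy(&viewer);
.ve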
3890: /*@
3891: MatMPIBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
3893: Input Parameters:
3894: . mat - the matrix
3895: . fact - factor
3897: Not Collective, each process can use a different factor
3899: Level: advanced
3901: Notes:
3902: This can also be set by the command line option: -mat_use_hash_table <fact>
3904: .keywords: matrix, hashtable, factor, HT
3906: .seealso: MatSetOption()
3907: @*/
3908: PetscErrorCode MatMPIBAIJSetHashTableFactor(Mat mat,PetscReal fact)
3909: {
3913: PetscTryMethod(mat,"MatSetHashTableFactor_C",(Mat,PetscReal),(mat,fact));
3914: return(0);
3915: }
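Editor's note: a hedged sketch of how the factor is typically used together with MatSetOption() (not part of the original source); the value 1.99 is illustrative, and the factor is assumed to oversize the hash table relative to the expected number of nonzeros:

.vb
   MatSetOption(mat,MAT_USE_HASH_TABLE,PETSC_TRUE);  /* enable hash-table assembly */
   MatMPIBAIJSetHashTableFactor(mat,1.99);           /* oversize the table by this factor */
.ve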
3917: EXTERN_C_BEGIN
3920: PetscErrorCode MatSetHashTableFactor_MPIBAIJ(Mat mat,PetscReal fact)
3921: {
3922: Mat_MPIBAIJ *baij;
3925: baij = (Mat_MPIBAIJ*)mat->data;
3926: baij->ht_fact = fact;
3927: return(0);
3928: }
3929: EXTERN_C_END
3933: PetscErrorCode MatMPIBAIJGetSeqBAIJ(Mat A,Mat *Ad,Mat *Ao,PetscInt *colmap[])
3934: {
3935: Mat_MPIBAIJ *a = (Mat_MPIBAIJ *)A->data;
3937: *Ad = a->A;
3938: *Ao = a->B;
3939: *colmap = a->garray;
3940: return(0);
3941: }
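Editor's note: a minimal sketch of calling the accessor above (not part of the original source); garray maps local block columns of the off-diagonal part to global block columns:

.vb
   Mat      Ad,Ao;
   PetscInt *garray;
   MatMPIBAIJGetSeqBAIJ(A,&Ad,&Ao,&garray);
   /* Ad: diagonal MATSEQBAIJ block; Ao: off-diagonal MATSEQBAIJ block;
      garray[i] is the global block column of local block column i of Ao */
.ve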
3943: /*
3944:     Special version for direct calls from Fortran (to eliminate two function call overheads)
3945: */
3946: #if defined(PETSC_HAVE_FORTRAN_CAPS)
3947: #define matmpibaijsetvaluesblocked_ MATMPIBAIJSETVALUESBLOCKED
3948: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
3949: #define matmpibaijsetvaluesblocked_ matmpibaijsetvaluesblocked
3950: #endif
3954: /*@C
3955: MatMPIBAIJSetValuesBlocked - Direct Fortran call to replace call to MatSetValuesBlocked()
3957: Collective on Mat
3959: Input Parameters:
3960: + mat - the matrix
3961: . min - number of input rows
3962: . im - input rows
3963: . nin - number of input columns
3964: . in - input columns
3965: . v - numerical values input
3966: - addvin - INSERT_VALUES or ADD_VALUES
3968: Notes: This has a complete copy of MatSetValuesBlocked_MPIBAIJ() which is terrible code un-reuse.
3970: Level: advanced
3972: .seealso: MatSetValuesBlocked()
3973: @*/
3974: PetscErrorCode matmpibaijsetvaluesblocked_(Mat *matin,PetscInt *min,const PetscInt im[],PetscInt *nin,const PetscInt in[],const MatScalar v[],InsertMode *addvin)
3975: {
3976: /* convert input arguments to C version */
3977: Mat mat = *matin;
3978: PetscInt m = *min, n = *nin;
3979: InsertMode addv = *addvin;
3981: Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
3982: const MatScalar *value;
3983: MatScalar *barray=baij->barray;
3984: PetscBool roworiented = baij->roworiented;
3985: PetscErrorCode ierr;
3986: PetscInt i,j,ii,jj,row,col,rstart=baij->rstartbs;
3987: PetscInt rend=baij->rendbs,cstart=baij->cstartbs,stepval;
3988: PetscInt cend=baij->cendbs,bs=mat->rmap->bs,bs2=baij->bs2;
3989:
3991: /* tasks normally handled by MatSetValuesBlocked() */
3992: if (mat->insertmode == NOT_SET_VALUES) {
3993: mat->insertmode = addv;
3994: }
3995: #if defined(PETSC_USE_DEBUG)
3996: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
3997: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3998: #endif
3999: if (mat->assembled) {
4000: mat->was_assembled = PETSC_TRUE;
4001: mat->assembled = PETSC_FALSE;
4002: }
4003: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
4006: if(!barray) {
4007: PetscMalloc(bs2*sizeof(MatScalar),&barray);
4008: baij->barray = barray;
4009: }
4011: if (roworiented) {
4012: stepval = (n-1)*bs;
4013: } else {
4014: stepval = (m-1)*bs;
4015: }
4016: for (i=0; i<m; i++) {
4017: if (im[i] < 0) continue;
4018: #if defined(PETSC_USE_DEBUG)
4019: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
4020: #endif
4021: if (im[i] >= rstart && im[i] < rend) {
4022: row = im[i] - rstart;
4023: for (j=0; j<n; j++) {
4024:         /* If n == 1 (row-oriented) or m == 1 (column-oriented), the input block can be used in place and no copy is required */
4025: if ((roworiented) && (n == 1)) {
4026: barray = (MatScalar*)v + i*bs2;
4027: } else if((!roworiented) && (m == 1)) {
4028: barray = (MatScalar*)v + j*bs2;
4029: } else { /* Here a copy is required */
4030: if (roworiented) {
4031: value = v + i*(stepval+bs)*bs + j*bs;
4032: } else {
4033: value = v + j*(stepval+bs)*bs + i*bs;
4034: }
4035: for (ii=0; ii<bs; ii++,value+=stepval) {
4036: for (jj=0; jj<bs; jj++) {
4037: *barray++ = *value++;
4038: }
4039: }
4040: barray -=bs2;
4041: }
4042:
4043: if (in[j] >= cstart && in[j] < cend){
4044: col = in[j] - cstart;
4045: MatSetValuesBlocked_SeqBAIJ(baij->A,1,&row,1,&col,barray,addv);
4046: }
4047: else if (in[j] < 0) continue;
4048: #if defined(PETSC_USE_DEBUG)
4049: else if (in[j] >= baij->Nbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);
4050: #endif
4051: else {
4052: if (mat->was_assembled) {
4053: if (!baij->colmap) {
4054: MatCreateColmap_MPIBAIJ_Private(mat);
4055: }
4057: #if defined(PETSC_USE_DEBUG)
4058: #if defined (PETSC_USE_CTABLE)
4059: { PetscInt data;
4060: PetscTableFind(baij->colmap,in[j]+1,&data);
4061: if ((data - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
4062: }
4063: #else
4064: if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
4065: #endif
4066: #endif
4067: #if defined (PETSC_USE_CTABLE)
4068: PetscTableFind(baij->colmap,in[j]+1,&col);
4069: col = (col - 1)/bs;
4070: #else
4071: col = (baij->colmap[in[j]] - 1)/bs;
4072: #endif
4073: if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
4074: MatDisAssemble_MPIBAIJ(mat);
4075: col = in[j];
4076: }
4077: }
4078: else col = in[j];
4079: MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);
4080: }
4081: }
4082: } else {
4083: if (!baij->donotstash) {
4084: if (roworiented) {
4085: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
4086: } else {
4087: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
4088: }
4089: }
4090: }
4091: }
4092:
4093: /* task normally handled by MatSetValuesBlocked() */
4094: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
4095: return(0);
4096: }
4100: /*@
4101: MatCreateMPIBAIJWithArrays - creates a MPI BAIJ matrix using arrays that contain in standard
4102: CSR format the local rows.
4104: Collective on MPI_Comm
4106: Input Parameters:
4107: + comm - MPI communicator
4108: . bs - the block size, only a block size of 1 is supported
4109: . m - number of local rows (Cannot be PETSC_DECIDE)
4110: .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given).
4111:        This value should be the same as the local size used in creating the
4112:        x vector for the matrix-vector product y = Ax. For square matrices n is almost always m.
4113: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4114: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4115: . i - row indices
4116: . j - column indices
4117: - a - matrix values
4119: Output Parameter:
4120: . mat - the matrix
4122: Level: intermediate
4124: Notes:
4125: The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
4126: thus you CANNOT change the matrix entries by changing the values of a[] after you have
4127: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
4129:        The i and j indices are 0 based; i[] gives, for each local row, the starting offset into the local j array.
4131: .keywords: matrix, aij, compressed row, sparse, parallel
4133: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4134: MPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays()
4135: @*/
4136: PetscErrorCode MatCreateMPIBAIJWithArrays(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4137: {
4142: if (i[0]) {
4143: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4144: }
4145: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4146: MatCreate(comm,mat);
4147: MatSetSizes(*mat,m,n,M,N);
4148:   MatSetType(*mat,MATMPIBAIJ);
4149: MatMPIBAIJSetPreallocationCSR(*mat,bs,i,j,a);
4150: return(0);
4151: }
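Editor's note: a minimal sketch on one process (not part of the original source), building a 2x2 identity from CSR arrays with bs = 1, the only supported block size; with more processes each rank would pass its own local rows:

.vb
   PetscInt    ii[] = {0,1,2};      /* row offsets into jj/aa  */
   PetscInt    jj[] = {0,1};        /* column indices, 0 based */
   PetscScalar aa[] = {1.0,1.0};    /* values                  */
   Mat         A;
   MatCreateMPIBAIJWithArrays(PETSC_COMM_WORLD,1,2,2,PETSC_DETERMINE,PETSC_DETERMINE,ii,jj,aa,&A);
.ve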