Actual source code: mmbaij.c
petsc-3.10.5 2019-03-28
/*
   Support for the parallel BAIJ matrix vector multiply
*/
#include <../src/mat/impls/baij/mpi/mpibaij.h>
#include <petsc/private/isimpl.h>
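/*
   Illustrative driver (a minimal sketch, not part of mmbaij.c): assembling an
   MPIBAIJ matrix invokes MatSetUpMultiply_MPIBAIJ() below from MatAssemblyEnd(),
   which builds the scatter that MatMult() then uses. The sizes and values are
   arbitrary; error checking is elided to match the rest of this listing.
*/
#include <petscmat.h>
int main(int argc,char **argv)
{
  Mat         A;
  Vec         x,y;
  PetscInt    bs = 2,Mb = 8,Istart,Iend,i;
  PetscScalar v  = 1.0;

  PetscInitialize(&argc,&argv,NULL,NULL);
  /* 8x8 block matrix with 2x2 blocks; at most one diagonal and one off-diagonal block per block row */
  MatCreateBAIJ(PETSC_COMM_WORLD,bs,PETSC_DECIDE,PETSC_DECIDE,Mb*bs,Mb*bs,2,NULL,1,NULL,&A);
  MatGetOwnershipRange(A,&Istart,&Iend);
  for (i=Istart; i<Iend; i++) {
    MatSetValue(A,i,i,v,INSERT_VALUES);                      /* diagonal entry */
    if (i+bs < Mb*bs) MatSetValue(A,i,i+bs,v,INSERT_VALUES); /* entry one block to the right */
  }
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);    /* calls MatSetUpMultiply_MPIBAIJ() */
  MatCreateVecs(A,&x,&y);
  VecSet(x,1.0);
  MatMult(A,x,y);                          /* uses the baij->Mvctx and baij->lvec built below */
  VecDestroy(&x);VecDestroy(&y);MatDestroy(&A);
  PetscFinalize();
  return 0;
}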
PetscErrorCode MatSetUpMultiply_MPIBAIJ(Mat mat)
{
  Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
  Mat_SeqBAIJ *B    = (Mat_SeqBAIJ*)(baij->B->data);
  PetscInt    i,j,*aj = B->j,ec = 0,*garray;
  PetscInt    bs = mat->rmap->bs,*stmp;
  IS          from,to;
  Vec         gvec;
#if defined(PETSC_USE_CTABLE)
  PetscTable         gid1_lid1;
  PetscTablePosition tpos;
  PetscInt           gid,lid;
#else
  PetscInt Nbs = baij->Nbs,*indices;
#endif
#if defined(PETSC_USE_CTABLE)
  /* use a table - Mark Adams */
  PetscTableCreate(B->mbs,baij->Nbs+1,&gid1_lid1);
  for (i=0; i<B->mbs; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      /* keys and values are shifted by one since PetscTableFind() returns 0 for "not found" */
      PetscInt data,gid1 = aj[B->i[i]+j] + 1;
      PetscTableFind(gid1_lid1,gid1,&data);
      if (!data) {
        /* one based table */
        PetscTableAdd(gid1_lid1,gid1,++ec,INSERT_VALUES);
      }
    }
  }
  /* form array of columns we need */
  PetscMalloc1(ec+1,&garray);
  PetscTableGetHeadPosition(gid1_lid1,&tpos);
  while (tpos) {
    PetscTableGetNext(gid1_lid1,&tpos,&gid,&lid);
    gid--; lid--;
    garray[lid] = gid;
  }
  PetscSortInt(ec,garray);
  /* rebuild the table so that the local indices reflect the sorted order */
  PetscTableRemoveAll(gid1_lid1);
  for (i=0; i<ec; i++) {
    PetscTableAdd(gid1_lid1,garray[i]+1,i+1,INSERT_VALUES);
  }
  /* compact out the extra columns in B */
  for (i=0; i<B->mbs; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      PetscInt gid1 = aj[B->i[i] + j] + 1;
      PetscTableFind(gid1_lid1,gid1,&lid);
      lid--;
      aj[B->i[i]+j] = lid;
    }
  }
  B->nbs           = ec;
  baij->B->cmap->n = baij->B->cmap->N = ec*mat->rmap->bs;
  PetscLayoutSetUp(baij->B->cmap);
  PetscTableDestroy(&gid1_lid1);
#else
  /* Make an array as long as the number of columns */
  /* mark those columns that are in baij->B */
  PetscCalloc1(Nbs+1,&indices);
  for (i=0; i<B->mbs; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      if (!indices[aj[B->i[i] + j]]) ec++;
      indices[aj[B->i[i] + j]] = 1;
    }
  }

  /* form array of columns we need */
  PetscMalloc1(ec+1,&garray);
  ec = 0;
  for (i=0; i<Nbs; i++) {
    if (indices[i]) {
      garray[ec++] = i;
    }
  }

  /* make indices now point into garray */
  for (i=0; i<ec; i++) {
    indices[garray[i]] = i;
  }

  /* compact out the extra columns in B */
  for (i=0; i<B->mbs; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      aj[B->i[i] + j] = indices[aj[B->i[i] + j]];
    }
  }
  B->nbs           = ec;
  baij->B->cmap->n = baij->B->cmap->N = ec*mat->rmap->bs;
  PetscLayoutSetUp(baij->B->cmap);
  PetscFree(indices);
#endif
  /* create local vector that is used to scatter into */
  VecCreateSeq(PETSC_COMM_SELF,ec*bs,&baij->lvec);

  /* create two temporary index sets for building scatter-gather */
  ISCreateBlock(PETSC_COMM_SELF,bs,ec,garray,PETSC_COPY_VALUES,&from);

  PetscMalloc1(ec+1,&stmp);
  for (i=0; i<ec; i++) stmp[i] = i;
  ISCreateBlock(PETSC_COMM_SELF,bs,ec,stmp,PETSC_OWN_POINTER,&to);

  /* create temporary global vector to generate scatter context */
  VecCreateMPIWithArray(PetscObjectComm((PetscObject)mat),1,mat->cmap->n,mat->cmap->N,NULL,&gvec);

  VecScatterCreate(gvec,from,baij->lvec,to,&baij->Mvctx);

  PetscLogObjectParent((PetscObject)mat,(PetscObject)baij->Mvctx);
  PetscLogObjectParent((PetscObject)mat,(PetscObject)baij->lvec);
  PetscLogObjectParent((PetscObject)mat,(PetscObject)from);
  PetscLogObjectParent((PetscObject)mat,(PetscObject)to);

  baij->garray = garray;

  PetscLogObjectMemory((PetscObject)mat,(ec+1)*sizeof(PetscInt));
  ISDestroy(&from);
  ISDestroy(&to);
  VecDestroy(&gvec);
  return(0);
}
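/*
   Illustrative sketch (the function name is assumed, not part of this file):
   how the scatter built above is consumed by the MPIBAIJ matrix-vector
   product. The actual implementation is MatMult_MPIBAIJ() in mpibaij.c; this
   only shows the communication/computation overlap pattern.
*/
static PetscErrorCode MatMult_MPIBAIJ_sketch(Mat A,Vec xx,Vec yy)
{
  Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;

  /* start gathering the needed off-process entries of xx into the local work vector lvec */
  VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  /* overlap communication with the "diagonal" (on-process) block multiply;
     the ops-> form is used because a->A is sequential while xx is parallel */
  (*a->A->ops->mult)(a->A,xx,yy);
  VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  /* add the "off-diagonal" block contribution computed from the gathered values */
  (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
  return(0);
}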
/*
   Takes the local part of an already assembled MPIBAIJ matrix
   and disassembles it. This is to allow new nonzeros into the matrix
   that require more communication in the matrix-vector multiply.
   Thus certain data structures must be rebuilt.

   Kind of slow! But that's what application programmers get when
   they are sloppy.
*/
PetscErrorCode MatDisAssemble_MPIBAIJ(Mat A)
{
  Mat_MPIBAIJ *baij  = (Mat_MPIBAIJ*)A->data;
  Mat         B      = baij->B,Bnew;
  Mat_SeqBAIJ *Bbaij = (Mat_SeqBAIJ*)B->data;
  PetscInt    i,j,mbs=Bbaij->mbs,n = A->cmap->N,col,*garray=baij->garray;
  PetscInt    bs2 = baij->bs2,*nz,ec,m = A->rmap->n;
  MatScalar   *a = Bbaij->a;
  MatScalar   *atmp;

  /* free stuff related to matrix-vector multiply */
  VecGetSize(baij->lvec,&ec); /* needed for PetscLogObjectMemory below */
  VecDestroy(&baij->lvec); baij->lvec = 0;
  VecScatterDestroy(&baij->Mvctx); baij->Mvctx = 0;
  if (baij->colmap) {
#if defined(PETSC_USE_CTABLE)
    PetscTableDestroy(&baij->colmap);
#else
    PetscFree(baij->colmap);
    PetscLogObjectMemory((PetscObject)A,-Bbaij->nbs*sizeof(PetscInt));
#endif
  }

  /* make sure that B is assembled so we can access its values */
  MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

  /* invent new B and copy stuff over */
  PetscMalloc1(mbs,&nz);
  for (i=0; i<mbs; i++) {
    nz[i] = Bbaij->i[i+1]-Bbaij->i[i];
  }
  MatCreate(PetscObjectComm((PetscObject)B),&Bnew);
  MatSetSizes(Bnew,m,n,m,n);
  MatSetType(Bnew,((PetscObject)B)->type_name);
  MatSeqBAIJSetPreallocation(Bnew,B->rmap->bs,0,nz);
  if (Bbaij->nonew >= 0) { /* Inherit insertion error options (if positive). */
    ((Mat_SeqBAIJ*)Bnew->data)->nonew = Bbaij->nonew;
  }
  MatSetOption(Bnew,MAT_ROW_ORIENTED,PETSC_FALSE);
  /*
    Ensure that B's nonzerostate is monotonically increasing.
    Or should this follow the MatSetValuesBlocked() loop to preserve B's nonzerostate across a MatDisAssemble() call?
  */
  Bnew->nonzerostate = B->nonzerostate;
  for (i=0; i<mbs; i++) {
    for (j=Bbaij->i[i]; j<Bbaij->i[i+1]; j++) {
      col  = garray[Bbaij->j[j]]; /* translate compacted local block column back to global block column */
      atmp = a + j*bs2;
      MatSetValuesBlocked_SeqBAIJ(Bnew,1,&i,1,&col,atmp,B->insertmode);
    }
  }
  MatSetOption(Bnew,MAT_ROW_ORIENTED,PETSC_TRUE);

  PetscFree(nz);
  PetscFree(baij->garray);
  PetscLogObjectMemory((PetscObject)A,-ec*sizeof(PetscInt));
  MatDestroy(&B);
  PetscLogObjectParent((PetscObject)A,(PetscObject)Bnew);

  baij->B          = Bnew;
  A->was_assembled = PETSC_FALSE;
  A->assembled     = PETSC_FALSE;
  return(0);
}
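/*
   Illustrative trigger (a minimal sketch; the function name and the row, col,
   v arguments are placeholders): the disassemble path above is taken when an
   already assembled matrix receives a nonzero in an off-diagonal block
   location outside the current communication pattern, forcing the scatter to
   be rebuilt on the next assembly.
*/
static PetscErrorCode AddNewOffProcessCoupling_sketch(Mat A,PetscInt row,PetscInt col,PetscScalar v)
{
  /* A is assumed assembled; col is assumed owned by another process and not
     previously coupled to row's process, so MatDisAssemble_MPIBAIJ() runs
     during the insertion/assembly below */
  MatSetValue(A,row,col,v,ADD_VALUES);
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  return(0);
}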
/* ugly stuff added for Glenn; someday we should fix this up */

static PetscInt *uglyrmapd = 0,*uglyrmapo = 0; /* mapping from the local ordering to the "diagonal" and "off-diagonal" parts of the local matrix */
static Vec      uglydd     = 0,uglyoo     = 0; /* work vectors used to scale the two parts of the local matrix */
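/*
   These statics are allocated in MatMPIBAIJDiagonalScaleLocalSetUp() below but
   are apparently never freed anywhere in this listing. A hypothetical cleanup
   helper (not in PETSc) would look like:
*/
static PetscErrorCode MatMPIBAIJDiagonalScaleLocalCleanup_sketch(void)
{
  PetscFree(uglyrmapd); /* PetscFree() also zeros the pointer */
  PetscFree(uglyrmapo);
  VecDestroy(&uglydd);
  VecDestroy(&uglyoo);
  return(0);
}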
PetscErrorCode MatMPIBAIJDiagonalScaleLocalSetUp(Mat inA,Vec scale)
{
  Mat_MPIBAIJ *ina = (Mat_MPIBAIJ*) inA->data; /* access private part of matrix */
  Mat_SeqBAIJ *B   = (Mat_SeqBAIJ*)ina->B->data;
  PetscInt    bs = inA->rmap->bs,i,n,nt,j,cstart,cend,no,*garray = ina->garray,*lindices;
  PetscInt    *r_rmapd,*r_rmapo;

  MatGetOwnershipRange(inA,&cstart,&cend);
  MatGetSize(ina->A,NULL,&n);
  /* r_rmapd[i] is 1 + the global block row of local block i when that block is
     owned by this process (i.e. falls in the "diagonal" part), and 0 otherwise */
  PetscCalloc1(inA->rmap->mapping->n+1,&r_rmapd);
  nt = 0;
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (inA->rmap->mapping->indices[i]*bs >= cstart && inA->rmap->mapping->indices[i]*bs < cend) {
      nt++;
      r_rmapd[i] = inA->rmap->mapping->indices[i] + 1;
    }
  }
  if (nt*bs != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hmm nt*bs %D n %D",nt*bs,n);
  PetscMalloc1(n+1,&uglyrmapd);
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (r_rmapd[i]) {
      for (j=0; j<bs; j++) {
        uglyrmapd[(r_rmapd[i]-1)*bs+j-cstart] = i*bs + j;
      }
    }
  }
  PetscFree(r_rmapd);
  VecCreateSeq(PETSC_COMM_SELF,n,&uglydd);
  /* lindices[g] is 1 + the local index of global block column g within B, or 0 if absent */
  PetscCalloc1(ina->Nbs+1,&lindices);
  for (i=0; i<B->nbs; i++) {
    lindices[garray[i]] = i+1;
  }
  no = inA->rmap->mapping->n - nt;
  PetscCalloc1(inA->rmap->mapping->n+1,&r_rmapo);
  nt = 0;
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (lindices[inA->rmap->mapping->indices[i]]) {
      nt++;
      r_rmapo[i] = lindices[inA->rmap->mapping->indices[i]];
    }
  }
  if (nt > no) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hmm nt %D no %D",nt,no);
  PetscFree(lindices);
  PetscMalloc1(nt*bs+1,&uglyrmapo);
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (r_rmapo[i]) {
      for (j=0; j<bs; j++) {
        uglyrmapo[(r_rmapo[i]-1)*bs+j] = i*bs + j;
      }
    }
  }
  PetscFree(r_rmapo);
  VecCreateSeq(PETSC_COMM_SELF,nt*bs,&uglyoo);
  return(0);
}
PetscErrorCode MatMPIBAIJDiagonalScaleLocal(Mat A,Vec scale)
{
  /* This routine should really be abandoned as it duplicates MatDiagonalScaleLocal */
  PetscTryMethod(A,"MatDiagonalScaleLocal_C",(Mat,Vec),(A,scale));
  return(0);
}
PetscErrorCode MatDiagonalScaleLocal_MPIBAIJ(Mat A,Vec scale)
{
  Mat_MPIBAIJ       *a = (Mat_MPIBAIJ*) A->data; /* access private part of matrix */
  PetscInt          n,i;
  PetscScalar       *d,*o;
  const PetscScalar *s;

  if (!uglyrmapd) {
    MatMPIBAIJDiagonalScaleLocalSetUp(A,scale);
  }

  VecGetArrayRead(scale,&s);

  VecGetLocalSize(uglydd,&n);
  VecGetArray(uglydd,&d);
  for (i=0; i<n; i++) {
    d[i] = s[uglyrmapd[i]]; /* copy "diagonal" (true local) portion of scale into dd vector */
  }
  VecRestoreArray(uglydd,&d);
  /* column scale "diagonal" portion of local matrix */
  MatDiagonalScale(a->A,NULL,uglydd);

  VecGetLocalSize(uglyoo,&n);
  VecGetArray(uglyoo,&o);
  for (i=0; i<n; i++) {
    o[i] = s[uglyrmapo[i]]; /* copy "off-diagonal" portion of scale into oo vector */
  }
  VecRestoreArrayRead(scale,&s);
  VecRestoreArray(uglyoo,&o);
  /* column scale "off-diagonal" portion of local matrix */
  MatDiagonalScale(a->B,NULL,uglyoo);
  return(0);
}
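/*
   Illustrative usage (a minimal sketch; the wrapper name is assumed): the
   scale vector passed to MatMPIBAIJDiagonalScaleLocal() is indexed through
   uglyrmapd and uglyrmapo above, i.e. it lives in the local ordering described
   by the matrix's local-to-global mapping, which must be set (e.g. with
   MatSetLocalToGlobalMapping()) before the setup routine dereferences
   inA->rmap->mapping.
*/
static PetscErrorCode ApplyLocalColumnScaling_sketch(Mat A,Vec localscale)
{
  /* net effect: columns of the "diagonal" block a->A are scaled by the owned
     part of localscale and columns of the "off-diagonal" block a->B by the
     ghost part, as selected by uglyrmapd/uglyrmapo */
  MatMPIBAIJDiagonalScaleLocal(A,localscale);
  return(0);
}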