Actual source code: wb.c
petsc-3.12.5 2020-03-29
2: #include <petscdmda.h>
3: #include <petsc/private/pcmgimpl.h>
4: #include <petscctable.h>
6: typedef struct {
7: PCExoticType type;
8: Mat P; /* the constructed interpolation matrix */
9: PetscBool directSolve; /* use direct LU factorization to construct interpolation */
10: KSP ksp;
11: } PC_Exotic;
13: const char *const PCExoticTypes[] = {"face","wirebasket","PCExoticType","PC_Exotic",0};
16: /*
17: DMDAGetWireBasketInterpolation - Gets the interpolation for a wirebasket based coarse space
19: */
20: PetscErrorCode DMDAGetWireBasketInterpolation(PC pc,DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
21: {
22: PetscErrorCode ierr;
23: PetscInt dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
24: PetscInt mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[26],*globals,Ng,*IIint,*IIsurf,Nt;
25: Mat Xint, Xsurf,Xint_tmp;
26: IS isint,issurf,is,row,col;
27: ISLocalToGlobalMapping ltg;
28: MPI_Comm comm;
29: Mat A,Aii,Ais,Asi,*Aholder,iAii;
30: MatFactorInfo info;
31: PetscScalar *xsurf,*xint;
32: const PetscScalar *rxint;
33: #if defined(PETSC_USE_DEBUG_foo)
34: PetscScalar tmp;
35: #endif
36: PetscTable ht;
39: DMDAGetInfo(da,&dim,0,0,0,&mp,&np,&pp,&dof,0,0,0,0,0);
40: if (dof != 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only for single field problems");
41: if (dim != 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only coded for 3d problems");
42: DMDAGetCorners(da,0,0,0,&m,&n,&p);
43: DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);
44: istart = istart ? -1 : 0;
45: jstart = jstart ? -1 : 0;
46: kstart = kstart ? -1 : 0;
48: /*
49:     the columns of P are the interpolation of each coarse grid point (one for each vertex, edge, and face)
50: to all the local degrees of freedom (this includes the vertices, edges and faces).
52: Xint are the subset of the interpolation into the interior
54: Xface are the interpolation onto faces but not into the interior
56: Xsurf are the interpolation onto the vertices and edges (the surfbasket)
57:                                       ( Xint  )
58:     Symbolically one could write P = ( Xface ) after interchanging the rows to match the natural ordering on the domain
59:                                       ( Xsurf )
60: */
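  /* The wirebasket coarse space has 26 coarse basis functions per subdomain: one for each of the 8 vertices,
     12 edges, and 6 faces of the subdomain boundary (the 3x3x3 arrangement of coarse objects minus the
     interior one), which is why Xint and Xsurf below have 26 columns and gl[] has 26 entries. */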
61: N = (m - istart)*(n - jstart)*(p - kstart);
62: Nint = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
63: Nface = 2*((m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart));
64: Nwire = 4*((m-2-istart) + (n-2-jstart) + (p-2-kstart)) + 8;
65: Nsurf = Nface + Nwire;
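  /* For example, a local patch with m-istart = n-jstart = p-kstart = 4 gives N = 64, Nint = 8,
     Nface = 24, Nwire = 32, and Nsurf = 56, so that Nint + Nsurf = N as required. */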
66: MatCreateSeqDense(MPI_COMM_SELF,Nint,26,NULL,&Xint);
67: MatCreateSeqDense(MPI_COMM_SELF,Nsurf,26,NULL,&Xsurf);
68: MatDenseGetArray(Xsurf,&xsurf);
70: /*
71: Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
72: Xsurf will be all zero (thus making the coarse matrix singular).
73: */
74: if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
75: if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
76: if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");
78: cnt = 0;
80: xsurf[cnt++] = 1;
81: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + Nsurf] = 1;
82: xsurf[cnt++ + 2*Nsurf] = 1;
84: for (j=1; j<n-1-jstart; j++) {
85: xsurf[cnt++ + 3*Nsurf] = 1;
86: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 4*Nsurf] = 1;
87: xsurf[cnt++ + 5*Nsurf] = 1;
88: }
90: xsurf[cnt++ + 6*Nsurf] = 1;
91: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 7*Nsurf] = 1;
92: xsurf[cnt++ + 8*Nsurf] = 1;
94: for (k=1; k<p-1-kstart; k++) {
95: xsurf[cnt++ + 9*Nsurf] = 1;
96: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 10*Nsurf] = 1;
97: xsurf[cnt++ + 11*Nsurf] = 1;
99: for (j=1; j<n-1-jstart; j++) {
100: xsurf[cnt++ + 12*Nsurf] = 1;
101: /* these are the interior nodes */
102: xsurf[cnt++ + 13*Nsurf] = 1;
103: }
105: xsurf[cnt++ + 14*Nsurf] = 1;
106: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 15*Nsurf] = 1;
107: xsurf[cnt++ + 16*Nsurf] = 1;
108: }
110: xsurf[cnt++ + 17*Nsurf] = 1;
111: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 18*Nsurf] = 1;
112: xsurf[cnt++ + 19*Nsurf] = 1;
114: for (j=1;j<n-1-jstart;j++) {
115: xsurf[cnt++ + 20*Nsurf] = 1;
116: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 21*Nsurf] = 1;
117: xsurf[cnt++ + 22*Nsurf] = 1;
118: }
120: xsurf[cnt++ + 23*Nsurf] = 1;
121: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 24*Nsurf] = 1;
122: xsurf[cnt++ + 25*Nsurf] = 1;
125: /* interpolations only sum to 1 when using direct solver */
126: #if defined(PETSC_USE_DEBUG_foo)
127: for (i=0; i<Nsurf; i++) {
128: tmp = 0.0;
129: for (j=0; j<26; j++) tmp += xsurf[i+j*Nsurf];
130: if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
131: }
132: #endif
133: MatDenseRestoreArray(Xsurf,&xsurf);
134: /* MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);*/
137: /*
138: I are the indices for all the needed vertices (in global numbering)
139:     Iint are the indices for the interior values, Isurf for the surface values
140: (This is just for the part of the global matrix obtained with MatCreateSubMatrix(), it
141: is NOT the local DMDA ordering.)
142: IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
143: */
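  /* After the local-to-global mapping below, IIint and IIsurf become the row indices used to insert
     Xint and Xsurf into the global interpolation P, while the 26 coarse points in gl[] supply its
     column indices. */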
144: #define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
145: PetscMalloc3(N,&II,Nint,&Iint,Nsurf,&Isurf);
146: PetscMalloc2(Nint,&IIint,Nsurf,&IIsurf);
147: for (k=0; k<p-kstart; k++) {
148: for (j=0; j<n-jstart; j++) {
149: for (i=0; i<m-istart; i++) {
150: II[c++] = i + j*mwidth + k*mwidth*nwidth;
152: if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
153: IIint[cint] = i + j*mwidth + k*mwidth*nwidth;
154: Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
155: } else {
156: IIsurf[csurf] = i + j*mwidth + k*mwidth*nwidth;
157: Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
158: }
159: }
160: }
161: }
162: if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
163: if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
164: if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
165:   DMGetLocalToGlobalMapping(da,&ltg);
166: ISLocalToGlobalMappingApply(ltg,N,II,II);
167: ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);
168: ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);
169: PetscObjectGetComm((PetscObject)da,&comm);
170: ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);
171: ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);
172: ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);
173: PetscFree3(II,Iint,Isurf);
175: MatCreateSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);
176: A = *Aholder;
177: PetscFree(Aholder);
179: MatCreateSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);
180: MatCreateSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);
181: MatCreateSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);
183: /*
184: Solve for the interpolation onto the interior Xint
185: */
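  /* That is, solve Aii * Xint = -Ais * Xsurf, so Xint = -inv(Aii)*Ais*Xsurf is the discrete harmonic
     (energy-minimizing) extension of the boundary interpolation Xsurf into the subdomain interior. */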
186: MatMatMult(Ais,Xsurf,MAT_INITIAL_MATRIX,PETSC_DETERMINE,&Xint_tmp);
187: MatScale(Xint_tmp,-1.0);
188: if (exotic->directSolve) {
189: MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);
190: MatFactorInfoInitialize(&info);
191: MatGetOrdering(Aii,MATORDERINGND,&row,&col);
192: MatLUFactorSymbolic(iAii,Aii,row,col,&info);
193: ISDestroy(&row);
194: ISDestroy(&col);
195: MatLUFactorNumeric(iAii,Aii,&info);
196: MatMatSolve(iAii,Xint_tmp,Xint);
197: MatDestroy(&iAii);
198: } else {
199: Vec b,x;
200: PetscScalar *xint_tmp;
202: MatDenseGetArray(Xint,&xint);
203: VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&x);
204: MatDenseGetArray(Xint_tmp,&xint_tmp);
205: VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&b);
206: KSPSetOperators(exotic->ksp,Aii,Aii);
207: for (i=0; i<26; i++) {
208: VecPlaceArray(x,xint+i*Nint);
209: VecPlaceArray(b,xint_tmp+i*Nint);
210: KSPSolve(exotic->ksp,b,x);
211: KSPCheckSolve(exotic->ksp,pc,x);
212: VecResetArray(x);
213: VecResetArray(b);
214: }
215: MatDenseRestoreArray(Xint,&xint);
216: MatDenseRestoreArray(Xint_tmp,&xint_tmp);
217: VecDestroy(&x);
218: VecDestroy(&b);
219: }
220: MatDestroy(&Xint_tmp);
222: #if defined(PETSC_USE_DEBUG_foo)
223: MatDenseGetArrayRead(Xint,&rxint);
224: for (i=0; i<Nint; i++) {
225: tmp = 0.0;
226: for (j=0; j<26; j++) tmp += rxint[i+j*Nint];
228: if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
229: }
230: MatDenseRestoreArrayRead(Xint,&rxint);
231: /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD); */
232: #endif
235: /* total vertices total faces total edges */
236: Ntotal = (mp + 1)*(np + 1)*(pp + 1) + mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1) + mp*(np+1)*(pp+1) + np*(mp+1)*(pp+1) + pp*(mp+1)*(np+1);
238: /*
239:     For each vertex, edge, face on this process (in the same orderings as used above) determine its local number including ghost points
240: */
241: cnt = 0;
243: gl[cnt++] = 0; { gl[cnt++] = 1;} gl[cnt++] = m-istart-1;
244: { gl[cnt++] = mwidth; { gl[cnt++] = mwidth+1;} gl[cnt++] = mwidth + m-istart-1;}
245: gl[cnt++] = mwidth*(n-jstart-1); { gl[cnt++] = mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*(n-jstart-1) + m-istart-1;
246: {
247: gl[cnt++] = mwidth*nwidth; { gl[cnt++] = mwidth*nwidth+1;} gl[cnt++] = mwidth*nwidth+ m-istart-1;
248: { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
249: gl[cnt++] = mwidth*nwidth+ mwidth*(n-jstart-1); { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1) + m-istart-1;
250: }
251: gl[cnt++] = mwidth*nwidth*(p-kstart-1); { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) + m-istart-1;
252: { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth; { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1)+mwidth+m-istart-1;}
253: gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth*(n-jstart-1); { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+ mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth*(n-jstart-1) + m-istart-1;
255: /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
256: /* convert that to global numbering and get them on all processes */
257: ISLocalToGlobalMappingApply(ltg,26,gl,gl);
258: /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
259: PetscMalloc1(26*mp*np*pp,&globals);
260: MPI_Allgather(gl,26,MPIU_INT,globals,26,MPIU_INT,PetscObjectComm((PetscObject)da));
262: /* Number the coarse grid points from 0 to Ntotal */
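  /* The +1 shift is used because PetscTable keys must be positive; PetscTableAddCount() numbers the
     distinct keys 1..Ntotal, and the gl[i]-- below converts back to a 0-based coarse grid numbering. */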
263: MatGetSize(Aglobal,&Nt,NULL);
264: PetscTableCreate(Ntotal/3,Nt+1,&ht);
265: for (i=0; i<26*mp*np*pp; i++) {
266: PetscTableAddCount(ht,globals[i]+1);
267: }
268: PetscTableGetCount(ht,&cnt);
269: if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
270: PetscFree(globals);
271: for (i=0; i<26; i++) {
272: PetscTableFind(ht,gl[i]+1,&gl[i]);
273: gl[i]--;
274: }
275: PetscTableDestroy(&ht);
276: /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
278: /* construct global interpolation matrix */
279: MatGetLocalSize(Aglobal,&Ng,NULL);
280: if (reuse == MAT_INITIAL_MATRIX) {
281: MatCreateAIJ(PetscObjectComm((PetscObject)da),Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,NULL,Nint+Nsurf,NULL,P);
282: } else {
283: MatZeroEntries(*P);
284: }
285: MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);
286: MatDenseGetArrayRead(Xint,&rxint);
287: MatSetValues(*P,Nint,IIint,26,gl,rxint,INSERT_VALUES);
288: MatDenseRestoreArrayRead(Xint,&rxint);
289: MatDenseGetArrayRead(Xsurf,&rxint);
290: MatSetValues(*P,Nsurf,IIsurf,26,gl,rxint,INSERT_VALUES);
291: MatDenseRestoreArrayRead(Xsurf,&rxint);
292: MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);
293: MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);
294: PetscFree2(IIint,IIsurf);
296: #if defined(PETSC_USE_DEBUG_foo)
297: {
298: Vec x,y;
299: PetscScalar *yy;
300: VecCreateMPI(PetscObjectComm((PetscObject)da),Ng,PETSC_DETERMINE,&y);
301: VecCreateMPI(PetscObjectComm((PetscObject)da),PETSC_DETERMINE,Ntotal,&x);
302: VecSet(x,1.0);
303: MatMult(*P,x,y);
304: VecGetArray(y,&yy);
305: for (i=0; i<Ng; i++) {
306: if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %g",i,(double)PetscAbsScalar(yy[i]));
307: }
308: VecRestoreArray(y,&yy);
309:     VecDestroy(&x);
310:     VecDestroy(&y);
311: }
312: #endif
314: MatDestroy(&Aii);
315: MatDestroy(&Ais);
316: MatDestroy(&Asi);
317: MatDestroy(&A);
318: ISDestroy(&is);
319: ISDestroy(&isint);
320: ISDestroy(&issurf);
321: MatDestroy(&Xint);
322: MatDestroy(&Xsurf);
323: return(0);
324: }
326: /*
327: DMDAGetFaceInterpolation - Gets the interpolation for a face based coarse space
329: */
330: PetscErrorCode DMDAGetFaceInterpolation(PC pc,DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
331: {
332: PetscErrorCode ierr;
333: PetscInt dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
334: PetscInt mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[6],*globals,Ng,*IIint,*IIsurf,Nt;
335: Mat Xint, Xsurf,Xint_tmp;
336: IS isint,issurf,is,row,col;
337: ISLocalToGlobalMapping ltg;
338: MPI_Comm comm;
339: Mat A,Aii,Ais,Asi,*Aholder,iAii;
340: MatFactorInfo info;
341: PetscScalar *xsurf,*xint;
342: const PetscScalar *rxint;
343: #if defined(PETSC_USE_DEBUG_foo)
344: PetscScalar tmp;
345: #endif
346: PetscTable ht;
349: DMDAGetInfo(da,&dim,0,0,0,&mp,&np,&pp,&dof,0,0,0,0,0);
350: if (dof != 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only for single field problems");
351: if (dim != 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only coded for 3d problems");
352: DMDAGetCorners(da,0,0,0,&m,&n,&p);
353: DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);
354: istart = istart ? -1 : 0;
355: jstart = jstart ? -1 : 0;
356: kstart = kstart ? -1 : 0;
358: /*
359:     the columns of P are the interpolation of each coarse grid point (one for each face)
360: to all the local degrees of freedom (this includes the vertices, edges and faces).
362: Xint are the subset of the interpolation into the interior
364: Xface are the interpolation onto faces but not into the interior
366: Xsurf are the interpolation onto the vertices and edges (the surfbasket)
367:                                       ( Xint  )
368:     Symbolically one could write P = ( Xface ) after interchanging the rows to match the natural ordering on the domain
369:                                       ( Xsurf )
370: */
371: N = (m - istart)*(n - jstart)*(p - kstart);
372: Nint = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
373: Nface = 2*((m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart));
374: Nwire = 4*((m-2-istart) + (n-2-jstart) + (p-2-kstart)) + 8;
375: Nsurf = Nface + Nwire;
376: MatCreateSeqDense(MPI_COMM_SELF,Nint,6,NULL,&Xint);
377: MatCreateSeqDense(MPI_COMM_SELF,Nsurf,6,NULL,&Xsurf);
378: MatDenseGetArray(Xsurf,&xsurf);
380: /*
381: Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
382: Xsurf will be all zero (thus making the coarse matrix singular).
383: */
384: if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
385: if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
386: if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");
388: cnt = 0;
389: for (j=1; j<n-1-jstart; j++) {
390: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 0*Nsurf] = 1;
391: }
393: for (k=1; k<p-1-kstart; k++) {
394: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 1*Nsurf] = 1;
395: for (j=1; j<n-1-jstart; j++) {
396: xsurf[cnt++ + 2*Nsurf] = 1;
397: /* these are the interior nodes */
398: xsurf[cnt++ + 3*Nsurf] = 1;
399: }
400: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 4*Nsurf] = 1;
401: }
402: for (j=1;j<n-1-jstart;j++) {
403: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 5*Nsurf] = 1;
404: }
406: #if defined(PETSC_USE_DEBUG_foo)
407: for (i=0; i<Nsurf; i++) {
408: tmp = 0.0;
409: for (j=0; j<6; j++) tmp += xsurf[i+j*Nsurf];
411: if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
412: }
413: #endif
414: MatDenseRestoreArray(Xsurf,&xsurf);
415: /* MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);*/
418: /*
419: I are the indices for all the needed vertices (in global numbering)
420:     Iint are the indices for the interior values, Isurf for the surface values
421: (This is just for the part of the global matrix obtained with MatCreateSubMatrix(), it
422: is NOT the local DMDA ordering.)
423: IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
424: */
425: #define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
426: PetscMalloc3(N,&II,Nint,&Iint,Nsurf,&Isurf);
427: PetscMalloc2(Nint,&IIint,Nsurf,&IIsurf);
428: for (k=0; k<p-kstart; k++) {
429: for (j=0; j<n-jstart; j++) {
430: for (i=0; i<m-istart; i++) {
431: II[c++] = i + j*mwidth + k*mwidth*nwidth;
433: if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
434: IIint[cint] = i + j*mwidth + k*mwidth*nwidth;
435: Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
436: } else {
437: IIsurf[csurf] = i + j*mwidth + k*mwidth*nwidth;
438: Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
439: }
440: }
441: }
442: }
443: if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
444: if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
445: if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
446:   DMGetLocalToGlobalMapping(da,&ltg);
447: ISLocalToGlobalMappingApply(ltg,N,II,II);
448: ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);
449: ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);
450: PetscObjectGetComm((PetscObject)da,&comm);
451: ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);
452: ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);
453: ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);
454: PetscFree3(II,Iint,Isurf);
456: ISSort(is);
457: MatCreateSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);
458: A = *Aholder;
459: PetscFree(Aholder);
461: MatCreateSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);
462: MatCreateSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);
463: MatCreateSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);
465: /*
466: Solve for the interpolation onto the interior Xint
467: */
468: MatMatMult(Ais,Xsurf,MAT_INITIAL_MATRIX,PETSC_DETERMINE,&Xint_tmp);
469: MatScale(Xint_tmp,-1.0);
471: if (exotic->directSolve) {
472: MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);
473: MatFactorInfoInitialize(&info);
474: MatGetOrdering(Aii,MATORDERINGND,&row,&col);
475: MatLUFactorSymbolic(iAii,Aii,row,col,&info);
476: ISDestroy(&row);
477: ISDestroy(&col);
478: MatLUFactorNumeric(iAii,Aii,&info);
479: MatMatSolve(iAii,Xint_tmp,Xint);
480: MatDestroy(&iAii);
481: } else {
482: Vec b,x;
483: PetscScalar *xint_tmp;
485: MatDenseGetArray(Xint,&xint);
486: VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&x);
487: MatDenseGetArray(Xint_tmp,&xint_tmp);
488: VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&b);
489: KSPSetOperators(exotic->ksp,Aii,Aii);
490: for (i=0; i<6; i++) {
491: VecPlaceArray(x,xint+i*Nint);
492: VecPlaceArray(b,xint_tmp+i*Nint);
493: KSPSolve(exotic->ksp,b,x);
494: KSPCheckSolve(exotic->ksp,pc,x);
495: VecResetArray(x);
496: VecResetArray(b);
497: }
498: MatDenseRestoreArray(Xint,&xint);
499: MatDenseRestoreArray(Xint_tmp,&xint_tmp);
500: VecDestroy(&x);
501: VecDestroy(&b);
502: }
503: MatDestroy(&Xint_tmp);
505: #if defined(PETSC_USE_DEBUG_foo)
506: MatDenseGetArrayRead(Xint,&rxint);
507: for (i=0; i<Nint; i++) {
508: tmp = 0.0;
509: for (j=0; j<6; j++) tmp += rxint[i+j*Nint];
511: if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
512: }
513: MatDenseRestoreArrayRead(Xint,&rxint);
514: /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD); */
515: #endif
518: /* total faces */
519: Ntotal = mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1);
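  /* One coarse degree of freedom per face of the mp x np x pp process decomposition:
     mp*np*(pp+1) faces orthogonal to the z direction, mp*pp*(np+1) orthogonal to y, and
     np*pp*(mp+1) orthogonal to x. */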
521: /*
522:     For each vertex, edge, face on this process (in the same orderings as used above) determine its local number including ghost points
523: */
524: cnt = 0;
525: { gl[cnt++] = mwidth+1;}
526: {
527: { gl[cnt++] = mwidth*nwidth+1;}
528: { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
529: { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;}
530: }
531: { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;}
533: /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
534: /* convert that to global numbering and get them on all processes */
535: ISLocalToGlobalMappingApply(ltg,6,gl,gl);
536: /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
537: PetscMalloc1(6*mp*np*pp,&globals);
538: MPI_Allgather(gl,6,MPIU_INT,globals,6,MPIU_INT,PetscObjectComm((PetscObject)da));
540: /* Number the coarse grid points from 0 to Ntotal */
541: MatGetSize(Aglobal,&Nt,NULL);
542: PetscTableCreate(Ntotal/3,Nt+1,&ht);
543: for (i=0; i<6*mp*np*pp; i++) {
544: PetscTableAddCount(ht,globals[i]+1);
545: }
546: PetscTableGetCount(ht,&cnt);
547: if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
548: PetscFree(globals);
549: for (i=0; i<6; i++) {
550: PetscTableFind(ht,gl[i]+1,&gl[i]);
551: gl[i]--;
552: }
553: PetscTableDestroy(&ht);
554: /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
556: /* construct global interpolation matrix */
557: MatGetLocalSize(Aglobal,&Ng,NULL);
558: if (reuse == MAT_INITIAL_MATRIX) {
559: MatCreateAIJ(PetscObjectComm((PetscObject)da),Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,NULL,Nint,NULL,P);
560: } else {
561: MatZeroEntries(*P);
562: }
563: MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);
564: MatDenseGetArrayRead(Xint,&rxint);
565: MatSetValues(*P,Nint,IIint,6,gl,rxint,INSERT_VALUES);
566: MatDenseRestoreArrayRead(Xint,&rxint);
567: MatDenseGetArrayRead(Xsurf,&rxint);
568: MatSetValues(*P,Nsurf,IIsurf,6,gl,rxint,INSERT_VALUES);
569: MatDenseRestoreArrayRead(Xsurf,&rxint);
570: MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);
571: MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);
572: PetscFree2(IIint,IIsurf);
575: #if defined(PETSC_USE_DEBUG_foo)
576: {
577: Vec x,y;
578: PetscScalar *yy;
579: VecCreateMPI(PetscObjectComm((PetscObject)da),Ng,PETSC_DETERMINE,&y);
580: VecCreateMPI(PetscObjectComm((PetscObject)da),PETSC_DETERMINE,Ntotal,&x);
581: VecSet(x,1.0);
582: MatMult(*P,x,y);
583: VecGetArray(y,&yy);
584: for (i=0; i<Ng; i++) {
585: if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %g",i,(double)PetscAbsScalar(yy[i]));
586: }
587: VecRestoreArray(y,&yy);
588:     VecDestroy(&x);
589:     VecDestroy(&y);
590: }
591: #endif
593: MatDestroy(&Aii);
594: MatDestroy(&Ais);
595: MatDestroy(&Asi);
596: MatDestroy(&A);
597: ISDestroy(&is);
598: ISDestroy(&isint);
599: ISDestroy(&issurf);
600: MatDestroy(&Xint);
601: MatDestroy(&Xsurf);
602: return(0);
603: }
606: /*@
607: PCExoticSetType - Sets the type of coarse grid interpolation to use
609: Logically Collective on PC
611: Input Parameters:
612: + pc - the preconditioner context
613: - type - either PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET (defaults to face)
615: Notes:
616:     The face-based interpolation has 1 degree of freedom per face and completely ignores the
617:     edge and vertex values in the coarse problem. For any seven-point
618:     stencil the interpolation of a constant on all faces into the interior is that constant.
620: The wirebasket interpolation has 1 degree of freedom per vertex, per edge and
621: per face. A constant on the subdomain boundary is interpolated as that constant
622: in the interior of the domain.
624: The coarse grid matrix is obtained via the Galerkin computation A_c = R A R^T, hence
625: if A is nonsingular A_c is also nonsingular.
627:     Both interpolations are suitable only for scalar problems.
629: Level: intermediate
632: .seealso: PCEXOTIC, PCExoticType
633: @*/
634: PetscErrorCode PCExoticSetType(PC pc,PCExoticType type)
635: {
641: PetscTryMethod(pc,"PCExoticSetType_C",(PC,PCExoticType),(pc,type));
642: return(0);
643: }
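/*
   A minimal usage sketch (assuming a KSP has already been created and given a 3d DMDA,
   e.g. with KSPSetDM(ksp,da), so that the PC has the DM it requires):

      PC pc;
      KSPGetPC(ksp,&pc);
      PCSetType(pc,PCEXOTIC);
      PCExoticSetType(pc,PC_EXOTIC_WIREBASKET);

   or, equivalently, from the options database: -pc_type exotic -pc_exotic_type wirebasket
*/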
645: static PetscErrorCode PCExoticSetType_Exotic(PC pc,PCExoticType type)
646: {
647: PC_MG *mg = (PC_MG*)pc->data;
648: PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
651: ctx->type = type;
652: return(0);
653: }
655: PetscErrorCode PCSetUp_Exotic(PC pc)
656: {
658: Mat A;
659: PC_MG *mg = (PC_MG*)pc->data;
660: PC_Exotic *ex = (PC_Exotic*) mg->innerctx;
661: MatReuse reuse = (ex->P) ? MAT_REUSE_MATRIX : MAT_INITIAL_MATRIX;
664: if (!pc->dm) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONGSTATE,"Need to call PCSetDM() before using this PC");
665: PCGetOperators(pc,NULL,&A);
666: if (ex->type == PC_EXOTIC_FACE) {
667: DMDAGetFaceInterpolation(pc,pc->dm,ex,A,reuse,&ex->P);
668: } else if (ex->type == PC_EXOTIC_WIREBASKET) {
669: DMDAGetWireBasketInterpolation(pc,pc->dm,ex,A,reuse,&ex->P);
670: } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_PLIB,"Unknown exotic coarse space %d",ex->type);
671: PCMGSetInterpolation(pc,1,ex->P);
672:   /* if the PC has an attached DM we must remove it or PCMG will use it to compute incorrectly sized vectors and interpolations */
673: PCSetDM(pc,NULL);
674: PCSetUp_MG(pc);
675: return(0);
676: }
678: PetscErrorCode PCDestroy_Exotic(PC pc)
679: {
681: PC_MG *mg = (PC_MG*)pc->data;
682: PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
685: MatDestroy(&ctx->P);
686: KSPDestroy(&ctx->ksp);
687: PetscFree(ctx);
688: PCDestroy_MG(pc);
689: return(0);
690: }
692: PetscErrorCode PCView_Exotic(PC pc,PetscViewer viewer)
693: {
694: PC_MG *mg = (PC_MG*)pc->data;
696: PetscBool iascii;
697: PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
700: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
701: if (iascii) {
702: PetscViewerASCIIPrintf(viewer," Exotic type = %s\n",PCExoticTypes[ctx->type]);
703: if (ctx->directSolve) {
704: PetscViewerASCIIPrintf(viewer," Using direct solver to construct interpolation\n");
705: } else {
706: PetscViewer sviewer;
707: PetscMPIInt rank;
709: PetscViewerASCIIPrintf(viewer," Using iterative solver to construct interpolation\n");
710: PetscViewerASCIIPushTab(viewer);
711: PetscViewerASCIIPushTab(viewer); /* should not need to push this twice? */
712: PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
713: MPI_Comm_rank(PetscObjectComm((PetscObject)pc),&rank);
714: if (!rank) {
715: KSPView(ctx->ksp,sviewer);
716: }
717: PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
718: PetscViewerASCIIPopTab(viewer);
719: PetscViewerASCIIPopTab(viewer);
720: }
721: }
722: PCView_MG(pc,viewer);
723: return(0);
724: }
726: PetscErrorCode PCSetFromOptions_Exotic(PetscOptionItems *PetscOptionsObject,PC pc)
727: {
729: PetscBool flg;
730: PC_MG *mg = (PC_MG*)pc->data;
731: PCExoticType mgctype;
732: PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
735: PetscOptionsHead(PetscOptionsObject,"Exotic coarse space options");
736: PetscOptionsEnum("-pc_exotic_type","face or wirebasket","PCExoticSetType",PCExoticTypes,(PetscEnum)ctx->type,(PetscEnum*)&mgctype,&flg);
737: if (flg) {
738: PCExoticSetType(pc,mgctype);
739: }
740: PetscOptionsBool("-pc_exotic_direct_solver","use direct solver to construct interpolation","None",ctx->directSolve,&ctx->directSolve,NULL);
741: if (!ctx->directSolve) {
742: if (!ctx->ksp) {
743: const char *prefix;
744: KSPCreate(PETSC_COMM_SELF,&ctx->ksp);
745: KSPSetErrorIfNotConverged(ctx->ksp,pc->erroriffailure);
746: PetscObjectIncrementTabLevel((PetscObject)ctx->ksp,(PetscObject)pc,1);
747: PetscLogObjectParent((PetscObject)pc,(PetscObject)ctx->ksp);
748: PCGetOptionsPrefix(pc,&prefix);
749: KSPSetOptionsPrefix(ctx->ksp,prefix);
750: KSPAppendOptionsPrefix(ctx->ksp,"exotic_");
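      /* with this prefix, options such as -exotic_ksp_type cg -exotic_pc_type icc
         (assuming the PC itself has no options prefix) control the solver used to build the interpolation */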
751: }
752: KSPSetFromOptions(ctx->ksp);
753: }
754: PetscOptionsTail();
755: return(0);
756: }
759: /*MC
760: PCEXOTIC - Two level overlapping Schwarz preconditioner with exotic (non-standard) coarse grid spaces
762: This uses the PCMG infrastructure restricted to two levels and the face and wirebasket based coarse
763: grid spaces.
765: Notes:
766:    By default this uses GMRES as the fine grid smoother, so it should be used with KSPFGMRES or the smoother changed to not use GMRES
768: References:
769: + 1. - These coarse grid spaces originate in the work of Bramble, Pasciak and Schatz, "The Construction
770:    of Preconditioners for Elliptic Problems by Substructuring IV", Mathematics of Computation, volume 53, 1989.
771: . 2. - They were generalized slightly in "Domain Decomposition Method for Linear Elasticity", Ph. D. thesis, Barry Smith,
772: New York University, 1990.
773: . 3. - They were then explored in great detail in Dryja, Smith, Widlund, "Schwarz Analysis
774:    of Iterative Substructuring Methods for Elliptic Problems in Three Dimensions", SIAM Journal on Numerical
775:    Analysis, volume 31, 1994. These were developed in the context of iterative substructuring preconditioners.
776: . 4. - They were then ingeniously applied as coarse grid spaces for overlapping Schwarz methods by Dohrmann and Widlund.
777: They refer to them as GDSW (generalized Dryja, Smith, Widlund preconditioners). See, for example,
778: Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Extending theory for domain decomposition algorithms to irregular subdomains. In Ulrich Langer, Marco
779: Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
780: of the 17th International Conference on Domain Decomposition Methods in
781: Science and Engineering, held in Strobl, Austria, 2006, number 60 in
782: Springer Verlag, Lecture Notes in Computational Science and Engineering, 2007.
783: . 5. - Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. A family of energy minimizing coarse spaces for overlapping Schwarz preconditioners. In Ulrich Langer,
784: Marco Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
785: of the 17th International Conference on Domain Decomposition Methods
786: in Science and Engineering, held in Strobl, Austria, 2006, number 60 in
787: Springer Verlag, Lecture Notes in Computational Science and Engineering, 2007
788: . 6. - Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Domain decomposition
789: for less regular subdomains: Overlapping Schwarz in two dimensions. SIAM J.
790: Numer. Anal., 46(4), 2008.
791: - 7. - Clark R. Dohrmann and Olof B. Widlund. An overlapping Schwarz
792: algorithm for almost incompressible elasticity. Technical Report
793: TR2008 912, Department of Computer Science, Courant Institute
794: of Mathematical Sciences, New York University, May 2008. URL:
796: Options Database: The usual PCMG options are supported, such as -mg_levels_pc_type <type> -mg_coarse_pc_type <type>
797: -pc_mg_type <type>
799: Level: advanced
801: .seealso: PCMG, PCSetDM(), PCExoticType, PCExoticSetType()
802: M*/
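/*
   A sketch of typical command-line usage, assuming the problem is defined on a 3d DMDA that has
   been attached to the KSP with KSPSetDM():

      -ksp_type fgmres -pc_type exotic -pc_exotic_type wirebasket -mg_coarse_pc_type lu

   KSPFGMRES is suggested on the outside because the default fine-grid smoother uses GMRES,
   which makes the preconditioner change from iteration to iteration.
*/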
804: PETSC_EXTERN PetscErrorCode PCCreate_Exotic(PC pc)
805: {
807: PC_Exotic *ex;
808: PC_MG *mg;
811:   /* if the type was previously mg, we must manually destroy it because the call to PCSetType(pc,PCMG) will not destroy it */
812: if (pc->ops->destroy) {
813: (*pc->ops->destroy)(pc);
814: pc->data = 0;
815: }
816: PetscFree(((PetscObject)pc)->type_name);
817: ((PetscObject)pc)->type_name = 0;
819: PCSetType(pc,PCMG);
820: PCMGSetLevels(pc,2,NULL);
821: PCMGSetGalerkin(pc,PC_MG_GALERKIN_PMAT);
822:   PetscNew(&ex);
823: ex->type = PC_EXOTIC_FACE;
824: mg = (PC_MG*) pc->data;
825: mg->innerctx = ex;
828: pc->ops->setfromoptions = PCSetFromOptions_Exotic;
829: pc->ops->view = PCView_Exotic;
830: pc->ops->destroy = PCDestroy_Exotic;
831: pc->ops->setup = PCSetUp_Exotic;
833: PetscObjectComposeFunction((PetscObject)pc,"PCExoticSetType_C",PCExoticSetType_Exotic);
834: return(0);
835: }