Actual source code: wb.c
petsc-3.6.1 2015-08-06
2: #include <petscdmda.h> /*I "petscdmda.h" I*/
3: #include <petsc/private/pcmgimpl.h> /*I "petscksp.h" I*/
4: #include <petscctable.h>
6: typedef struct {
7: PCExoticType type;
8: Mat P; /* the constructed interpolation matrix */
9: PetscBool directSolve; /* use direct LU factorization to construct interpolation */
10: KSP ksp;
11: } PC_Exotic;
13: const char *const PCExoticTypes[] = {"face","wirebasket","PCExoticType","PC_Exotic",0};
18: /*
19: DMDAGetWireBasketInterpolation - Gets the interpolation for a wirebasket based coarse space
21: */
22: PetscErrorCode DMDAGetWireBasketInterpolation(DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
23: {
24: PetscErrorCode ierr;
25: PetscInt dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
26: PetscInt mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[26],*globals,Ng,*IIint,*IIsurf,Nt;
27: Mat Xint, Xsurf,Xint_tmp;
28: IS isint,issurf,is,row,col;
29: ISLocalToGlobalMapping ltg;
30: MPI_Comm comm;
31: Mat A,Aii,Ais,Asi,*Aholder,iAii;
32: MatFactorInfo info;
33: PetscScalar *xsurf,*xint;
34: #if defined(PETSC_USE_DEBUG_foo)
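/* editor's note: the "_foo" suffix deliberately disables these debug-only consistency
   checks; renaming the symbol back to PETSC_USE_DEBUG would re-enable them */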
35: PetscScalar tmp;
36: #endif
37: PetscTable ht;
40: DMDAGetInfo(da,&dim,0,0,0,&mp,&np,&pp,&dof,0,0,0,0,0);
41: if (dof != 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only for single field problems");
42: if (dim != 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only coded for 3d problems");
43: DMDAGetCorners(da,0,0,0,&m,&n,&p);
44: DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);
45: istart = istart ? -1 : 0;
46: jstart = jstart ? -1 : 0;
47: kstart = kstart ? -1 : 0;
49: /*
50:      the columns of P are the interpolation of each coarse grid point (one for each vertex, edge and face)
51: to all the local degrees of freedom (this includes the vertices, edges and faces).
53: Xint are the subset of the interpolation into the interior
55: Xface are the interpolation onto faces but not into the interior
57: Xsurf are the interpolation onto the vertices and edges (the surfbasket)
59:      Symbolically one could write P = [ Xint; Xface; Xsurf ] (the three blocks stacked) after interchanging the rows to match the natural ordering on the domain
61: */
62: N = (m - istart)*(n - jstart)*(p - kstart);
63: Nint = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
64: Nface = 2*((m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart));
65: Nwire = 4*((m-2-istart) + (n-2-jstart) + (p-2-kstart)) + 8;
66: Nsurf = Nface + Nwire;
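/* Worked example (editor's sketch): for a 4x4x4 owned block with no ghost offsets
   (m-istart = n-jstart = p-kstart = 4) this gives N = 64, Nint = 8, Nface = 24,
   Nwire = 32, and hence Nsurf = 56 = N - Nint */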
67: MatCreateSeqDense(MPI_COMM_SELF,Nint,26,NULL,&Xint);
68: MatCreateSeqDense(MPI_COMM_SELF,Nsurf,26,NULL,&Xsurf);
69: MatDenseGetArray(Xsurf,&xsurf);
71: /*
72: Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
73: Xsurf will be all zero (thus making the coarse matrix singular).
74: */
75: if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
76: if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
77: if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");
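/* The 26 columns of Xsurf correspond to the 26 pieces of the subdomain surface
   (8 vertices + 12 edges + 6 faces), filled below plane by plane: the bottom face
   (columns 0-8), the middle k-layers (columns 9-16), and the top face (columns 17-25) */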
79: cnt = 0;
81: xsurf[cnt++] = 1;
82: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + Nsurf] = 1;
83: xsurf[cnt++ + 2*Nsurf] = 1;
85: for (j=1; j<n-1-jstart; j++) {
86: xsurf[cnt++ + 3*Nsurf] = 1;
87: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 4*Nsurf] = 1;
88: xsurf[cnt++ + 5*Nsurf] = 1;
89: }
91: xsurf[cnt++ + 6*Nsurf] = 1;
92: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 7*Nsurf] = 1;
93: xsurf[cnt++ + 8*Nsurf] = 1;
95: for (k=1; k<p-1-kstart; k++) {
96: xsurf[cnt++ + 9*Nsurf] = 1;
97: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 10*Nsurf] = 1;
98: xsurf[cnt++ + 11*Nsurf] = 1;
100: for (j=1; j<n-1-jstart; j++) {
101: xsurf[cnt++ + 12*Nsurf] = 1;
102: /* these are the interior nodes */
103: xsurf[cnt++ + 13*Nsurf] = 1;
104: }
106: xsurf[cnt++ + 14*Nsurf] = 1;
107: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 15*Nsurf] = 1;
108: xsurf[cnt++ + 16*Nsurf] = 1;
109: }
111: xsurf[cnt++ + 17*Nsurf] = 1;
112: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 18*Nsurf] = 1;
113: xsurf[cnt++ + 19*Nsurf] = 1;
115: for (j=1;j<n-1-jstart;j++) {
116: xsurf[cnt++ + 20*Nsurf] = 1;
117: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 21*Nsurf] = 1;
118: xsurf[cnt++ + 22*Nsurf] = 1;
119: }
121: xsurf[cnt++ + 23*Nsurf] = 1;
122: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 24*Nsurf] = 1;
123: xsurf[cnt++ + 25*Nsurf] = 1;
126: /* interpolations only sum to 1 when using direct solver */
127: #if defined(PETSC_USE_DEBUG_foo)
128: for (i=0; i<Nsurf; i++) {
129: tmp = 0.0;
130: for (j=0; j<26; j++) tmp += xsurf[i+j*Nsurf];
131: if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
132: }
133: #endif
134: MatDenseRestoreArray(Xsurf,&xsurf);
135: /* MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);*/
138: /*
139:      II are the indices for all the needed vertices (in global numbering)
140:      Iint are the indices for the interior values, Isurf for the surface values
141: (This is just for the part of the global matrix obtained with MatGetSubMatrix(), it
142: is NOT the local DMDA ordering.)
143: IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
144: */
145: #define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
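/* Endpoint(a,start,b) is true when local index a lies on the subdomain boundary in
   that direction, taking the ghost offset start (0 or -1) into account */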
146: PetscMalloc3(N,&II,Nint,&Iint,Nsurf,&Isurf);
147: PetscMalloc2(Nint,&IIint,Nsurf,&IIsurf);
148: for (k=0; k<p-kstart; k++) {
149: for (j=0; j<n-jstart; j++) {
150: for (i=0; i<m-istart; i++) {
151: II[c++] = i + j*mwidth + k*mwidth*nwidth;
153: if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
154: IIint[cint] = i + j*mwidth + k*mwidth*nwidth;
155: Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
156: } else {
157: IIsurf[csurf] = i + j*mwidth + k*mwidth*nwidth;
158: Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
159: }
160: }
161: }
162: }
163: if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
164: if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
165: if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
166: DMGetLocalToGlobalMapping(da,&ltg);
167: ISLocalToGlobalMappingApply(ltg,N,II,II);
168: ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);
169: ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);
170: PetscObjectGetComm((PetscObject)da,&comm);
171: ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);
172: ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);
173: ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);
174: PetscFree3(II,Iint,Isurf);
176: MatGetSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);
177: A = *Aholder;
178: PetscFree(Aholder);
180: MatGetSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);
181: MatGetSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);
182: MatGetSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);
184: /*
185: Solve for the interpolation onto the interior Xint
186: */
187: MatMatMult(Ais,Xsurf,MAT_INITIAL_MATRIX,PETSC_DETERMINE,&Xint_tmp);
188: MatScale(Xint_tmp,-1.0);
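/* Each column x_j of Xint solves Aii x_j = -(Ais Xsurf) e_j, i.e. the coarse basis
   functions given on the surface are extended A-harmonically into the interior */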
189: if (exotic->directSolve) {
190: MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);
191: MatFactorInfoInitialize(&info);
192: MatGetOrdering(Aii,MATORDERINGND,&row,&col);
193: MatLUFactorSymbolic(iAii,Aii,row,col,&info);
194: ISDestroy(&row);
195: ISDestroy(&col);
196: MatLUFactorNumeric(iAii,Aii,&info);
197: MatMatSolve(iAii,Xint_tmp,Xint);
198: MatDestroy(&iAii);
199: } else {
200: Vec b,x;
201: PetscScalar *xint_tmp;
203: MatDenseGetArray(Xint,&xint);
204: VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&x);
205: MatDenseGetArray(Xint_tmp,&xint_tmp);
206: VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&b);
207: KSPSetOperators(exotic->ksp,Aii,Aii);
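/* the dense storage is column-major, so each right-hand side and solution column can
   be wrapped in a Vec with VecPlaceArray()/VecResetArray() and solved in place without
   any copies */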
208: for (i=0; i<26; i++) {
209: VecPlaceArray(x,xint+i*Nint);
210: VecPlaceArray(b,xint_tmp+i*Nint);
211: KSPSolve(exotic->ksp,b,x);
212: VecResetArray(x);
213: VecResetArray(b);
214: }
215: MatDenseRestoreArray(Xint,&xint);
216: MatDenseRestoreArray(Xint_tmp,&xint_tmp);
217: VecDestroy(&x);
218: VecDestroy(&b);
219: }
220: MatDestroy(&Xint_tmp);
222: #if defined(PETSC_USE_DEBUG_foo)
223: MatDenseGetArray(Xint,&xint);
224: for (i=0; i<Nint; i++) {
225: tmp = 0.0;
226: for (j=0; j<26; j++) tmp += xint[i+j*Nint];
228: if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
229: }
230: MatDenseRestoreArray(Xint,&xint);
231: /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD); */
232: #endif
235: /* Ntotal = total vertices + total faces + total edges on the coarse grid */
236: Ntotal = (mp + 1)*(np + 1)*(pp + 1) + mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1) + mp*(np+1)*(pp+1) + np*(mp+1)*(pp+1) + pp*(mp+1)*(np+1);
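/* Worked example (editor's sketch): on a 2x2x2 process grid (mp = np = pp = 2) this
   gives 27 vertices + 36 faces + 54 edges = 117 coarse grid points */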
238: /*
239: For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
240: */
241: cnt = 0;
243: gl[cnt++] = 0; { gl[cnt++] = 1;} gl[cnt++] = m-istart-1;
244: { gl[cnt++] = mwidth; { gl[cnt++] = mwidth+1;} gl[cnt++] = mwidth + m-istart-1;}
245: gl[cnt++] = mwidth*(n-jstart-1); { gl[cnt++] = mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*(n-jstart-1) + m-istart-1;
246: {
247: gl[cnt++] = mwidth*nwidth; { gl[cnt++] = mwidth*nwidth+1;} gl[cnt++] = mwidth*nwidth+ m-istart-1;
248: { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
249: gl[cnt++] = mwidth*nwidth+ mwidth*(n-jstart-1); { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1) + m-istart-1;
250: }
251: gl[cnt++] = mwidth*nwidth*(p-kstart-1); { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) + m-istart-1;
252: { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth; { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1)+mwidth+m-istart-1;}
253: gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth*(n-jstart-1); { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+ mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth*(n-jstart-1) + m-istart-1;
255: /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
256: /* convert that to global numbering and get them on all processes */
257: ISLocalToGlobalMappingApply(ltg,26,gl,gl);
258: /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
259: PetscMalloc1(26*mp*np*pp,&globals);
260: MPI_Allgather(gl,26,MPIU_INT,globals,26,MPIU_INT,PetscObjectComm((PetscObject)da));
262: /* Number the coarse grid points from 0 to Ntotal */
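/* PetscTable does not accept 0 as a key, hence the +1 shift when inserting the
   gathered global indices and the matching decrement after PetscTableFind() below;
   the table assigns each distinct index a number from 1 to Ntotal */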
263: MatGetSize(Aglobal,&Nt,NULL);
264: PetscTableCreate(Ntotal/3,Nt+1,&ht);
265: for (i=0; i<26*mp*np*pp; i++) {
266: PetscTableAddCount(ht,globals[i]+1);
267: }
268: PetscTableGetCount(ht,&cnt);
269: if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
270: PetscFree(globals);
271: for (i=0; i<26; i++) {
272: PetscTableFind(ht,gl[i]+1,&gl[i]);
273: gl[i]--;
274: }
275: PetscTableDestroy(&ht);
276: /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
278: /* construct global interpolation matrix */
279: MatGetLocalSize(Aglobal,&Ng,NULL);
280: if (reuse == MAT_INITIAL_MATRIX) {
281: MatCreateAIJ(PetscObjectComm((PetscObject)da),Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,NULL,Nint+Nsurf,NULL,P);
282: } else {
283: MatZeroEntries(*P);
284: }
285: MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);
286: MatDenseGetArray(Xint,&xint);
287: MatSetValues(*P,Nint,IIint,26,gl,xint,INSERT_VALUES);
288: MatDenseRestoreArray(Xint,&xint);
289: MatDenseGetArray(Xsurf,&xsurf);
290: MatSetValues(*P,Nsurf,IIsurf,26,gl,xsurf,INSERT_VALUES);
291: MatDenseRestoreArray(Xsurf,&xsurf);
292: MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);
293: MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);
294: PetscFree2(IIint,IIsurf);
296: #if defined(PETSC_USE_DEBUG_foo)
297: {
298: Vec x,y;
299: PetscScalar *yy;
300: VecCreateMPI(PetscObjectComm((PetscObject)da),Ng,PETSC_DETERMINE,&y);
301: VecCreateMPI(PetscObjectComm((PetscObject)da),PETSC_DETERMINE,Ntotal,&x);
302: VecSet(x,1.0);
303: MatMult(*P,x,y);
304: VecGetArray(y,&yy);
305: for (i=0; i<Ng; i++) {
306: if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %g",i,(double)PetscAbsScalar(yy[i]));
307: }
308: VecRestoreArray(y,&yy);
309:     VecDestroy(&x);
310:     VecDestroy(&y);
311: }
312: #endif
314: MatDestroy(&Aii);
315: MatDestroy(&Ais);
316: MatDestroy(&Asi);
317: MatDestroy(&A);
318: ISDestroy(&is);
319: ISDestroy(&isint);
320: ISDestroy(&issurf);
321: MatDestroy(&Xint);
322: MatDestroy(&Xsurf);
323: return(0);
324: }
328: /*
329: DMDAGetFaceInterpolation - Gets the interpolation for a face based coarse space
331: */
332: PetscErrorCode DMDAGetFaceInterpolation(DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
333: {
334: PetscErrorCode ierr;
335: PetscInt dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
336: PetscInt mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[6],*globals,Ng,*IIint,*IIsurf,Nt;
337: Mat Xint, Xsurf,Xint_tmp;
338: IS isint,issurf,is,row,col;
339: ISLocalToGlobalMapping ltg;
340: MPI_Comm comm;
341: Mat A,Aii,Ais,Asi,*Aholder,iAii;
342: MatFactorInfo info;
343: PetscScalar *xsurf,*xint;
344: #if defined(PETSC_USE_DEBUG_foo)
345: PetscScalar tmp;
346: #endif
347: PetscTable ht;
350: DMDAGetInfo(da,&dim,0,0,0,&mp,&np,&pp,&dof,0,0,0,0,0);
351: if (dof != 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only for single field problems");
352: if (dim != 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only coded for 3d problems");
353: DMDAGetCorners(da,0,0,0,&m,&n,&p);
354: DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);
355: istart = istart ? -1 : 0;
356: jstart = jstart ? -1 : 0;
357: kstart = kstart ? -1 : 0;
359: /*
360:      the columns of P are the interpolation of each coarse grid point (one for each face)
361: to all the local degrees of freedom (this includes the vertices, edges and faces).
363: Xint are the subset of the interpolation into the interior
365: Xface are the interpolation onto faces but not into the interior
367:      Xsurf are the interpolation onto the subdomain surface (one column per face)
369:      Symbolically one could write P = [ Xint; Xface; Xsurf ] (the three blocks stacked) after interchanging the rows to match the natural ordering on the domain
371: */
372: N = (m - istart)*(n - jstart)*(p - kstart);
373: Nint = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
374: Nface = 2*((m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart));
375: Nwire = 4*((m-2-istart) + (n-2-jstart) + (p-2-kstart)) + 8;
376: Nsurf = Nface + Nwire;
377: MatCreateSeqDense(MPI_COMM_SELF,Nint,6,NULL,&Xint);
378: MatCreateSeqDense(MPI_COMM_SELF,Nsurf,6,NULL,&Xsurf);
379: MatDenseGetArray(Xsurf,&xsurf);
381: /*
382: Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
383: Xsurf will be all zero (thus making the coarse matrix singular).
384: */
385: if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
386: if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
387: if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");
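/* The 6 columns of Xsurf correspond to the 6 faces of the subdomain: column 0 to the
   bottom (k = 0) face, columns 1-4 to the four side faces swept by the k loop, and
   column 5 to the top face */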
389: cnt = 0;
390: for (j=1; j<n-1-jstart; j++) {
391: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 0*Nsurf] = 1;
392: }
394: for (k=1; k<p-1-kstart; k++) {
395: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 1*Nsurf] = 1;
396: for (j=1; j<n-1-jstart; j++) {
397: xsurf[cnt++ + 2*Nsurf] = 1;
398: /* these are the interior nodes */
399: xsurf[cnt++ + 3*Nsurf] = 1;
400: }
401: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 4*Nsurf] = 1;
402: }
403: for (j=1;j<n-1-jstart;j++) {
404: for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 5*Nsurf] = 1;
405: }
407: #if defined(PETSC_USE_DEBUG_foo)
408: for (i=0; i<Nsurf; i++) {
409: tmp = 0.0;
410: for (j=0; j<6; j++) tmp += xsurf[i+j*Nsurf];
412: if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
413: }
414: #endif
415: MatDenseRestoreArray(Xsurf,&xsurf);
416: /* MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);*/
419: /*
420:      II are the indices for all the needed vertices (in global numbering)
421:      Iint are the indices for the interior values, Isurf for the surface values
422: (This is just for the part of the global matrix obtained with MatGetSubMatrix(), it
423: is NOT the local DMDA ordering.)
424: IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
425: */
426: #define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
427: PetscMalloc3(N,&II,Nint,&Iint,Nsurf,&Isurf);
428: PetscMalloc2(Nint,&IIint,Nsurf,&IIsurf);
429: for (k=0; k<p-kstart; k++) {
430: for (j=0; j<n-jstart; j++) {
431: for (i=0; i<m-istart; i++) {
432: II[c++] = i + j*mwidth + k*mwidth*nwidth;
434: if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
435: IIint[cint] = i + j*mwidth + k*mwidth*nwidth;
436: Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
437: } else {
438: IIsurf[csurf] = i + j*mwidth + k*mwidth*nwidth;
439: Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
440: }
441: }
442: }
443: }
444: if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
445: if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
446: if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
447: DMGetLocalToGlobalMapping(da,&ltg);
448: ISLocalToGlobalMappingApply(ltg,N,II,II);
449: ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);
450: ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);
451: PetscObjectGetComm((PetscObject)da,&comm);
452: ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);
453: ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);
454: ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);
455: PetscFree3(II,Iint,Isurf);
457: ISSort(is);
458: MatGetSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);
459: A = *Aholder;
460: PetscFree(Aholder);
462: MatGetSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);
463: MatGetSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);
464: MatGetSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);
466: /*
467: Solve for the interpolation onto the interior Xint
468: */
469: MatMatMult(Ais,Xsurf,MAT_INITIAL_MATRIX,PETSC_DETERMINE,&Xint_tmp);
470: MatScale(Xint_tmp,-1.0);
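/* As in the wirebasket case: each column of Xint is the A-harmonic extension of one
   face coarse function, obtained by solving Aii Xint = -(Ais Xsurf) */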
472: if (exotic->directSolve) {
473: MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);
474: MatFactorInfoInitialize(&info);
475: MatGetOrdering(Aii,MATORDERINGND,&row,&col);
476: MatLUFactorSymbolic(iAii,Aii,row,col,&info);
477: ISDestroy(&row);
478: ISDestroy(&col);
479: MatLUFactorNumeric(iAii,Aii,&info);
480: MatMatSolve(iAii,Xint_tmp,Xint);
481: MatDestroy(&iAii);
482: } else {
483: Vec b,x;
484: PetscScalar *xint_tmp;
486: MatDenseGetArray(Xint,&xint);
487: VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&x);
488: MatDenseGetArray(Xint_tmp,&xint_tmp);
489: VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&b);
490: KSPSetOperators(exotic->ksp,Aii,Aii);
491: for (i=0; i<6; i++) {
492: VecPlaceArray(x,xint+i*Nint);
493: VecPlaceArray(b,xint_tmp+i*Nint);
494: KSPSolve(exotic->ksp,b,x);
495: VecResetArray(x);
496: VecResetArray(b);
497: }
498: MatDenseRestoreArray(Xint,&xint);
499: MatDenseRestoreArray(Xint_tmp,&xint_tmp);
500: VecDestroy(&x);
501: VecDestroy(&b);
502: }
503: MatDestroy(&Xint_tmp);
505: #if defined(PETSC_USE_DEBUG_foo)
506: MatDenseGetArray(Xint,&xint);
507: for (i=0; i<Nint; i++) {
508: tmp = 0.0;
509: for (j=0; j<6; j++) tmp += xint[i+j*Nint];
511: if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
512: }
513: MatDenseRestoreArray(Xint,&xint);
514: /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD); */
515: #endif
518: /* total faces */
519: Ntotal = mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1);
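/* Worked example (editor's sketch): on a 2x2x2 process grid this gives 3*(2*2*3) = 36
   coarse grid points, one per face */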
521: /*
522: For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
523: */
524: cnt = 0;
525: { gl[cnt++] = mwidth+1;}
526: {
527: { gl[cnt++] = mwidth*nwidth+1;}
528: { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
529: { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;}
530: }
531: { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;}
533: /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
534: /* convert that to global numbering and get them on all processes */
535: ISLocalToGlobalMappingApply(ltg,6,gl,gl);
536: /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
537: PetscMalloc1(6*mp*np*pp,&globals);
538: MPI_Allgather(gl,6,MPIU_INT,globals,6,MPIU_INT,PetscObjectComm((PetscObject)da));
540: /* Number the coarse grid points from 0 to Ntotal */
541: MatGetSize(Aglobal,&Nt,NULL);
542: PetscTableCreate(Ntotal/3,Nt+1,&ht);
543: for (i=0; i<6*mp*np*pp; i++) {
544: PetscTableAddCount(ht,globals[i]+1);
545: }
546: PetscTableGetCount(ht,&cnt);
547: if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
548: PetscFree(globals);
549: for (i=0; i<6; i++) {
550: PetscTableFind(ht,gl[i]+1,&gl[i]);
551: gl[i]--;
552: }
553: PetscTableDestroy(&ht);
554: /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
556: /* construct global interpolation matrix */
557: MatGetLocalSize(Aglobal,&Ng,NULL);
558: if (reuse == MAT_INITIAL_MATRIX) {
559: MatCreateAIJ(PetscObjectComm((PetscObject)da),Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,NULL,Nint,NULL,P);
560: } else {
561: MatZeroEntries(*P);
562: }
563: MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);
564: MatDenseGetArray(Xint,&xint);
565: MatSetValues(*P,Nint,IIint,6,gl,xint,INSERT_VALUES);
566: MatDenseRestoreArray(Xint,&xint);
567: MatDenseGetArray(Xsurf,&xsurf);
568: MatSetValues(*P,Nsurf,IIsurf,6,gl,xsurf,INSERT_VALUES);
569: MatDenseRestoreArray(Xsurf,&xsurf);
570: MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);
571: MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);
572: PetscFree2(IIint,IIsurf);
575: #if defined(PETSC_USE_DEBUG_foo)
576: {
577: Vec x,y;
578: PetscScalar *yy;
579: VecCreateMPI(PetscObjectComm((PetscObject)da),Ng,PETSC_DETERMINE,&y);
580: VecCreateMPI(PetscObjectComm((PetscObject)da),PETSC_DETERMINE,Ntotal,&x);
581: VecSet(x,1.0);
582: MatMult(*P,x,y);
583: VecGetArray(y,&yy);
584: for (i=0; i<Ng; i++) {
585: if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %g",i,(double)PetscAbsScalar(yy[i]));
586: }
587: VecRestoreArray(y,&yy);
588:     VecDestroy(&x);
589:     VecDestroy(&y);
590: }
591: #endif
593: MatDestroy(&Aii);
594: MatDestroy(&Ais);
595: MatDestroy(&Asi);
596: MatDestroy(&A);
597: ISDestroy(&is);
598: ISDestroy(&isint);
599: ISDestroy(&issurf);
600: MatDestroy(&Xint);
601: MatDestroy(&Xsurf);
602: return(0);
603: }
608: /*@
609: PCExoticSetType - Sets the type of coarse grid interpolation to use
611: Logically Collective on PC
613: Input Parameters:
614: + pc - the preconditioner context
615: - type - either PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET (defaults to face)
617: Notes: The face based interpolation has 1 degree of freedom per face and ignores the
618: edge and vertex values completely in the coarse problem. For any seven point
619: stencil the interpolation of a constant on all faces into the interior is that constant.
621: The wirebasket interpolation has 1 degree of freedom per vertex, per edge and
622: per face. A constant on the subdomain boundary is interpolated as that constant
623: in the interior of the domain.
625: The coarse grid matrix is obtained via the Galerkin computation A_c = R A R^T, hence
626: if A is nonsingular A_c is also nonsingular.
628:    Both interpolations are suitable only for scalar problems.
630: Level: intermediate
633: .seealso: PCEXOTIC, PCExoticType
634: @*/
635: PetscErrorCode PCExoticSetType(PC pc,PCExoticType type)
636: {
642: PetscTryMethod(pc,"PCExoticSetType_C",(PC,PCExoticType),(pc,type));
643: return(0);
644: }
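/*
   Usage sketch (editor's addition, not part of wb.c): how an application might select
   this preconditioner and its coarse space from code. The function name
   ExampleUsePCExotic and the #if 0 guard are illustrative only; the PETSc calls are
   standard API. It assumes the KSP was given a 3d DMDA via KSPSetDM(), since
   PCSetUp_Exotic() below requires pc->dm to be set.
*/
#if 0
static PetscErrorCode ExampleUsePCExotic(KSP ksp)
{
  PC             pc;
  PetscErrorCode ierr;

  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = KSPSetType(ksp,KSPFGMRES);CHKERRQ(ierr);    /* smoother uses GMRES by default, so use a flexible outer method */
  ierr = PCSetType(pc,PCEXOTIC);CHKERRQ(ierr);       /* two-level PCMG with an exotic coarse space */
  ierr = PCExoticSetType(pc,PC_EXOTIC_WIREBASKET);CHKERRQ(ierr);
  return(0);
}
#endif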
648: static PetscErrorCode PCExoticSetType_Exotic(PC pc,PCExoticType type)
649: {
650: PC_MG *mg = (PC_MG*)pc->data;
651: PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
654: ctx->type = type;
655: return(0);
656: }
660: PetscErrorCode PCSetUp_Exotic(PC pc)
661: {
663: Mat A;
664: PC_MG *mg = (PC_MG*)pc->data;
665: PC_Exotic *ex = (PC_Exotic*) mg->innerctx;
666: MatReuse reuse = (ex->P) ? MAT_REUSE_MATRIX : MAT_INITIAL_MATRIX;
669: if (!pc->dm) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONGSTATE,"Need to call PCSetDM() before using this PC");
670: PCGetOperators(pc,NULL,&A);
671: if (ex->type == PC_EXOTIC_FACE) {
672: DMDAGetFaceInterpolation(pc->dm,ex,A,reuse,&ex->P);
673: } else if (ex->type == PC_EXOTIC_WIREBASKET) {
674: DMDAGetWireBasketInterpolation(pc->dm,ex,A,reuse,&ex->P);
675: } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_PLIB,"Unknown exotic coarse space %d",ex->type);
676: PCMGSetInterpolation(pc,1,ex->P);
677:   /* if the PC has an attached DM we must remove it or PCMG will use it to compute incorrectly sized vectors and interpolations */
678: PCSetDM(pc,NULL);
679: PCSetUp_MG(pc);
680: return(0);
681: }
685: PetscErrorCode PCDestroy_Exotic(PC pc)
686: {
688: PC_MG *mg = (PC_MG*)pc->data;
689: PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
692: MatDestroy(&ctx->P);
693: KSPDestroy(&ctx->ksp);
694: PetscFree(ctx);
695: PCDestroy_MG(pc);
696: return(0);
697: }
701: PetscErrorCode PCView_Exotic(PC pc,PetscViewer viewer)
702: {
703: PC_MG *mg = (PC_MG*)pc->data;
705: PetscBool iascii;
706: PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
709: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
710: if (iascii) {
711: PetscViewerASCIIPrintf(viewer," Exotic type = %s\n",PCExoticTypes[ctx->type]);
712: if (ctx->directSolve) {
713: PetscViewerASCIIPrintf(viewer," Using direct solver to construct interpolation\n");
714: } else {
715: PetscViewer sviewer;
716: PetscMPIInt rank;
718: PetscViewerASCIIPrintf(viewer," Using iterative solver to construct interpolation\n");
719: PetscViewerASCIIPushTab(viewer);
720: PetscViewerASCIIPushTab(viewer); /* should not need to push this twice? */
721: PetscViewerGetSingleton(viewer,&sviewer);
722: MPI_Comm_rank(PetscObjectComm((PetscObject)pc),&rank);
723: if (!rank) {
724: KSPView(ctx->ksp,sviewer);
725: }
726: PetscViewerRestoreSingleton(viewer,&sviewer);
727: PetscViewerASCIIPopTab(viewer);
728: PetscViewerASCIIPopTab(viewer);
729: }
730: }
731: PCView_MG(pc,viewer);
732: return(0);
733: }
737: PetscErrorCode PCSetFromOptions_Exotic(PetscOptions *PetscOptionsObject,PC pc)
738: {
740: PetscBool flg;
741: PC_MG *mg = (PC_MG*)pc->data;
742: PCExoticType mgctype;
743: PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
746: PetscOptionsHead(PetscOptionsObject,"Exotic coarse space options");
747: PetscOptionsEnum("-pc_exotic_type","face or wirebasket","PCExoticSetType",PCExoticTypes,(PetscEnum)ctx->type,(PetscEnum*)&mgctype,&flg);
748: if (flg) {
749: PCExoticSetType(pc,mgctype);
750: }
751: PetscOptionsBool("-pc_exotic_direct_solver","use direct solver to construct interpolation","None",ctx->directSolve,&ctx->directSolve,NULL);
752: if (!ctx->directSolve) {
753: if (!ctx->ksp) {
754: const char *prefix;
755: KSPCreate(PETSC_COMM_SELF,&ctx->ksp);
756: KSPSetErrorIfNotConverged(ctx->ksp,pc->erroriffailure);
757: PetscObjectIncrementTabLevel((PetscObject)ctx->ksp,(PetscObject)pc,1);
758: PetscLogObjectParent((PetscObject)pc,(PetscObject)ctx->ksp);
759: PCGetOptionsPrefix(pc,&prefix);
760: KSPSetOptionsPrefix(ctx->ksp,prefix);
761: KSPAppendOptionsPrefix(ctx->ksp,"exotic_");
762: }
763: KSPSetFromOptions(ctx->ksp);
764: }
765: PetscOptionsTail();
766: return(0);
767: }
770: /*MC
771: PCEXOTIC - Two level overlapping Schwarz preconditioner with exotic (non-standard) coarse grid spaces
773: This uses the PCMG infrastructure restricted to two levels and the face and wirebasket based coarse
774: grid spaces.
776:    Notes: By default this uses GMRES as the fine grid smoother, so the outer Krylov method should be KSPFGMRES, or the smoother should be changed to not use GMRES
778:    References: These coarse grid spaces originate in the work of Bramble, Pasciak and Schatz, "The Construction
779:    of Preconditioners for Elliptic Problems by Substructuring IV", Mathematics of Computation, volume 53, pages 1--24, 1989.
780:    They were generalized slightly in "Domain Decomposition Method for Linear Elasticity", Ph. D. thesis, Barry Smith,
781:    New York University, 1990. They were then explored in great detail in Dryja, Smith, Widlund, "Schwarz Analysis
782:    of Iterative Substructuring Methods for Elliptic Problems in Three Dimensions", SIAM Journal on Numerical
783:    Analysis, volume 31, pages 1662-1694, 1994. These were developed in the context of iterative substructuring preconditioners.
784: They were then ingeniously applied as coarse grid spaces for overlapping Schwarz methods by Dohrmann and Widlund.
785: They refer to them as GDSW (generalized Dryja, Smith, Widlund preconditioners). See, for example,
786:    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund, "Extending theory for domain decomposition algorithms to
787:    irregular subdomains", in Ulrich Langer, Marco Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors,
788:    Proceedings of the 17th International Conference on Domain Decomposition Methods in Science and Engineering
789:    (Strobl, Austria, July 3-7, 2006), number 60 in Lecture Notes in Computational Science and Engineering,
790:    Springer-Verlag, pages 255-261, 2007.
791:    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund, "A family of energy minimizing coarse spaces for
792:    overlapping Schwarz preconditioners", in the same proceedings, pages 247-254, 2007.
793:    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund, "Domain decomposition for less regular subdomains:
794:    Overlapping Schwarz in two dimensions", SIAM J. Numer. Anal., 46(4):2153-2168, 2008.
795:    Clark R. Dohrmann and Olof B. Widlund, "An overlapping Schwarz algorithm for almost incompressible elasticity",
796:    Technical Report TR2008-912, Department of Computer Science, Courant Institute of Mathematical Sciences,
797:    New York University, May 2008. http://cs.nyu.edu/csweb/Research/TechReports/TR2008-912/TR2008-912.pdf
806: Options Database: The usual PCMG options are supported, such as -mg_levels_pc_type <type> -mg_coarse_pc_type <type>
807: -pc_mg_type <type>
809: Level: advanced
811: .seealso: PCMG, PCSetDM(), PCExoticType, PCExoticSetType()
812: M*/
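/*
   Example command line (editor's sketch): the executable and the choice of inner
   solver are assumptions; the option names come from this file (PetscOptionsEnum,
   PetscOptionsBool, and the exotic_ prefix in PCSetFromOptions_Exotic() above) and
   the PCMG infrastructure.

     ./myapp -ksp_type fgmres -pc_type exotic -pc_exotic_type wirebasket \
             -exotic_ksp_type preonly -exotic_pc_type lu
*/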
816: PETSC_EXTERN PetscErrorCode PCCreate_Exotic(PC pc)
817: {
819: PC_Exotic *ex;
820: PC_MG *mg;
823:   /* if the type was previously mg, we must manually destroy it because the call to PCSetType(pc,PCMG) will not destroy it */
824: if (pc->ops->destroy) {
825: (*pc->ops->destroy)(pc);
826: pc->data = 0;
827: }
828: PetscFree(((PetscObject)pc)->type_name);
829: ((PetscObject)pc)->type_name = 0;
831: PCSetType(pc,PCMG);
832: PCMGSetLevels(pc,2,NULL);
833: PCMGSetGalerkin(pc,PETSC_TRUE);
834: PetscNew(&ex);
835: ex->type = PC_EXOTIC_FACE;
836: mg = (PC_MG*) pc->data;
837: mg->innerctx = ex;
840: pc->ops->setfromoptions = PCSetFromOptions_Exotic;
841: pc->ops->view = PCView_Exotic;
842: pc->ops->destroy = PCDestroy_Exotic;
843: pc->ops->setup = PCSetUp_Exotic;
845: PetscObjectComposeFunction((PetscObject)pc,"PCExoticSetType_C",PCExoticSetType_Exotic);
846: return(0);
847: }