Actual source code: dainterp.c
petsc-3.13.6 2020-09-29
2: /*
3: Code for interpolating between grids represented by DMDAs
4: */
6: /*
7:    For linear elements there are two branches of code to compute the interpolation; they should compute the same results but may not. The "new version" does
8:    not work for periodic domains, while the old one does. Change NEWVERSION to 1 to compile in the new version. Eventually, when we are sure the two produce identical results,
9:    we will remove/merge the new version. Based on current tests, both produce the same results. We are leaving NEWVERSION in the code for now since some
10:   consider it cleaner, but the old version is turned on since it handles the periodic case.
11: */
12: #define NEWVERSION 0
14: #include <petsc/private/dmdaimpl.h>
16: /*
17:     Since the interpolation uses MATMAIJ for dof > 1 we convert requests for non-MATAIJ-based matrices to MATAIJ.
18:     This is a bit of a hack; the reason for it is partly that -dm_mat_type defines the
19:     matrix type for both the operator matrices and the interpolation matrices, so that users
20:     can select matrix types based on MATAIJ for accelerators.
21: */
22: static PetscErrorCode ConvertToAIJ(MatType intype,MatType *outtype)
23: {
25: PetscInt i;
26: char const *types[3] = {MATAIJ,MATSEQAIJ,MATMPIAIJ};
27: PetscBool flg;
30: *outtype = MATAIJ;
31: for (i=0; i<3; i++) {
32: PetscStrbeginswith(intype,types[i],&flg);
33: if (flg) {
34: *outtype = intype;
35: return(0);
36: }
37: }
38: return(0);
39: }
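/*
   For example: with -dm_mat_type aijcusparse the requested type begins with "aij",
   so ConvertToAIJ() keeps it unchanged, while a request such as MATBAIJ, which does
   not begin with any of the three AIJ prefixes above, falls back to plain MATAIJ.
*/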
41: PetscErrorCode DMCreateInterpolation_DA_1D_Q1(DM dac,DM daf,Mat *A)
42: {
43: PetscErrorCode ierr;
44: PetscInt i,i_start,m_f,Mx;
45: const PetscInt *idx_f,*idx_c;
46: PetscInt m_ghost,m_ghost_c;
47: PetscInt row,col,i_start_ghost,mx,m_c,nc,ratio;
48: PetscInt i_c,i_start_c,i_start_ghost_c,cols[2],dof;
49: PetscScalar v[2],x;
50: Mat mat;
51: DMBoundaryType bx;
52: ISLocalToGlobalMapping ltog_f,ltog_c;
53: MatType mattype;
56: DMDAGetInfo(dac,0,&Mx,0,0,0,0,0,0,0,&bx,0,0,0);
57: DMDAGetInfo(daf,0,&mx,0,0,0,0,0,&dof,0,0,0,0,0);
58: if (bx == DM_BOUNDARY_PERIODIC) {
59: ratio = mx/Mx;
60: if (ratio*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
61: } else {
62: ratio = (mx-1)/(Mx-1);
63: if (ratio*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
64: }
66: DMDAGetCorners(daf,&i_start,0,0,&m_f,0,0);
67: DMDAGetGhostCorners(daf,&i_start_ghost,0,0,&m_ghost,0,0);
68:   DMGetLocalToGlobalMapping(daf,&ltog_f);
69: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
71: DMDAGetCorners(dac,&i_start_c,0,0,&m_c,0,0);
72: DMDAGetGhostCorners(dac,&i_start_ghost_c,0,0,&m_ghost_c,0,0);
73:   DMGetLocalToGlobalMapping(dac,&ltog_c);
74: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
76: /* create interpolation matrix */
77: MatCreate(PetscObjectComm((PetscObject)dac),&mat);
78: #if defined(PETSC_HAVE_CUDA)
79: /*
80: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
81: we don't want the original unconverted matrix copied to the GPU
82: */
83: if (dof > 1) {
84: MatBindToCPU(mat,PETSC_TRUE);
85: }
86: #endif
87: MatSetSizes(mat,m_f,m_c,mx,Mx);
88: ConvertToAIJ(dac->mattype,&mattype);
89: MatSetType(mat,mattype);
90: MatSeqAIJSetPreallocation(mat,2,NULL);
91: MatMPIAIJSetPreallocation(mat,2,NULL,1,NULL);
93: /* loop over local fine grid nodes setting interpolation for those*/
94: if (!NEWVERSION) {
96: for (i=i_start; i<i_start+m_f; i++) {
97: /* convert to local "natural" numbering and then to PETSc global numbering */
98: row = idx_f[i-i_start_ghost];
100: i_c = (i/ratio); /* coarse grid node to left of fine grid node */
101: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
102: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
104: /*
105: Only include those interpolation points that are truly
106:       nonzero. Note this is very important for the final grid lines
107:       in the x direction, since they have no right neighbor.
108: */
109: x = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio);
110: nc = 0;
111:     /* one to the left; or we are right on it */
112: col = (i_c-i_start_ghost_c);
113: cols[nc] = idx_c[col];
114: v[nc++] = -x + 1.0;
115: /* one right? */
116: if (i_c*ratio != i) {
117: cols[nc] = idx_c[col+1];
118: v[nc++] = x;
119: }
120: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
121: }
123: } else {
124: PetscScalar *xi;
125: PetscInt li,nxi,n;
126: PetscScalar Ni[2];
128: /* compute local coordinate arrays */
129: nxi = ratio + 1;
130: PetscMalloc1(nxi,&xi);
131: for (li=0; li<nxi; li++) {
132: xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
133: }
135: for (i=i_start; i<i_start+m_f; i++) {
136: /* convert to local "natural" numbering and then to PETSc global numbering */
137: row = idx_f[(i-i_start_ghost)];
139: i_c = (i/ratio); /* coarse grid node to left of fine grid node */
140: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
141: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
143: /* remainders */
144: li = i - ratio * (i/ratio);
145: if (i==mx-1) li = nxi-1;
147: /* corners */
148: col = (i_c-i_start_ghost_c);
149: cols[0] = idx_c[col];
150: Ni[0] = 1.0;
151: if ((li==0) || (li==nxi-1)) {
152: MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);
153: continue;
154: }
156: /* edges + interior */
157: /* remainders */
158: if (i==mx-1) i_c--;
160: col = (i_c-i_start_ghost_c);
161:       cols[0] = idx_c[col]; /* one to the left; or we are right on it */
162: cols[1] = idx_c[col+1];
164: Ni[0] = 0.5*(1.0-xi[li]);
165: Ni[1] = 0.5*(1.0+xi[li]);
166: for (n=0; n<2; n++) {
167: if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1;
168: }
169: MatSetValues(mat,1,&row,2,cols,Ni,INSERT_VALUES);
170: }
171: PetscFree(xi);
172: }
173: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
174: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
175: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
176: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
177: MatCreateMAIJ(mat,dof,A);
178: MatDestroy(&mat);
179: return(0);
180: }
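/*
   A small worked example of the linear weights set above (illustrative only): with
   ratio = 2 (for example mx = 9, Mx = 5), fine node i = 3 has left coarse neighbor
   i_c = i/ratio = 1 and local coordinate x = (i - i_c*ratio)/ratio = 0.5, so its row
   receives 1-x = 0.5 at coarse node 1 and x = 0.5 at coarse node 2. A fine node that
   coincides with a coarse node, say i = 4, gives x = 0 and a single entry of 1.0,
   which is why the right-hand column is skipped when i_c*ratio == i.
*/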
182: PetscErrorCode DMCreateInterpolation_DA_1D_Q0(DM dac,DM daf,Mat *A)
183: {
184: PetscErrorCode ierr;
185: PetscInt i,i_start,m_f,Mx;
186: const PetscInt *idx_f,*idx_c;
187: ISLocalToGlobalMapping ltog_f,ltog_c;
188: PetscInt m_ghost,m_ghost_c;
189: PetscInt row,col,i_start_ghost,mx,m_c,nc,ratio;
190: PetscInt i_c,i_start_c,i_start_ghost_c,cols[2],dof;
191: PetscScalar v[2],x;
192: Mat mat;
193: DMBoundaryType bx;
194: MatType mattype;
197: DMDAGetInfo(dac,0,&Mx,0,0,0,0,0,0,0,&bx,0,0,0);
198: DMDAGetInfo(daf,0,&mx,0,0,0,0,0,&dof,0,0,0,0,0);
199: if (bx == DM_BOUNDARY_PERIODIC) {
200: if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
201: ratio = mx/Mx;
202: if (ratio*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
203: } else {
204: if (Mx < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx);
205: ratio = (mx-1)/(Mx-1);
206: if (ratio*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
207: }
209: DMDAGetCorners(daf,&i_start,0,0,&m_f,0,0);
210: DMDAGetGhostCorners(daf,&i_start_ghost,0,0,&m_ghost,0,0);
211:   DMGetLocalToGlobalMapping(daf,&ltog_f);
212: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
214: DMDAGetCorners(dac,&i_start_c,0,0,&m_c,0,0);
215: DMDAGetGhostCorners(dac,&i_start_ghost_c,0,0,&m_ghost_c,0,0);
216:   DMGetLocalToGlobalMapping(dac,&ltog_c);
217: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
219: /* create interpolation matrix */
220: MatCreate(PetscObjectComm((PetscObject)dac),&mat);
221: #if defined(PETSC_HAVE_CUDA)
222: /*
223: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
224: we don't want the original unconverted matrix copied to the GPU
225: */
226: if (dof > 1) {
227: MatBindToCPU(mat,PETSC_TRUE);
228: }
229: #endif
230: MatSetSizes(mat,m_f,m_c,mx,Mx);
231: ConvertToAIJ(dac->mattype,&mattype);
232: MatSetType(mat,mattype);
233: MatSeqAIJSetPreallocation(mat,2,NULL);
234: MatMPIAIJSetPreallocation(mat,2,NULL,0,NULL);
236: /* loop over local fine grid nodes setting interpolation for those*/
237: for (i=i_start; i<i_start+m_f; i++) {
238: /* convert to local "natural" numbering and then to PETSc global numbering */
239: row = idx_f[(i-i_start_ghost)];
241: i_c = (i/ratio); /* coarse grid node to left of fine grid node */
243: /*
244: Only include those interpolation points that are truly
245:      nonzero. Note this is very important for the final grid lines
246:      in the x direction, since they have no right neighbor.
247: */
248: x = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio);
249: nc = 0;
250:     /* one to the left; or we are right on it */
251: col = (i_c-i_start_ghost_c);
252: cols[nc] = idx_c[col];
253: v[nc++] = -x + 1.0;
254: /* one right? */
255: if (i_c*ratio != i) {
256: cols[nc] = idx_c[col+1];
257: v[nc++] = x;
258: }
259: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
260: }
261: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
262: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
263: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
264: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
265: MatCreateMAIJ(mat,dof,A);
266: MatDestroy(&mat);
267: PetscLogFlops(5.0*m_f);
268: return(0);
269: }
271: PetscErrorCode DMCreateInterpolation_DA_2D_Q1(DM dac,DM daf,Mat *A)
272: {
273: PetscErrorCode ierr;
274: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
275: const PetscInt *idx_c,*idx_f;
276: ISLocalToGlobalMapping ltog_f,ltog_c;
277: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz;
278: PetscInt row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj;
279: PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale;
280: PetscMPIInt size_c,size_f,rank_f;
281: PetscScalar v[4],x,y;
282: Mat mat;
283: DMBoundaryType bx,by;
284: MatType mattype;
287: DMDAGetInfo(dac,0,&Mx,&My,0,0,0,0,0,0,&bx,&by,0,0);
288: DMDAGetInfo(daf,0,&mx,&my,0,0,0,0,&dof,0,0,0,0,0);
289: if (bx == DM_BOUNDARY_PERIODIC) {
290: if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
291: ratioi = mx/Mx;
292: if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
293: } else {
294: if (Mx < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx);
295: ratioi = (mx-1)/(Mx-1);
296: if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
297: }
298: if (by == DM_BOUNDARY_PERIODIC) {
299: if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
300: ratioj = my/My;
301: if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
302: } else {
303: if (My < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be greater than 1",My);
304: ratioj = (my-1)/(My-1);
305: if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
306: }
308: DMDAGetCorners(daf,&i_start,&j_start,0,&m_f,&n_f,0);
309: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,0,&m_ghost,&n_ghost,0);
310:   DMGetLocalToGlobalMapping(daf,&ltog_f);
311: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
313: DMDAGetCorners(dac,&i_start_c,&j_start_c,0,&m_c,&n_c,0);
314: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,0,&m_ghost_c,&n_ghost_c,0);
315:   DMGetLocalToGlobalMapping(dac,&ltog_c);
316: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
318: /*
319:  Used for handling a coarse DMDA that lives on 1/4 of the processors of the fine DMDA.
320:  The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
321:  processors). Its effective length is hence 4 times its normal length, which is
322:  why the interpolation matrix column sizes are multiplied by col_scale.
323:  col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
324:  copy of the coarse vector. A bit of a hack, but it is hard to do better.
326:  In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
327: */
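  /*
     A concrete instance of the scaling above (illustrative): with size_f = 4 and size_c = 1
     we get col_scale = 4, and ranks 0,1,2,3 of the fine communicator receive col_shift = 0,
     Mx*My, 2*Mx*My and 3*Mx*My respectively, so each group of processes addresses its own
     copy of the duplicated coarse vector.
  */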
328: MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);
329: MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);
330: MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);
331: col_scale = size_f/size_c;
332: col_shift = Mx*My*(rank_f/size_c);
334: MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz);
335: for (j=j_start; j<j_start+n_f; j++) {
336: for (i=i_start; i<i_start+m_f; i++) {
337: /* convert to local "natural" numbering and then to PETSc global numbering */
338: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
340: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
341: j_c = (j/ratioj); /* coarse grid node below fine grid node */
343: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
344: j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
345: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
346: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
348: /*
349: Only include those interpolation points that are truly
350:       nonzero. Note this is very important for the final grid lines
351:       in the x and y directions, since they have no right/top neighbors.
352: */
353: nc = 0;
354: /* one left and below; or we are right on it */
355: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
356: cols[nc++] = col_shift + idx_c[col];
357: /* one right and below */
358: if (i_c*ratioi != i) cols[nc++] = col_shift + idx_c[col+1];
359: /* one left and above */
360: if (j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+m_ghost_c];
361: /* one right and above */
362: if (i_c*ratioi != i && j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+(m_ghost_c+1)];
363: MatPreallocateSet(row,nc,cols,dnz,onz);
364: }
365: }
366: MatCreate(PetscObjectComm((PetscObject)daf),&mat);
367: #if defined(PETSC_HAVE_CUDA)
368: /*
369: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
370: we don't want the original unconverted matrix copied to the GPU
371: */
372: if (dof > 1) {
373: MatBindToCPU(mat,PETSC_TRUE);
374: }
375: #endif
376: MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My);
377: ConvertToAIJ(dac->mattype,&mattype);
378: MatSetType(mat,mattype);
379: MatSeqAIJSetPreallocation(mat,0,dnz);
380: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
381: MatPreallocateFinalize(dnz,onz);
383: /* loop over local fine grid nodes setting interpolation for those*/
384: if (!NEWVERSION) {
386: for (j=j_start; j<j_start+n_f; j++) {
387: for (i=i_start; i<i_start+m_f; i++) {
388: /* convert to local "natural" numbering and then to PETSc global numbering */
389: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
391: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
392: j_c = (j/ratioj); /* coarse grid node below fine grid node */
394: /*
395: Only include those interpolation points that are truly
396:         nonzero. Note this is very important for the final grid lines
397:         in the x and y directions, since they have no right/top neighbors.
398: */
399: x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi);
400: y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj);
402: nc = 0;
403: /* one left and below; or we are right on it */
404: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
405: cols[nc] = col_shift + idx_c[col];
406: v[nc++] = x*y - x - y + 1.0;
407: /* one right and below */
408: if (i_c*ratioi != i) {
409: cols[nc] = col_shift + idx_c[col+1];
410: v[nc++] = -x*y + x;
411: }
412: /* one left and above */
413: if (j_c*ratioj != j) {
414: cols[nc] = col_shift + idx_c[col+m_ghost_c];
415: v[nc++] = -x*y + y;
416: }
417: /* one right and above */
418: if (j_c*ratioj != j && i_c*ratioi != i) {
419: cols[nc] = col_shift + idx_c[col+(m_ghost_c+1)];
420: v[nc++] = x*y;
421: }
422: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
423: }
424: }
426: } else {
427: PetscScalar Ni[4];
428: PetscScalar *xi,*eta;
429: PetscInt li,nxi,lj,neta;
431: /* compute local coordinate arrays */
432: nxi = ratioi + 1;
433: neta = ratioj + 1;
434: PetscMalloc1(nxi,&xi);
435: PetscMalloc1(neta,&eta);
436: for (li=0; li<nxi; li++) {
437: xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
438: }
439: for (lj=0; lj<neta; lj++) {
440: eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1));
441: }
443: /* loop over local fine grid nodes setting interpolation for those*/
444: for (j=j_start; j<j_start+n_f; j++) {
445: for (i=i_start; i<i_start+m_f; i++) {
446: /* convert to local "natural" numbering and then to PETSc global numbering */
447: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
449: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
450: j_c = (j/ratioj); /* coarse grid node below fine grid node */
452: /* remainders */
453: li = i - ratioi * (i/ratioi);
454: if (i==mx-1) li = nxi-1;
455: lj = j - ratioj * (j/ratioj);
456: if (j==my-1) lj = neta-1;
458: /* corners */
459: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
460: cols[0] = col_shift + idx_c[col]; /* left, below */
461: Ni[0] = 1.0;
462: if ((li==0) || (li==nxi-1)) {
463: if ((lj==0) || (lj==neta-1)) {
464: MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);
465: continue;
466: }
467: }
469: /* edges + interior */
470: /* remainders */
471: if (i==mx-1) i_c--;
472: if (j==my-1) j_c--;
474: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
475: cols[0] = col_shift + idx_c[col]; /* left, below */
476: cols[1] = col_shift + idx_c[col+1]; /* right, below */
477: cols[2] = col_shift + idx_c[col+m_ghost_c]; /* left, above */
478: cols[3] = col_shift + idx_c[col+(m_ghost_c+1)]; /* right, above */
480: Ni[0] = 0.25*(1.0-xi[li])*(1.0-eta[lj]);
481: Ni[1] = 0.25*(1.0+xi[li])*(1.0-eta[lj]);
482: Ni[2] = 0.25*(1.0-xi[li])*(1.0+eta[lj]);
483: Ni[3] = 0.25*(1.0+xi[li])*(1.0+eta[lj]);
485: nc = 0;
486: if (PetscAbsScalar(Ni[0])<1.0e-32) cols[0]=-1;
487: if (PetscAbsScalar(Ni[1])<1.0e-32) cols[1]=-1;
488: if (PetscAbsScalar(Ni[2])<1.0e-32) cols[2]=-1;
489: if (PetscAbsScalar(Ni[3])<1.0e-32) cols[3]=-1;
491: MatSetValues(mat,1,&row,4,cols,Ni,INSERT_VALUES);
492: }
493: }
494: PetscFree(xi);
495: PetscFree(eta);
496: }
497: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
498: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
499: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
500: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
501: MatCreateMAIJ(mat,dof,A);
502: MatDestroy(&mat);
503: return(0);
504: }
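/*
   The four weights assembled in the old-version branch above are the standard bilinear
   shape functions written in expanded form:

      x*y - x - y + 1 = (1-x)*(1-y)    left, below
      -x*y + x        = x*(1-y)        right, below
      -x*y + y        = (1-x)*y        left, above
      x*y             = x*y            right, above

   with x and y in [0,1] the offsets of the fine node inside its coarse cell; the four
   weights always sum to 1.
*/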
506: /*
507: Contributed by Andrei Draganescu <aidraga@sandia.gov>
508: */
509: PetscErrorCode DMCreateInterpolation_DA_2D_Q0(DM dac,DM daf,Mat *A)
510: {
511: PetscErrorCode ierr;
512: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
513: const PetscInt *idx_c,*idx_f;
514: ISLocalToGlobalMapping ltog_f,ltog_c;
515: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz;
516: PetscInt row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj;
517: PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale;
518: PetscMPIInt size_c,size_f,rank_f;
519: PetscScalar v[4];
520: Mat mat;
521: DMBoundaryType bx,by;
522: MatType mattype;
525: DMDAGetInfo(dac,0,&Mx,&My,0,0,0,0,0,0,&bx,&by,0,0);
526: DMDAGetInfo(daf,0,&mx,&my,0,0,0,0,&dof,0,0,0,0,0);
527: if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
528: if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
529: ratioi = mx/Mx;
530: ratioj = my/My;
531: if (ratioi*Mx != mx) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in x");
532: if (ratioj*My != my) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in y");
533: if (ratioi != 2) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in x must be 2");
534: if (ratioj != 2) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in y must be 2");
536: DMDAGetCorners(daf,&i_start,&j_start,0,&m_f,&n_f,0);
537: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,0,&m_ghost,&n_ghost,0);
538:   DMGetLocalToGlobalMapping(daf,&ltog_f);
539: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
541: DMDAGetCorners(dac,&i_start_c,&j_start_c,0,&m_c,&n_c,0);
542: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,0,&m_ghost_c,&n_ghost_c,0);
543:   DMGetLocalToGlobalMapping(dac,&ltog_c);
544: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
546: /*
547:  Used for handling a coarse DMDA that lives on 1/4 of the processors of the fine DMDA.
548:  The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
549:  processors). Its effective length is hence 4 times its normal length, which is
550:  why the interpolation matrix column sizes are multiplied by col_scale.
551:  col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
552:  copy of the coarse vector. A bit of a hack, but it is hard to do better.
554:  In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
555: */
556: MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);
557: MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);
558: MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);
559: col_scale = size_f/size_c;
560: col_shift = Mx*My*(rank_f/size_c);
562: MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz);
563: for (j=j_start; j<j_start+n_f; j++) {
564: for (i=i_start; i<i_start+m_f; i++) {
565: /* convert to local "natural" numbering and then to PETSc global numbering */
566: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
568: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
569: j_c = (j/ratioj); /* coarse grid node below fine grid node */
571: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
572: j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
573: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
574: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
576: /*
577: Only include those interpolation points that are truly
578:       nonzero. Note this is very important for the final grid lines
579:       in the x and y directions, since they have no right/top neighbors.
580: */
581: nc = 0;
582: /* one left and below; or we are right on it */
583: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
584: cols[nc++] = col_shift + idx_c[col];
585: MatPreallocateSet(row,nc,cols,dnz,onz);
586: }
587: }
588: MatCreate(PetscObjectComm((PetscObject)daf),&mat);
589: #if defined(PETSC_HAVE_CUDA)
590: /*
591: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
592: we don't want the original unconverted matrix copied to the GPU
593: */
594: if (dof > 1) {
595: MatBindToCPU(mat,PETSC_TRUE);
596: }
597: #endif
598: MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My);
599: ConvertToAIJ(dac->mattype,&mattype);
600: MatSetType(mat,mattype);
601: MatSeqAIJSetPreallocation(mat,0,dnz);
602: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
603: MatPreallocateFinalize(dnz,onz);
605: /* loop over local fine grid nodes setting interpolation for those*/
606: for (j=j_start; j<j_start+n_f; j++) {
607: for (i=i_start; i<i_start+m_f; i++) {
608: /* convert to local "natural" numbering and then to PETSc global numbering */
609: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
611: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
612: j_c = (j/ratioj); /* coarse grid node below fine grid node */
613: nc = 0;
614: /* one left and below; or we are right on it */
615: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
616: cols[nc] = col_shift + idx_c[col];
617: v[nc++] = 1.0;
619: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
620: }
621: }
622: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
623: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
624: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
625: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
626: MatCreateMAIJ(mat,dof,A);
627: MatDestroy(&mat);
628: PetscLogFlops(13.0*m_f*n_f);
629: return(0);
630: }
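/*
   In this piecewise-constant (Q0) case every row of the prolongation has a single entry
   of 1.0: with the enforced ratio of 2, the four fine nodes (2*i_c, 2*j_c), (2*i_c+1, 2*j_c),
   (2*i_c, 2*j_c+1) and (2*i_c+1, 2*j_c+1) all take the value of coarse node (i_c, j_c),
   since i/2 and j/2 truncate to i_c and j_c for each of them.
*/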
632: /*
633: Contributed by Jianming Yang <jianming-yang@uiowa.edu>
634: */
635: PetscErrorCode DMCreateInterpolation_DA_3D_Q0(DM dac,DM daf,Mat *A)
636: {
637: PetscErrorCode ierr;
638: PetscInt i,j,l,i_start,j_start,l_start,m_f,n_f,p_f,Mx,My,Mz,dof;
639: const PetscInt *idx_c,*idx_f;
640: ISLocalToGlobalMapping ltog_f,ltog_c;
641: PetscInt m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c,nc,*dnz,*onz;
642: PetscInt row,col,i_start_ghost,j_start_ghost,l_start_ghost,cols[8],mx,m_c,my,n_c,mz,p_c,ratioi,ratioj,ratiol;
643: PetscInt i_c,j_c,l_c,i_start_c,j_start_c,l_start_c,i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,col_shift,col_scale;
644: PetscMPIInt size_c,size_f,rank_f;
645: PetscScalar v[8];
646: Mat mat;
647: DMBoundaryType bx,by,bz;
648: MatType mattype;
651: DMDAGetInfo(dac,0,&Mx,&My,&Mz,0,0,0,0,0,&bx,&by,&bz,0);
652: if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
653: if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
654: if (!Mz) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be positive",Mz);
655: DMDAGetInfo(daf,0,&mx,&my,&mz,0,0,0,&dof,0,0,0,0,0);
656: ratioi = mx/Mx;
657: ratioj = my/My;
658: ratiol = mz/Mz;
659: if (ratioi*Mx != mx) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in x");
660: if (ratioj*My != my) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in y");
661: if (ratiol*Mz != mz) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in z");
662: if (ratioi != 2 && ratioi != 1) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in x must be 1 or 2");
663: if (ratioj != 2 && ratioj != 1) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in y must be 1 or 2");
664: if (ratiol != 2 && ratiol != 1) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in z must be 1 or 2");
666: DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);
667: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);
668:   DMGetLocalToGlobalMapping(daf,&ltog_f);
669: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
671: DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);
672: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
673:   DMGetLocalToGlobalMapping(dac,&ltog_c);
674: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
676: /*
677:  Used for handling a coarse DMDA that lives on 1/4 of the processors of the fine DMDA.
678:  The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
679:  processors). Its effective length is hence 4 times its normal length, which is
680:  why the interpolation matrix column sizes are multiplied by col_scale.
681:  col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
682:  copy of the coarse vector. A bit of a hack, but it is hard to do better.
684:  In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
685: */
686: MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);
687: MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);
688: MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);
689: col_scale = size_f/size_c;
690: col_shift = Mx*My*Mz*(rank_f/size_c);
692: MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f*p_f,col_scale*m_c*n_c*p_c,dnz,onz);
693: for (l=l_start; l<l_start+p_f; l++) {
694: for (j=j_start; j<j_start+n_f; j++) {
695: for (i=i_start; i<i_start+m_f; i++) {
696: /* convert to local "natural" numbering and then to PETSc global numbering */
697: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
699: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
700: j_c = (j/ratioj); /* coarse grid node below fine grid node */
701: l_c = (l/ratiol);
703: if (l_c < l_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
704: l_start %D l_c %D l_start_ghost_c %D",l_start,l_c,l_start_ghost_c);
705: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
706: j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
707: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
708: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
710: /*
711: Only include those interpolation points that are truly
712:         nonzero. Note this is very important for the final grid planes
713:         in the x, y, and z directions, since they have no right/top/back neighbors.
714: */
715: nc = 0;
716: /* one left and below; or we are right on it */
717: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
718: cols[nc++] = col_shift + idx_c[col];
719: MatPreallocateSet(row,nc,cols,dnz,onz);
720: }
721: }
722: }
723: MatCreate(PetscObjectComm((PetscObject)daf),&mat);
724: #if defined(PETSC_HAVE_CUDA)
725: /*
726: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
727: we don't want the original unconverted matrix copied to the GPU
728: */
729: if (dof > 1) {
730: MatBindToCPU(mat,PETSC_TRUE);
731: }
732: #endif
733: MatSetSizes(mat,m_f*n_f*p_f,col_scale*m_c*n_c*p_c,mx*my*mz,col_scale*Mx*My*Mz);
734: ConvertToAIJ(dac->mattype,&mattype);
735: MatSetType(mat,mattype);
736: MatSeqAIJSetPreallocation(mat,0,dnz);
737: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
738: MatPreallocateFinalize(dnz,onz);
740: /* loop over local fine grid nodes setting interpolation for those*/
741: for (l=l_start; l<l_start+p_f; l++) {
742: for (j=j_start; j<j_start+n_f; j++) {
743: for (i=i_start; i<i_start+m_f; i++) {
744: /* convert to local "natural" numbering and then to PETSc global numbering */
745: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
747: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
748: j_c = (j/ratioj); /* coarse grid node below fine grid node */
749: l_c = (l/ratiol);
750: nc = 0;
751: /* one left and below; or we are right on it */
752: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
753: cols[nc] = col_shift + idx_c[col];
754: v[nc++] = 1.0;
756: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
757: }
758: }
759: }
760: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
761: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
762: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
763: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
764: MatCreateMAIJ(mat,dof,A);
765: MatDestroy(&mat);
766: PetscLogFlops(13.0*m_f*n_f*p_f);
767: return(0);
768: }
770: PetscErrorCode DMCreateInterpolation_DA_3D_Q1(DM dac,DM daf,Mat *A)
771: {
772: PetscErrorCode ierr;
773: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof,l;
774: const PetscInt *idx_c,*idx_f;
775: ISLocalToGlobalMapping ltog_f,ltog_c;
776: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,Mz,mz;
777: PetscInt row,col,i_start_ghost,j_start_ghost,cols[8],mx,m_c,my,nc,ratioi,ratioj,ratiok;
778: PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c;
779: PetscInt l_start,p_f,l_start_ghost,p_ghost,l_start_c,p_c;
780: PetscInt l_start_ghost_c,p_ghost_c,l_c,*dnz,*onz;
781: PetscScalar v[8],x,y,z;
782: Mat mat;
783: DMBoundaryType bx,by,bz;
784: MatType mattype;
787: DMDAGetInfo(dac,0,&Mx,&My,&Mz,0,0,0,0,0,&bx,&by,&bz,0);
788: DMDAGetInfo(daf,0,&mx,&my,&mz,0,0,0,&dof,0,0,0,0,0);
789: if (mx == Mx) {
790: ratioi = 1;
791: } else if (bx == DM_BOUNDARY_PERIODIC) {
792: if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
793: ratioi = mx/Mx;
794: if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
795: } else {
796: if (Mx < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx);
797: ratioi = (mx-1)/(Mx-1);
798: if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
799: }
800: if (my == My) {
801: ratioj = 1;
802: } else if (by == DM_BOUNDARY_PERIODIC) {
803: if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
804: ratioj = my/My;
805: if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
806: } else {
807: if (My < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be greater than 1",My);
808: ratioj = (my-1)/(My-1);
809: if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
810: }
811: if (mz == Mz) {
812: ratiok = 1;
813: } else if (bz == DM_BOUNDARY_PERIODIC) {
814: if (!Mz) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be positive",Mz);
815: ratiok = mz/Mz;
816: if (ratiok*Mz != mz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mz/Mz must be integer: mz %D Mz %D",mz,Mz);
817: } else {
818: if (Mz < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be greater than 1",Mz);
819: ratiok = (mz-1)/(Mz-1);
820: if (ratiok*(Mz-1) != mz-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %D Mz %D",mz,Mz);
821: }
823: DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);
824: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);
825:   DMGetLocalToGlobalMapping(daf,&ltog_f);
826: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
828: DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);
829: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
830:   DMGetLocalToGlobalMapping(dac,&ltog_c);
831: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
833: /* create interpolation matrix, determining exact preallocation */
834: MatPreallocateInitialize(PetscObjectComm((PetscObject)dac),m_f*n_f*p_f,m_c*n_c*p_c,dnz,onz);
835: /* loop over local fine grid nodes counting interpolating points */
836: for (l=l_start; l<l_start+p_f; l++) {
837: for (j=j_start; j<j_start+n_f; j++) {
838: for (i=i_start; i<i_start+m_f; i++) {
839: /* convert to local "natural" numbering and then to PETSc global numbering */
840: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
841: i_c = (i/ratioi);
842: j_c = (j/ratioj);
843: l_c = (l/ratiok);
844: if (l_c < l_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
845: l_start %D l_c %D l_start_ghost_c %D",l_start,l_c,l_start_ghost_c);
846: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
847: j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
848: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
849: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
851: /*
852: Only include those interpolation points that are truly
853:         nonzero. Note this is very important for the final grid planes
854:         in the x, y, and z directions, since they have no right/top/back neighbors.
855: */
856: nc = 0;
857: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
858: cols[nc++] = idx_c[col];
859: if (i_c*ratioi != i) {
860: cols[nc++] = idx_c[col+1];
861: }
862: if (j_c*ratioj != j) {
863: cols[nc++] = idx_c[col+m_ghost_c];
864: }
865: if (l_c*ratiok != l) {
866: cols[nc++] = idx_c[col+m_ghost_c*n_ghost_c];
867: }
868: if (j_c*ratioj != j && i_c*ratioi != i) {
869: cols[nc++] = idx_c[col+(m_ghost_c+1)];
870: }
871: if (j_c*ratioj != j && l_c*ratiok != l) {
872: cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];
873: }
874: if (i_c*ratioi != i && l_c*ratiok != l) {
875: cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+1)];
876: }
877: if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) {
878: cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)];
879: }
880: MatPreallocateSet(row,nc,cols,dnz,onz);
881: }
882: }
883: }
884: MatCreate(PetscObjectComm((PetscObject)dac),&mat);
885: #if defined(PETSC_HAVE_CUDA)
886: /*
887: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
888: we don't want the original unconverted matrix copied to the GPU
889: */
890: if (dof > 1) {
891: MatBindToCPU(mat,PETSC_TRUE);
892: }
893: #endif
894: MatSetSizes(mat,m_f*n_f*p_f,m_c*n_c*p_c,mx*my*mz,Mx*My*Mz);
895: ConvertToAIJ(dac->mattype,&mattype);
896: MatSetType(mat,mattype);
897: MatSeqAIJSetPreallocation(mat,0,dnz);
898: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
899: MatPreallocateFinalize(dnz,onz);
901: /* loop over local fine grid nodes setting interpolation for those*/
902: if (!NEWVERSION) {
904: for (l=l_start; l<l_start+p_f; l++) {
905: for (j=j_start; j<j_start+n_f; j++) {
906: for (i=i_start; i<i_start+m_f; i++) {
907: /* convert to local "natural" numbering and then to PETSc global numbering */
908: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
910: i_c = (i/ratioi);
911: j_c = (j/ratioj);
912: l_c = (l/ratiok);
914: /*
915: Only include those interpolation points that are truly
916:           nonzero. Note this is very important for the final grid planes
917:           in the x, y, and z directions, since they have no right/top/back neighbors.
918: */
919: x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi);
920: y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj);
921: z = ((PetscReal)(l - l_c*ratiok))/((PetscReal)ratiok);
923: nc = 0;
924: /* one left and below; or we are right on it */
925: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c));
927: cols[nc] = idx_c[col];
928: v[nc++] = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.));
930: if (i_c*ratioi != i) {
931: cols[nc] = idx_c[col+1];
932: v[nc++] = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.));
933: }
935: if (j_c*ratioj != j) {
936: cols[nc] = idx_c[col+m_ghost_c];
937: v[nc++] = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.));
938: }
940: if (l_c*ratiok != l) {
941: cols[nc] = idx_c[col+m_ghost_c*n_ghost_c];
942: v[nc++] = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.));
943: }
945: if (j_c*ratioj != j && i_c*ratioi != i) {
946: cols[nc] = idx_c[col+(m_ghost_c+1)];
947: v[nc++] = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.));
948: }
950: if (j_c*ratioj != j && l_c*ratiok != l) {
951: cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];
952: v[nc++] = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.));
953: }
955: if (i_c*ratioi != i && l_c*ratiok != l) {
956: cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+1)];
957: v[nc++] = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.));
958: }
960: if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) {
961: cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)];
962: v[nc++] = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.));
963: }
964: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
965: }
966: }
967: }
969: } else {
970: PetscScalar *xi,*eta,*zeta;
971: PetscInt li,nxi,lj,neta,lk,nzeta,n;
972: PetscScalar Ni[8];
974: /* compute local coordinate arrays */
975: nxi = ratioi + 1;
976: neta = ratioj + 1;
977: nzeta = ratiok + 1;
978: PetscMalloc1(nxi,&xi);
979: PetscMalloc1(neta,&eta);
980: PetscMalloc1(nzeta,&zeta);
981: for (li=0; li<nxi; li++) xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
982: for (lj=0; lj<neta; lj++) eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1));
983: for (lk=0; lk<nzeta; lk++) zeta[lk] = -1.0 + (PetscScalar)lk*(2.0/(PetscScalar)(nzeta-1));
985: for (l=l_start; l<l_start+p_f; l++) {
986: for (j=j_start; j<j_start+n_f; j++) {
987: for (i=i_start; i<i_start+m_f; i++) {
988: /* convert to local "natural" numbering and then to PETSc global numbering */
989: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
991: i_c = (i/ratioi);
992: j_c = (j/ratioj);
993: l_c = (l/ratiok);
995: /* remainders */
996: li = i - ratioi * (i/ratioi);
997: if (i==mx-1) li = nxi-1;
998: lj = j - ratioj * (j/ratioj);
999: if (j==my-1) lj = neta-1;
1000: lk = l - ratiok * (l/ratiok);
1001: if (l==mz-1) lk = nzeta-1;
1003: /* corners */
1004: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c));
1005: cols[0] = idx_c[col];
1006: Ni[0] = 1.0;
1007: if ((li==0) || (li==nxi-1)) {
1008: if ((lj==0) || (lj==neta-1)) {
1009: if ((lk==0) || (lk==nzeta-1)) {
1010: MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);
1011: continue;
1012: }
1013: }
1014: }
1016: /* edges + interior */
1017: /* remainders */
1018: if (i==mx-1) i_c--;
1019: if (j==my-1) j_c--;
1020: if (l==mz-1) l_c--;
1022: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
1023: cols[0] = idx_c[col]; /* one left and below; or we are right on it */
1024: cols[1] = idx_c[col+1]; /* one right and below */
1025: cols[2] = idx_c[col+m_ghost_c]; /* one left and above */
1026: cols[3] = idx_c[col+(m_ghost_c+1)]; /* one right and above */
1028: cols[4] = idx_c[col+m_ghost_c*n_ghost_c]; /* one left and below and front; or we are right on it */
1029: cols[5] = idx_c[col+(m_ghost_c*n_ghost_c+1)]; /* one right and below, and front */
1030: cols[6] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)]; /* one left and above and front*/
1031: cols[7] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)]; /* one right and above and front */
1033: Ni[0] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]);
1034: Ni[1] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]);
1035: Ni[2] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]);
1036: Ni[3] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]);
1038: Ni[4] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]);
1039: Ni[5] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]);
1040: Ni[6] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]);
1041: Ni[7] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]);
1043: for (n=0; n<8; n++) {
1044: if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1;
1045: }
1046: MatSetValues(mat,1,&row,8,cols,Ni,INSERT_VALUES);
1048: }
1049: }
1050: }
1051: PetscFree(xi);
1052: PetscFree(eta);
1053: PetscFree(zeta);
1054: }
1055: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1056: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
1057: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
1058: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
1060: MatCreateMAIJ(mat,dof,A);
1061: MatDestroy(&mat);
1062: return(0);
1063: }
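/*
   The .125*(1 +/- (2*x-1))*... weights used in the old-version branch of the 3D Q1
   interpolation above are the trilinear shape functions in disguise: since
   1 - (2*x-1) = 2*(1-x) and 1 + (2*x-1) = 2*x, the factor .125 cancels the three
   factors of 2, e.g.

      .125*(1-(2*x-1))*(1-(2*y-1))*(1-(2*z-1)) = (1-x)*(1-y)*(1-z)

   so the eight weights are products of (1-x) or x, (1-y) or y, (1-z) or z and sum to 1.
*/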
1065: PetscErrorCode DMCreateInterpolation_DA(DM dac,DM daf,Mat *A,Vec *scale)
1066: {
1067: PetscErrorCode ierr;
1068: PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
1069: DMBoundaryType bxc,byc,bzc,bxf,byf,bzf;
1070: DMDAStencilType stc,stf;
1071: DM_DA *ddc = (DM_DA*)dac->data;
1079: DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);
1080: DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);
1081: if (dimc != dimf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf);
1082: if (dofc != doff) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff);
1083: if (sc != sf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %D %D",sc,sf);
1084: if (bxc != bxf || byc != byf || bzc != bzf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
1085: if (stc != stf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");
1086: if (Mc < 2 && Mf > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in x direction");
1087: if (dimc > 1 && Nc < 2 && Nf > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in y direction");
1088: if (dimc > 2 && Pc < 2 && Pf > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in z direction");
1090: if (ddc->interptype == DMDA_Q1) {
1091: if (dimc == 1) {
1092: DMCreateInterpolation_DA_1D_Q1(dac,daf,A);
1093: } else if (dimc == 2) {
1094: DMCreateInterpolation_DA_2D_Q1(dac,daf,A);
1095: } else if (dimc == 3) {
1096: DMCreateInterpolation_DA_3D_Q1(dac,daf,A);
1097: } else SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %D for interpolation type %d",dimc,(int)ddc->interptype);
1098: } else if (ddc->interptype == DMDA_Q0) {
1099: if (dimc == 1) {
1100: DMCreateInterpolation_DA_1D_Q0(dac,daf,A);
1101: } else if (dimc == 2) {
1102: DMCreateInterpolation_DA_2D_Q0(dac,daf,A);
1103: } else if (dimc == 3) {
1104: DMCreateInterpolation_DA_3D_Q0(dac,daf,A);
1105: } else SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %D for interpolation type %d",dimc,(int)ddc->interptype);
1106: }
1107: if (scale) {
1108: DMCreateInterpolationScale((DM)dac,(DM)daf,*A,scale);
1109: }
1110: return(0);
1111: }
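/*
   A minimal usage sketch for the dispatch routine above (illustrative; error checking and
   cleanup omitted; the 5x5 coarse grid and the default refinement factor of 2 are arbitrary
   choices for the example):

     DM  dac,daf;
     Mat A;
     Vec scale,vc,vf;

     DMDACreate2d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,DMDA_STENCIL_BOX,
                  5,5,PETSC_DECIDE,PETSC_DECIDE,1,1,NULL,NULL,&dac);
     DMSetUp(dac);
     DMRefine(dac,PETSC_COMM_WORLD,&daf);
     DMCreateInterpolation(dac,daf,&A,&scale);
     DMGetGlobalVector(dac,&vc);
     DMGetGlobalVector(daf,&vf);
     MatMult(A,vc,vf);

   DMRefine() produces a 9x9 fine DMDA here, DMCreateInterpolation() lands in
   DMCreateInterpolation_DA() above, and MatMult() applies the resulting MAIJ operator to
   interpolate the coarse vector vc into the fine vector vf. The scale vector is intended
   for scaling restricted vectors; see DMCreateInterpolationScale() as called above.
*/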
1113: PetscErrorCode DMCreateInjection_DA_1D(DM dac,DM daf,VecScatter *inject)
1114: {
1115: PetscErrorCode ierr;
1116: PetscInt i,i_start,m_f,Mx,dof;
1117: const PetscInt *idx_f;
1118: ISLocalToGlobalMapping ltog_f;
1119: PetscInt m_ghost,m_ghost_c;
1120: PetscInt row,i_start_ghost,mx,m_c,nc,ratioi;
1121: PetscInt i_start_c,i_start_ghost_c;
1122: PetscInt *cols;
1123: DMBoundaryType bx;
1124: Vec vecf,vecc;
1125: IS isf;
1128: DMDAGetInfo(dac,0,&Mx,0,0,0,0,0,0,0,&bx,0,0,0);
1129: DMDAGetInfo(daf,0,&mx,0,0,0,0,0,&dof,0,0,0,0,0);
1130: if (bx == DM_BOUNDARY_PERIODIC) {
1131: ratioi = mx/Mx;
1132: if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
1133: } else {
1134: ratioi = (mx-1)/(Mx-1);
1135: if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
1136: }
1138: DMDAGetCorners(daf,&i_start,0,0,&m_f,0,0);
1139: DMDAGetGhostCorners(daf,&i_start_ghost,0,0,&m_ghost,0,0);
1140:  DMGetLocalToGlobalMapping(daf,&ltog_f);
1141: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
1143: DMDAGetCorners(dac,&i_start_c,0,0,&m_c,0,0);
1144: DMDAGetGhostCorners(dac,&i_start_ghost_c,0,0,&m_ghost_c,0,0);
1147:   /* loop over local coarse grid nodes, recording the fine grid node that coincides with each */
1148: nc = 0;
1149: PetscMalloc1(m_f,&cols);
1152: for (i=i_start_c; i<i_start_c+m_c; i++) {
1153: PetscInt i_f = i*ratioi;
1155: if (i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\ni_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);
1157: row = idx_f[(i_f-i_start_ghost)];
1158: cols[nc++] = row;
1159: }
1161: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1162: ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);
1163: DMGetGlobalVector(dac,&vecc);
1164: DMGetGlobalVector(daf,&vecf);
1165: VecScatterCreate(vecf,isf,vecc,NULL,inject);
1166: DMRestoreGlobalVector(dac,&vecc);
1167: DMRestoreGlobalVector(daf,&vecf);
1168: ISDestroy(&isf);
1169: return(0);
1170: }
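/*
   Example of the sampling performed above (illustrative): with ratioi = 2 the coarse node
   i = 3 is matched with fine node i_f = i*ratioi = 6, so the resulting scatter simply copies
   every second fine-grid value into the coarse vector; no averaging is done, which is what
   distinguishes injection from applying the transpose of the interpolation.
*/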
1172: PetscErrorCode DMCreateInjection_DA_2D(DM dac,DM daf,VecScatter *inject)
1173: {
1174: PetscErrorCode ierr;
1175: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
1176: const PetscInt *idx_c,*idx_f;
1177: ISLocalToGlobalMapping ltog_f,ltog_c;
1178: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c;
1179: PetscInt row,i_start_ghost,j_start_ghost,mx,m_c,my,nc,ratioi,ratioj;
1180: PetscInt i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c;
1181: PetscInt *cols;
1182: DMBoundaryType bx,by;
1183: Vec vecf,vecc;
1184: IS isf;
1187: DMDAGetInfo(dac,0,&Mx,&My,0,0,0,0,0,0,&bx,&by,0,0);
1188: DMDAGetInfo(daf,0,&mx,&my,0,0,0,0,&dof,0,0,0,0,0);
1189: if (bx == DM_BOUNDARY_PERIODIC) {
1190: ratioi = mx/Mx;
1191: if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
1192: } else {
1193: ratioi = (mx-1)/(Mx-1);
1194: if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
1195: }
1196: if (by == DM_BOUNDARY_PERIODIC) {
1197: ratioj = my/My;
1198: if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
1199: } else {
1200: ratioj = (my-1)/(My-1);
1201: if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
1202: }
1204: DMDAGetCorners(daf,&i_start,&j_start,0,&m_f,&n_f,0);
1205: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,0,&m_ghost,&n_ghost,0);
1206:   DMGetLocalToGlobalMapping(daf,&ltog_f);
1207: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
1209: DMDAGetCorners(dac,&i_start_c,&j_start_c,0,&m_c,&n_c,0);
1210: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,0,&m_ghost_c,&n_ghost_c,0);
1211:   DMGetLocalToGlobalMapping(dac,&ltog_c);
1212: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
1214:   /* loop over local coarse grid nodes, recording the fine grid node that coincides with each */
1215: nc = 0;
1216: PetscMalloc1(n_f*m_f,&cols);
1217: for (j=j_start_c; j<j_start_c+n_c; j++) {
1218: for (i=i_start_c; i<i_start_c+m_c; i++) {
1219: PetscInt i_f = i*ratioi,j_f = j*ratioj;
1220: if (j_f < j_start_ghost || j_f >= j_start_ghost+n_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
1221: j_c %D j_f %D fine ghost range [%D,%D]",j,j_f,j_start_ghost,j_start_ghost+n_ghost);
1222: if (i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
1223: i_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);
1224: row = idx_f[(m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))];
1225: cols[nc++] = row;
1226: }
1227: }
1228: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1229: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
1231: ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);
1232: DMGetGlobalVector(dac,&vecc);
1233: DMGetGlobalVector(daf,&vecf);
1234: VecScatterCreate(vecf,isf,vecc,NULL,inject);
1235: DMRestoreGlobalVector(dac,&vecc);
1236: DMRestoreGlobalVector(daf,&vecf);
1237: ISDestroy(&isf);
1238: return(0);
1239: }
1241: PetscErrorCode DMCreateInjection_DA_3D(DM dac,DM daf,VecScatter *inject)
1242: {
1243: PetscErrorCode ierr;
1244: PetscInt i,j,k,i_start,j_start,k_start,m_f,n_f,p_f,Mx,My,Mz;
1245: PetscInt m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c;
1246: PetscInt i_start_ghost,j_start_ghost,k_start_ghost;
1247: PetscInt mx,my,mz,ratioi,ratioj,ratiok;
1248: PetscInt i_start_c,j_start_c,k_start_c;
1249: PetscInt m_c,n_c,p_c;
1250: PetscInt i_start_ghost_c,j_start_ghost_c,k_start_ghost_c;
1251: PetscInt row,nc,dof;
1252: const PetscInt *idx_c,*idx_f;
1253: ISLocalToGlobalMapping ltog_f,ltog_c;
1254: PetscInt *cols;
1255: DMBoundaryType bx,by,bz;
1256: Vec vecf,vecc;
1257: IS isf;
1260: DMDAGetInfo(dac,0,&Mx,&My,&Mz,0,0,0,0,0,&bx,&by,&bz,0);
1261: DMDAGetInfo(daf,0,&mx,&my,&mz,0,0,0,&dof,0,0,0,0,0);
1263: if (bx == DM_BOUNDARY_PERIODIC) {
1264: ratioi = mx/Mx;
1265: if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
1266: } else {
1267: ratioi = (mx-1)/(Mx-1);
1268: if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
1269: }
1270: if (by == DM_BOUNDARY_PERIODIC) {
1271: ratioj = my/My;
1272: if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
1273: } else {
1274: ratioj = (my-1)/(My-1);
1275: if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
1276: }
1277: if (bz == DM_BOUNDARY_PERIODIC) {
1278: ratiok = mz/Mz;
1279:     if (ratiok*Mz != mz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mz/Mz must be integer: mz %D Mz %D",mz,Mz);
1280: } else {
1281: ratiok = (mz-1)/(Mz-1);
1282: if (ratiok*(Mz-1) != mz-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %D Mz %D",mz,Mz);
1283: }
1285: DMDAGetCorners(daf,&i_start,&j_start,&k_start,&m_f,&n_f,&p_f);
1286: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&k_start_ghost,&m_ghost,&n_ghost,&p_ghost);
1287:   DMGetLocalToGlobalMapping(daf,&ltog_f);
1288: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
1290: DMDAGetCorners(dac,&i_start_c,&j_start_c,&k_start_c,&m_c,&n_c,&p_c);
1291: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&k_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
1292:   DMGetLocalToGlobalMapping(dac,&ltog_c);
1293: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
1296:   /* loop over local coarse grid nodes, recording the fine grid node that coincides with each */
1297: nc = 0;
1298: PetscMalloc1(n_f*m_f*p_f,&cols);
1299: for (k=k_start_c; k<k_start_c+p_c; k++) {
1300: for (j=j_start_c; j<j_start_c+n_c; j++) {
1301: for (i=i_start_c; i<i_start_c+m_c; i++) {
1302: PetscInt i_f = i*ratioi,j_f = j*ratioj,k_f = k*ratiok;
1303: if (k_f < k_start_ghost || k_f >= k_start_ghost+p_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA "
1304: "k_c %D k_f %D fine ghost range [%D,%D]",k,k_f,k_start_ghost,k_start_ghost+p_ghost);
1305: if (j_f < j_start_ghost || j_f >= j_start_ghost+n_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA "
1306: "j_c %D j_f %D fine ghost range [%D,%D]",j,j_f,j_start_ghost,j_start_ghost+n_ghost);
1307: if (i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA "
1308: "i_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);
1309: row = idx_f[(m_ghost*n_ghost*(k_f-k_start_ghost) + m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))];
1310: cols[nc++] = row;
1311: }
1312: }
1313: }
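/*
   A small worked example of the index arithmetic in the loop above (the numbers are
   assumptions for illustration only): with a local ghosted fine patch of m_ghost = 6,
   n_ghost = 5 starting at (i_start_ghost,j_start_ghost,k_start_ghost) = (0,0,0), the
   fine point (i_f,j_f,k_f) = (2,3,1) has local ghosted offset 6*5*1 + 6*3 + 2 = 50,
   and idx_f[50] is its global block (node) index; for dof > 1 that block index is
   expanded to dof consecutive rows by the ISCreateBlock() call below.
*/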
1314: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1315: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
1317: ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);
1318: DMGetGlobalVector(dac,&vecc);
1319: DMGetGlobalVector(daf,&vecf);
1320: VecScatterCreate(vecf,isf,vecc,NULL,inject);
1321: DMRestoreGlobalVector(dac,&vecc);
1322: DMRestoreGlobalVector(daf,&vecf);
1323: ISDestroy(&isf);
1324: return(0);
1325: }
1327: PetscErrorCode DMCreateInjection_DA(DM dac,DM daf,Mat *mat)
1328: {
1329: PetscErrorCode ierr;
1330: PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
1331: DMBoundaryType bxc,byc,bzc,bxf,byf,bzf;
1332: DMDAStencilType stc,stf;
1333: VecScatter inject = NULL;
1340: DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);
1341: DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);
1342: if (dimc != dimf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf);
1343: if (dofc != doff) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff);
1344: if (sc != sf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA does not match %D %D",sc,sf);
1345: if (bxc != bxf || byc != byf || bzc != bzf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
1346: if (stc != stf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");
1347: if (Mc < 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in x direction");
1348: if (dimc > 1 && Nc < 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in y direction");
1349: if (dimc > 2 && Pc < 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in z direction");
1351: if (dimc == 1) {
1352: DMCreateInjection_DA_1D(dac,daf,&inject);
1353: } else if (dimc == 2) {
1354: DMCreateInjection_DA_2D(dac,daf,&inject);
1355: } else if (dimc == 3) {
1356: DMCreateInjection_DA_3D(dac,daf,&inject);
1357: }
1358: MatCreateScatter(PetscObjectComm((PetscObject)inject), inject, mat);
1359: VecScatterDestroy(&inject);
1360: return(0);
1361: }
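/*
   A minimal usage sketch for the injection path above (the object names and grid sizes
   are assumptions for illustration; error checking and cleanup are omitted). The matrix
   returned by DMCreateInjection() for a DMDA is the MATSCATTER matrix built here, and
   applying it with MatMult() maps a fine grid global vector straight down onto the
   coarse grid:

     DM  coarse,fine;
     Mat inject;
     Vec xf,xc;

     DMDACreate3d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,
                  DMDA_STENCIL_STAR,9,9,9,PETSC_DECIDE,PETSC_DECIDE,PETSC_DECIDE,1,1,
                  NULL,NULL,NULL,&coarse);
     DMSetUp(coarse);
     DMRefine(coarse,PETSC_COMM_WORLD,&fine);
     DMCreateInjection(coarse,fine,&inject);
     DMCreateGlobalVector(fine,&xf);
     DMCreateGlobalVector(coarse,&xc);
     VecSet(xf,1.0);
     MatMult(inject,xf,xc);    every coarse value becomes the value of the matching fine node
*/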
1363: /*@
1364: DMCreateAggregates - Deprecated, see DMDACreateAggregates.
1366: Level: intermediate
1367: @*/
1368: PetscErrorCode DMCreateAggregates(DM dac,DM daf,Mat *mat)
1369: {
1370: return DMDACreateAggregates(dac,daf,mat);
1371: }
1373: /*@
1374: DMDACreateAggregates - Gets the aggregates that map between
1375: grids associated with two DMDAs.
1377: Collective on dac
1379: Input Parameters:
1380: + dac - the coarse grid DMDA
1381: - daf - the fine grid DMDA
1383: Output Parameter:
1384: . rest - the restriction matrix (transpose of the projection matrix)
1386: Level: intermediate
1388: Note: This routine is not used by PETSc.
1389: It is not clear what its use case is and it may be removed in a future release.
1390: Users should contact petsc-maint@mcs.anl.gov if they plan to use it.
1392: .seealso: DMRefine(), DMCreateInjection(), DMCreateInterpolation()
1393: @*/
1394: PetscErrorCode DMDACreateAggregates(DM dac,DM daf,Mat *rest)
1395: {
1396: PetscErrorCode ierr;
1397: PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc;
1398: PetscInt dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
1399: DMBoundaryType bxc,byc,bzc,bxf,byf,bzf;
1400: DMDAStencilType stc,stf;
1401: PetscInt i,j,l;
1402: PetscInt i_start,j_start,l_start, m_f,n_f,p_f;
1403: PetscInt i_start_ghost,j_start_ghost,l_start_ghost,m_ghost,n_ghost,p_ghost;
1404: const PetscInt *idx_f;
1405: PetscInt i_c,j_c,l_c;
1406: PetscInt i_start_c,j_start_c,l_start_c, m_c,n_c,p_c;
1407: PetscInt i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,m_ghost_c,n_ghost_c,p_ghost_c;
1408: const PetscInt *idx_c;
1409: PetscInt d;
1410: PetscInt a;
1411: PetscInt max_agg_size;
1412: PetscInt *fine_nodes;
1413: PetscScalar *one_vec;
1414: PetscInt fn_idx;
1415: ISLocalToGlobalMapping ltogmf,ltogmc;
1422: DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);
1423: DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);
1424: if (dimc != dimf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf);
1425: if (dofc != doff) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff);
1426: if (sc != sf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA does not match %D %D",sc,sf);
1427: if (bxc != bxf || byc != byf || bzc != bzf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
1428: if (stc != stf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");
1430: if (Mf < Mc) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Mc %D, Mf %D", Mc, Mf);
1431: if (Nf < Nc) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Nc %D, Nf %D", Nc, Nf);
1432: if (Pf < Pc) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Pc %D, Pf %D", Pc, Pf);
1434: if (Pc < 0) Pc = 1;
1435: if (Pf < 0) Pf = 1;
1436: if (Nc < 0) Nc = 1;
1437: if (Nf < 0) Nf = 1;
1439: DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);
1440: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);
1442: DMGetLocalToGlobalMapping(daf,<ogmf);
1443: ISLocalToGlobalMappingGetIndices(ltogmf,&idx_f);
1445: DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);
1446: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
1448: DMGetLocalToGlobalMapping(dac,<ogmc);
1449: ISLocalToGlobalMappingGetIndices(ltogmc,&idx_c);
1451: /*
1452: Basic idea is as follows. Here's a 2D example, suppose r_x, r_y are the ratios
1453: for dimensions 1 and 2 respectively.
1454: Let (i,j) be a coarse grid node. All the fine grid nodes between r_x*i and r_x*(i+1)
1455: and r_y*j and r_y*(j+1) will be grouped into the same coarse grid aggregate.
1456: Each specific dof on the fine grid is mapped to one dof on the coarse grid.
1457: */
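/* A concrete instance of the grouping just described (sizes chosen only for
   illustration): with Mf = 8, Mc = 4 (so r_x = 2) and Nf = 8, Nc = 4, the coarse
   node (1,1) aggregates the 2x2 block of fine nodes with i in {2,3} and j in {2,3},
   and dof d of each of those fine nodes contributes a 1.0 to the matrix row of
   dof d of that coarse node. */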
1459: max_agg_size = (Mf/Mc+1)*(Nf/Nc+1)*(Pf/Pc+1);
1461: /* create the matrix that will contain the restriction operator */
1462: MatCreateAIJ(PetscObjectComm((PetscObject)daf), m_c*n_c*p_c*dofc, m_f*n_f*p_f*doff, Mc*Nc*Pc*dofc, Mf*Nf*Pf*doff,
1463: max_agg_size, NULL, max_agg_size, NULL, rest);
1465: /* store nodes in the fine grid here */
1466: PetscMalloc2(max_agg_size, &one_vec,max_agg_size, &fine_nodes);
1467: for (i=0; i<max_agg_size; i++) one_vec[i] = 1.0;
1469: /* loop over all coarse nodes */
1470: for (l_c=l_start_c; l_c<l_start_c+p_c; l_c++) {
1471: for (j_c=j_start_c; j_c<j_start_c+n_c; j_c++) {
1472: for (i_c=i_start_c; i_c<i_start_c+m_c; i_c++) {
1473: for (d=0; d<dofc; d++) {
1474: /* convert to local "natural" numbering and then to PETSc global numbering */
1475: a = idx_c[dofc*(m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c))] + d;
1477: fn_idx = 0;
1478: /* Corresponding fine points are all points (i_f, j_f, l_f) such that
1479: i_c*Mf/Mc <= i_f < (i_c+1)*Mf/Mc
1480: (same for other dimensions)
1481: */
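/* For example (sizes for illustration only), with Mf = 10 and Mc = 4 the integer
   division gives i_c = 0 the fine range [0,2) = {0,1} but i_c = 1 the range
   [2,5) = {2,3,4}, so aggregates need not all be the same size when Mf is not a
   multiple of Mc; PetscMin() keeps the upper bound from passing the last fine point. */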
1482: for (l=l_c*Pf/Pc; l<PetscMin((l_c+1)*Pf/Pc,Pf); l++) {
1483: for (j=j_c*Nf/Nc; j<PetscMin((j_c+1)*Nf/Nc,Nf); j++) {
1484: for (i=i_c*Mf/Mc; i<PetscMin((i_c+1)*Mf/Mc,Mf); i++) {
1485: fine_nodes[fn_idx] = idx_f[doff*(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))] + d;
1486: fn_idx++;
1487: }
1488: }
1489: }
1490: /* add all these points to one aggregate */
1491: MatSetValues(*rest, 1, &a, fn_idx, fine_nodes, one_vec, INSERT_VALUES);
1492: }
1493: }
1494: }
1495: }
1496: ISLocalToGlobalMappingRestoreIndices(ltogmf,&idx_f);
1497: ISLocalToGlobalMappingRestoreIndices(ltogmc,&idx_c);
1498: PetscFree2(one_vec,fine_nodes);
1499: MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY);
1500: MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY);
1501: return(0);
1502: }
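/*
   A minimal usage sketch for DMDACreateAggregates() (the 2d sizes and the names dac, daf,
   R, xf, xc are assumptions for illustration; error checking and cleanup are omitted).
   Since every stored entry of the restriction matrix is 1.0, applying it to a fine grid
   vector of ones returns, in each coarse grid dof, the number of fine grid dofs in that
   aggregate:

     DM  dac,daf;
     Mat R;
     Vec xf,xc;

     DMDACreate2d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,DMDA_STENCIL_STAR,
                  5,5,PETSC_DECIDE,PETSC_DECIDE,1,1,NULL,NULL,&dac);
     DMSetUp(dac);
     DMRefine(dac,PETSC_COMM_WORLD,&daf);
     DMDACreateAggregates(dac,daf,&R);
     DMCreateGlobalVector(daf,&xf);
     DMCreateGlobalVector(dac,&xc);
     VecSet(xf,1.0);
     MatMult(R,xf,xc);    each entry of xc is now the size of the corresponding aggregate
*/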