Actual source code: dainterp.c
petsc-3.12.5 2020-03-29
2: /*
3: Code for interpolating between grids represented by DMDAs
4: */
6: /*
7: For linear elements there are two branches of code to compute the interpolation. They should produce identical results but may not. The "new version" does
8: not work for periodic domains, while the old one does. Change NEWVERSION to 1 to compile in the new version. Once we are sure the two produce identical results
9: we will remove/merge the new version. Based on current tests, both produce the same results. We are leaving NEWVERSION in the code for now since some
10: consider it cleaner, but the old version is enabled since it handles the periodic case.
11: */
12: #define NEWVERSION 0
14: #include <petsc/private/dmdaimpl.h>
16: /*@
17: DMCreateInterpolationScale - Forms the vector scale = 1./(R*1), where 1 is the vector of all ones and R is the restriction given by mat;
18: applying diag(scale)*R to a fine grid vector then gives a coarse grid average of the nearby fine grid points.
20: Input Parameters:
21: + dac - DM that defines a coarse mesh
22: . daf - DM that defines a fine mesh
23: - mat - the restriction (fine to coarse) or interpolation (coarse to fine) operator between the two meshes
25: Output Parameter:
26: . scale - the scaling vector; the entrywise reciprocal of the row sums of the restriction
28: Level: developer
30: .seealso: DMCreateInterpolation()
32: @*/
33: PetscErrorCode DMCreateInterpolationScale(DM dac,DM daf,Mat mat,Vec *scale)
34: {
36: Vec fine;
37: PetscScalar one = 1.0;
40: DMCreateGlobalVector(daf,&fine);
41: DMCreateGlobalVector(dac,scale);
42: VecSet(fine,one);
43: MatRestrict(mat,fine,*scale);
44: VecDestroy(&fine);
45: VecReciprocal(*scale);
46: return(0);
47: }
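/*
   Editor's sketch (not part of the original source): typical use of the scale vector created
   above, assuming dac and daf are compatible coarse/fine DMDAs; error checking is omitted in
   the style of this listing. DMCreateInterpolation() fills A and, when a scale vector is
   requested, calls DMCreateInterpolationScale() itself (see the end of this file).

     Mat A;
     Vec scale,xf,xc;
     DMCreateInterpolation(dac,daf,&A,&scale);
     DMCreateGlobalVector(daf,&xf);
     DMCreateGlobalVector(dac,&xc);
     VecSet(xf,2.0);
     MatRestrict(A,xf,xc);            xc now holds weighted sums of nearby fine grid values
     VecPointwiseMult(xc,xc,scale);   rescaled so each coarse entry is an average (here ~2.0)
     VecDestroy(&xf); VecDestroy(&xc); VecDestroy(&scale); MatDestroy(&A);
*/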
49: /*
50: Since the interpolation uses MATMAIJ for dof > 1 we convert requests for non-MATAIJ-based matrices to MATAIJ.
51: This is a bit of a hack; the reason for it is partially that -dm_mat_type defines the
52: matrix type for both the operator matrices and the interpolation matrices, so that users
53: can select matrix types based on MATAIJ for accelerators.
54: */
55: static PetscErrorCode ConvertToAIJ(MatType intype,MatType *outtype)
56: {
58: PetscInt i;
59: char const *types[3] = {MATAIJ,MATSEQAIJ,MATMPIAIJ};
60: PetscBool flg;
63: *outtype = MATAIJ;
64: for (i=0; i<3; i++) {
65: PetscStrbeginswith(intype,types[i],&flg);
66: if (flg) {
67: *outtype = intype;
68: return(0);
69: }
70: }
71: return(0);
72: }
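/*
   Editor's note (illustrative, not part of the original source): PetscStrbeginswith() keeps any
   requested type whose name begins with "aij", "seqaij" or "mpiaij" and falls back to MATAIJ
   otherwise, for example

     ConvertToAIJ(MATAIJCUSPARSE,&outtype);   outtype remains MATAIJCUSPARSE ("aijcusparse")
     ConvertToAIJ(MATMPIAIJ,&outtype);        outtype remains MATMPIAIJ
     ConvertToAIJ(MATBAIJ,&outtype);          outtype falls back to MATAIJ
*/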
74: PetscErrorCode DMCreateInterpolation_DA_1D_Q1(DM dac,DM daf,Mat *A)
75: {
76: PetscErrorCode ierr;
77: PetscInt i,i_start,m_f,Mx;
78: const PetscInt *idx_f,*idx_c;
79: PetscInt m_ghost,m_ghost_c;
80: PetscInt row,col,i_start_ghost,mx,m_c,nc,ratio;
81: PetscInt i_c,i_start_c,i_start_ghost_c,cols[2],dof;
82: PetscScalar v[2],x;
83: Mat mat;
84: DMBoundaryType bx;
85: ISLocalToGlobalMapping ltog_f,ltog_c;
86: MatType mattype;
89: DMDAGetInfo(dac,0,&Mx,0,0,0,0,0,0,0,&bx,0,0,0);
90: DMDAGetInfo(daf,0,&mx,0,0,0,0,0,&dof,0,0,0,0,0);
91: if (bx == DM_BOUNDARY_PERIODIC) {
92: ratio = mx/Mx;
93: if (ratio*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
94: } else {
95: ratio = (mx-1)/(Mx-1);
96: if (ratio*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
97: }
99: DMDAGetCorners(daf,&i_start,0,0,&m_f,0,0);
100: DMDAGetGhostCorners(daf,&i_start_ghost,0,0,&m_ghost,0,0);
101: DMGetLocalToGlobalMapping(daf,&ltog_f);
102: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
104: DMDAGetCorners(dac,&i_start_c,0,0,&m_c,0,0);
105: DMDAGetGhostCorners(dac,&i_start_ghost_c,0,0,&m_ghost_c,0,0);
106: DMGetLocalToGlobalMapping(dac,&ltog_c);
107: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
109: /* create interpolation matrix */
110: MatCreate(PetscObjectComm((PetscObject)dac),&mat);
111: #if defined(PETSC_HAVE_CUDA)
112: /*
113: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
114: we don't want the original unconverted matrix copied to the GPU
115: */
116: if (dof > 1) {
117: MatPinToCPU(mat,PETSC_TRUE);
118: }
119: #endif
120: MatSetSizes(mat,m_f,m_c,mx,Mx);
121: ConvertToAIJ(dac->mattype,&mattype);
122: MatSetType(mat,mattype);
123: MatSeqAIJSetPreallocation(mat,2,NULL);
124: MatMPIAIJSetPreallocation(mat,2,NULL,1,NULL);
126: /* loop over local fine grid nodes setting interpolation for those*/
127: if (!NEWVERSION) {
129: for (i=i_start; i<i_start+m_f; i++) {
130: /* convert to local "natural" numbering and then to PETSc global numbering */
131: row = idx_f[i-i_start_ghost];
133: i_c = (i/ratio); /* coarse grid node to left of fine grid node */
134: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
135: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
137: /*
138: Only include those interpolation points that are truly
139: nonzero. Note this is very important for final grid lines
140: in x direction; since they have no right neighbor
141: */
142: x = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio);
143: nc = 0;
144: /* one left and below; or we are right on it */
145: col = (i_c-i_start_ghost_c);
146: cols[nc] = idx_c[col];
147: v[nc++] = -x + 1.0;
148: /* one right? */
149: if (i_c*ratio != i) {
150: cols[nc] = idx_c[col+1];
151: v[nc++] = x;
152: }
153: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
154: }
156: } else {
157: PetscScalar *xi;
158: PetscInt li,nxi,n;
159: PetscScalar Ni[2];
161: /* compute local coordinate arrays */
162: nxi = ratio + 1;
163: PetscMalloc1(nxi,&xi);
164: for (li=0; li<nxi; li++) {
165: xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
166: }
168: for (i=i_start; i<i_start+m_f; i++) {
169: /* convert to local "natural" numbering and then to PETSc global numbering */
170: row = idx_f[(i-i_start_ghost)];
172: i_c = (i/ratio); /* coarse grid node to left of fine grid node */
173: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
174: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
176: /* remainders */
177: li = i - ratio * (i/ratio);
178: if (i==mx-1) li = nxi-1;
180: /* corners */
181: col = (i_c-i_start_ghost_c);
182: cols[0] = idx_c[col];
183: Ni[0] = 1.0;
184: if ((li==0) || (li==nxi-1)) {
185: MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);
186: continue;
187: }
189: /* edges + interior */
190: /* remainders */
191: if (i==mx-1) i_c--;
193: col = (i_c-i_start_ghost_c);
194: cols[0] = idx_c[col]; /* one left and below; or we are right on it */
195: cols[1] = idx_c[col+1];
197: Ni[0] = 0.5*(1.0-xi[li]);
198: Ni[1] = 0.5*(1.0+xi[li]);
199: for (n=0; n<2; n++) {
200: if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1;
201: }
202: MatSetValues(mat,1,&row,2,cols,Ni,INSERT_VALUES);
203: }
204: PetscFree(xi);
205: }
206: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
207: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
208: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
209: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
210: MatCreateMAIJ(mat,dof,A);
211: MatDestroy(&mat);
212: return(0);
213: }
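/*
   Editor's note (worked example, not part of the original source): with ratio = 2 a fine grid
   node i = 2*i_c + 1 lies halfway between coarse nodes i_c and i_c+1, giving x = 0.5 and the
   weights 1-x = 0.5 and x = 0.5; a fine node i = 2*i_c coincides with coarse node i_c and gets
   the single weight 1.0, since the "one right" column is skipped when i_c*ratio == i.
*/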
215: PetscErrorCode DMCreateInterpolation_DA_1D_Q0(DM dac,DM daf,Mat *A)
216: {
217: PetscErrorCode ierr;
218: PetscInt i,i_start,m_f,Mx;
219: const PetscInt *idx_f,*idx_c;
220: ISLocalToGlobalMapping ltog_f,ltog_c;
221: PetscInt m_ghost,m_ghost_c;
222: PetscInt row,col,i_start_ghost,mx,m_c,nc,ratio;
223: PetscInt i_c,i_start_c,i_start_ghost_c,cols[2],dof;
224: PetscScalar v[2],x;
225: Mat mat;
226: DMBoundaryType bx;
227: MatType mattype;
230: DMDAGetInfo(dac,0,&Mx,0,0,0,0,0,0,0,&bx,0,0,0);
231: DMDAGetInfo(daf,0,&mx,0,0,0,0,0,&dof,0,0,0,0,0);
232: if (bx == DM_BOUNDARY_PERIODIC) {
233: if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
234: ratio = mx/Mx;
235: if (ratio*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
236: } else {
237: if (Mx < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx);
238: ratio = (mx-1)/(Mx-1);
239: if (ratio*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
240: }
242: DMDAGetCorners(daf,&i_start,0,0,&m_f,0,0);
243: DMDAGetGhostCorners(daf,&i_start_ghost,0,0,&m_ghost,0,0);
244: DMGetLocalToGlobalMapping(daf,&ltog_f);
245: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
247: DMDAGetCorners(dac,&i_start_c,0,0,&m_c,0,0);
248: DMDAGetGhostCorners(dac,&i_start_ghost_c,0,0,&m_ghost_c,0,0);
249: DMGetLocalToGlobalMapping(dac,&ltog_c);
250: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
252: /* create interpolation matrix */
253: MatCreate(PetscObjectComm((PetscObject)dac),&mat);
254: #if defined(PETSC_HAVE_CUDA)
255: /*
256: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
257: we don't want the original unconverted matrix copied to the GPU
258: */
259: if (dof > 1) {
260: MatPinToCPU(mat,PETSC_TRUE);
261: }
262: #endif
263: MatSetSizes(mat,m_f,m_c,mx,Mx);
264: ConvertToAIJ(dac->mattype,&mattype);
265: MatSetType(mat,mattype);
266: MatSeqAIJSetPreallocation(mat,2,NULL);
267: MatMPIAIJSetPreallocation(mat,2,NULL,0,NULL);
269: /* loop over local fine grid nodes setting interpolation for those*/
270: for (i=i_start; i<i_start+m_f; i++) {
271: /* convert to local "natural" numbering and then to PETSc global numbering */
272: row = idx_f[(i-i_start_ghost)];
274: i_c = (i/ratio); /* coarse grid node to left of fine grid node */
276: /*
277: Only include those interpolation points that are truly
278: nonzero. Note this is very important for final grid lines
279: in x direction; since they have no right neighbor
280: */
281: x = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio);
282: nc = 0;
283: /* one left and below; or we are right on it */
284: col = (i_c-i_start_ghost_c);
285: cols[nc] = idx_c[col];
286: v[nc++] = -x + 1.0;
287: /* one right? */
288: if (i_c*ratio != i) {
289: cols[nc] = idx_c[col+1];
290: v[nc++] = x;
291: }
292: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
293: }
294: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
295: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
296: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
297: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
298: MatCreateMAIJ(mat,dof,A);
299: MatDestroy(&mat);
300: PetscLogFlops(5.0*m_f);
301: return(0);
302: }
304: PetscErrorCode DMCreateInterpolation_DA_2D_Q1(DM dac,DM daf,Mat *A)
305: {
306: PetscErrorCode ierr;
307: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
308: const PetscInt *idx_c,*idx_f;
309: ISLocalToGlobalMapping ltog_f,ltog_c;
310: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz;
311: PetscInt row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj;
312: PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale;
313: PetscMPIInt size_c,size_f,rank_f;
314: PetscScalar v[4],x,y;
315: Mat mat;
316: DMBoundaryType bx,by;
317: MatType mattype;
320: DMDAGetInfo(dac,0,&Mx,&My,0,0,0,0,0,0,&bx,&by,0,0);
321: DMDAGetInfo(daf,0,&mx,&my,0,0,0,0,&dof,0,0,0,0,0);
322: if (bx == DM_BOUNDARY_PERIODIC) {
323: if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
324: ratioi = mx/Mx;
325: if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
326: } else {
327: if (Mx < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx);
328: ratioi = (mx-1)/(Mx-1);
329: if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
330: }
331: if (by == DM_BOUNDARY_PERIODIC) {
332: if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
333: ratioj = my/My;
334: if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
335: } else {
336: if (My < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be greater than 1",My);
337: ratioj = (my-1)/(My-1);
338: if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
339: }
341: DMDAGetCorners(daf,&i_start,&j_start,0,&m_f,&n_f,0);
342: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,0,&m_ghost,&n_ghost,0);
343: DMGetLocalToGlobalMapping(daf,&ltog_f);
344: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
346: DMDAGetCorners(dac,&i_start_c,&j_start_c,0,&m_c,&n_c,0);
347: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,0,&m_ghost_c,&n_ghost_c,0);
348: DMGetLocalToGlobalMapping(dac,&ltog_c);
349: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
351: /*
352: Used for handling a coarse DMDA that lives on 1/4 of the processors of the fine DMDA.
353: The coarse vector is then duplicated 4 times (each copy lives on 1/4 of the
354: processors). Its effective length is hence 4 times its normal length; this is
355: why col_scale is multiplied into the interpolation matrix column sizes.
356: col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
357: copy of the coarse vector. A bit of a hack, but hard to do better.
359: In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
360: */
361: MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);
362: MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);
363: MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);
364: col_scale = size_f/size_c;
365: col_shift = Mx*My*(rank_f/size_c);
367: MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz);
368: for (j=j_start; j<j_start+n_f; j++) {
369: for (i=i_start; i<i_start+m_f; i++) {
370: /* convert to local "natural" numbering and then to PETSc global numbering */
371: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
373: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
374: j_c = (j/ratioj); /* coarse grid node below fine grid node */
376: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
377: j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
378: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
379: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
381: /*
382: Only include those interpolation points that are truly
383: nonzero. Note this is very important for final grid lines
384: in x and y directions; since they have no right/top neighbors
385: */
386: nc = 0;
387: /* one left and below; or we are right on it */
388: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
389: cols[nc++] = col_shift + idx_c[col];
390: /* one right and below */
391: if (i_c*ratioi != i) cols[nc++] = col_shift + idx_c[col+1];
392: /* one left and above */
393: if (j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+m_ghost_c];
394: /* one right and above */
395: if (i_c*ratioi != i && j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+(m_ghost_c+1)];
396: MatPreallocateSet(row,nc,cols,dnz,onz);
397: }
398: }
399: MatCreate(PetscObjectComm((PetscObject)daf),&mat);
400: #if defined(PETSC_HAVE_CUDA)
401: /*
402: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
403: we don't want the original unconverted matrix copied to the GPU
404: */
405: if (dof > 1) {
406: MatPinToCPU(mat,PETSC_TRUE);
407: }
408: #endif
409: MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My);
410: ConvertToAIJ(dac->mattype,&mattype);
411: MatSetType(mat,mattype);
412: MatSeqAIJSetPreallocation(mat,0,dnz);
413: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
414: MatPreallocateFinalize(dnz,onz);
416: /* loop over local fine grid nodes setting interpolation for those*/
417: if (!NEWVERSION) {
419: for (j=j_start; j<j_start+n_f; j++) {
420: for (i=i_start; i<i_start+m_f; i++) {
421: /* convert to local "natural" numbering and then to PETSc global numbering */
422: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
424: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
425: j_c = (j/ratioj); /* coarse grid node below fine grid node */
427: /*
428: Only include those interpolation points that are truly
429: nonzero. Note this is very important for final grid lines
430: in x and y directions; since they have no right/top neighbors
431: */
432: x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi);
433: y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj);
435: nc = 0;
436: /* one left and below; or we are right on it */
437: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
438: cols[nc] = col_shift + idx_c[col];
439: v[nc++] = x*y - x - y + 1.0;
440: /* one right and below */
441: if (i_c*ratioi != i) {
442: cols[nc] = col_shift + idx_c[col+1];
443: v[nc++] = -x*y + x;
444: }
445: /* one left and above */
446: if (j_c*ratioj != j) {
447: cols[nc] = col_shift + idx_c[col+m_ghost_c];
448: v[nc++] = -x*y + y;
449: }
450: /* one right and above */
451: if (j_c*ratioj != j && i_c*ratioi != i) {
452: cols[nc] = col_shift + idx_c[col+(m_ghost_c+1)];
453: v[nc++] = x*y;
454: }
455: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
456: }
457: }
459: } else {
460: PetscScalar Ni[4];
461: PetscScalar *xi,*eta;
462: PetscInt li,nxi,lj,neta;
464: /* compute local coordinate arrays */
465: nxi = ratioi + 1;
466: neta = ratioj + 1;
467: PetscMalloc1(nxi,&xi);
468: PetscMalloc1(neta,&eta);
469: for (li=0; li<nxi; li++) {
470: xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
471: }
472: for (lj=0; lj<neta; lj++) {
473: eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1));
474: }
476: /* loop over local fine grid nodes setting interpolation for those*/
477: for (j=j_start; j<j_start+n_f; j++) {
478: for (i=i_start; i<i_start+m_f; i++) {
479: /* convert to local "natural" numbering and then to PETSc global numbering */
480: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
482: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
483: j_c = (j/ratioj); /* coarse grid node below fine grid node */
485: /* remainders */
486: li = i - ratioi * (i/ratioi);
487: if (i==mx-1) li = nxi-1;
488: lj = j - ratioj * (j/ratioj);
489: if (j==my-1) lj = neta-1;
491: /* corners */
492: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
493: cols[0] = col_shift + idx_c[col]; /* left, below */
494: Ni[0] = 1.0;
495: if ((li==0) || (li==nxi-1)) {
496: if ((lj==0) || (lj==neta-1)) {
497: MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);
498: continue;
499: }
500: }
502: /* edges + interior */
503: /* remainders */
504: if (i==mx-1) i_c--;
505: if (j==my-1) j_c--;
507: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
508: cols[0] = col_shift + idx_c[col]; /* left, below */
509: cols[1] = col_shift + idx_c[col+1]; /* right, below */
510: cols[2] = col_shift + idx_c[col+m_ghost_c]; /* left, above */
511: cols[3] = col_shift + idx_c[col+(m_ghost_c+1)]; /* right, above */
513: Ni[0] = 0.25*(1.0-xi[li])*(1.0-eta[lj]);
514: Ni[1] = 0.25*(1.0+xi[li])*(1.0-eta[lj]);
515: Ni[2] = 0.25*(1.0-xi[li])*(1.0+eta[lj]);
516: Ni[3] = 0.25*(1.0+xi[li])*(1.0+eta[lj]);
518: nc = 0;
519: if (PetscAbsScalar(Ni[0])<1.0e-32) cols[0]=-1;
520: if (PetscAbsScalar(Ni[1])<1.0e-32) cols[1]=-1;
521: if (PetscAbsScalar(Ni[2])<1.0e-32) cols[2]=-1;
522: if (PetscAbsScalar(Ni[3])<1.0e-32) cols[3]=-1;
524: MatSetValues(mat,1,&row,4,cols,Ni,INSERT_VALUES);
525: }
526: }
527: PetscFree(xi);
528: PetscFree(eta);
529: }
530: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
531: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
532: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
533: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
534: MatCreateMAIJ(mat,dof,A);
535: MatDestroy(&mat);
536: return(0);
537: }
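/*
   Editor's note (not part of the original source): the four weights in the old-version branch are
   the bilinear shape functions written in expanded form,

     x*y - x - y + 1 = (1-x)*(1-y)    left, below
     -x*y + x        = x*(1-y)        right, below
     -x*y + y        = (1-x)*y        left, above
     x*y             = x*y            right, above

   where x,y in [0,1) are the local offsets of the fine node inside its coarse cell; the weights
   sum to 1, so constants are interpolated exactly.
*/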
539: /*
540: Contributed by Andrei Draganescu <aidraga@sandia.gov>
541: */
542: PetscErrorCode DMCreateInterpolation_DA_2D_Q0(DM dac,DM daf,Mat *A)
543: {
544: PetscErrorCode ierr;
545: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
546: const PetscInt *idx_c,*idx_f;
547: ISLocalToGlobalMapping ltog_f,ltog_c;
548: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz;
549: PetscInt row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj;
550: PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale;
551: PetscMPIInt size_c,size_f,rank_f;
552: PetscScalar v[4];
553: Mat mat;
554: DMBoundaryType bx,by;
555: MatType mattype;
558: DMDAGetInfo(dac,0,&Mx,&My,0,0,0,0,0,0,&bx,&by,0,0);
559: DMDAGetInfo(daf,0,&mx,&my,0,0,0,0,&dof,0,0,0,0,0);
560: if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
561: if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
562: ratioi = mx/Mx;
563: ratioj = my/My;
564: if (ratioi*Mx != mx) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in x");
565: if (ratioj*My != my) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in y");
566: if (ratioi != 2) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in x must be 2");
567: if (ratioj != 2) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in y must be 2");
569: DMDAGetCorners(daf,&i_start,&j_start,0,&m_f,&n_f,0);
570: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,0,&m_ghost,&n_ghost,0);
571: DMGetLocalToGlobalMapping(daf,&ltog_f);
572: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
574: DMDAGetCorners(dac,&i_start_c,&j_start_c,0,&m_c,&n_c,0);
575: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,0,&m_ghost_c,&n_ghost_c,0);
576: DMGetLocalToGlobalMapping(dac,&ltog_c);
577: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
579: /*
580: Used for handling a coarse DMDA that lives on 1/4 of the processors of the fine DMDA.
581: The coarse vector is then duplicated 4 times (each copy lives on 1/4 of the
582: processors). Its effective length is hence 4 times its normal length; this is
583: why col_scale is multiplied into the interpolation matrix column sizes.
584: col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
585: copy of the coarse vector. A bit of a hack, but hard to do better.
587: In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
588: */
589: MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);
590: MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);
591: MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);
592: col_scale = size_f/size_c;
593: col_shift = Mx*My*(rank_f/size_c);
595: MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz);
596: for (j=j_start; j<j_start+n_f; j++) {
597: for (i=i_start; i<i_start+m_f; i++) {
598: /* convert to local "natural" numbering and then to PETSc global numbering */
599: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
601: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
602: j_c = (j/ratioj); /* coarse grid node below fine grid node */
604: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
605: j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
606: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
607: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
609: /*
610: Only include those interpolation points that are truly
611: nonzero. Note this is very important for final grid lines
612: in x and y directions; since they have no right/top neighbors
613: */
614: nc = 0;
615: /* one left and below; or we are right on it */
616: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
617: cols[nc++] = col_shift + idx_c[col];
618: MatPreallocateSet(row,nc,cols,dnz,onz);
619: }
620: }
621: MatCreate(PetscObjectComm((PetscObject)daf),&mat);
622: #if defined(PETSC_HAVE_CUDA)
623: /*
624: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
625: we don't want the original unconverted matrix copied to the GPU
626: */
627: if (dof > 1) {
628: MatPinToCPU(mat,PETSC_TRUE);
629: }
630: #endif
631: MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My);
632: ConvertToAIJ(dac->mattype,&mattype);
633: MatSetType(mat,mattype);
634: MatSeqAIJSetPreallocation(mat,0,dnz);
635: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
636: MatPreallocateFinalize(dnz,onz);
638: /* loop over local fine grid nodes setting interpolation for those*/
639: for (j=j_start; j<j_start+n_f; j++) {
640: for (i=i_start; i<i_start+m_f; i++) {
641: /* convert to local "natural" numbering and then to PETSc global numbering */
642: row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
644: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
645: j_c = (j/ratioj); /* coarse grid node below fine grid node */
646: nc = 0;
647: /* one left and below; or we are right on it */
648: col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
649: cols[nc] = col_shift + idx_c[col];
650: v[nc++] = 1.0;
652: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
653: }
654: }
655: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
656: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
657: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
658: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
659: MatCreateMAIJ(mat,dof,A);
660: MatDestroy(&mat);
661: PetscLogFlops(13.0*m_f*n_f);
662: return(0);
663: }
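/*
   Editor's note (not part of the original source): this Q0 variant is piecewise constant
   prolongation with coarsening factor 2; every fine grid node inside a coarse cell receives the
   value of that cell's lower-left coarse node with weight 1.0. The 3D routine below does the
   same using the lower-left-front coarse node.
*/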
665: /*
666: Contributed by Jianming Yang <jianming-yang@uiowa.edu>
667: */
668: PetscErrorCode DMCreateInterpolation_DA_3D_Q0(DM dac,DM daf,Mat *A)
669: {
670: PetscErrorCode ierr;
671: PetscInt i,j,l,i_start,j_start,l_start,m_f,n_f,p_f,Mx,My,Mz,dof;
672: const PetscInt *idx_c,*idx_f;
673: ISLocalToGlobalMapping ltog_f,ltog_c;
674: PetscInt m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c,nc,*dnz,*onz;
675: PetscInt row,col,i_start_ghost,j_start_ghost,l_start_ghost,cols[8],mx,m_c,my,n_c,mz,p_c,ratioi,ratioj,ratiol;
676: PetscInt i_c,j_c,l_c,i_start_c,j_start_c,l_start_c,i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,col_shift,col_scale;
677: PetscMPIInt size_c,size_f,rank_f;
678: PetscScalar v[8];
679: Mat mat;
680: DMBoundaryType bx,by,bz;
681: MatType mattype;
684: DMDAGetInfo(dac,0,&Mx,&My,&Mz,0,0,0,0,0,&bx,&by,&bz,0);
685: if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
686: if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
687: if (!Mz) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be positive",Mz);
688: DMDAGetInfo(daf,0,&mx,&my,&mz,0,0,0,&dof,0,0,0,0,0);
689: ratioi = mx/Mx;
690: ratioj = my/My;
691: ratiol = mz/Mz;
692: if (ratioi*Mx != mx) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in x");
693: if (ratioj*My != my) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in y");
694: if (ratiol*Mz != mz) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in z");
695: if (ratioi != 2 && ratioi != 1) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in x must be 1 or 2");
696: if (ratioj != 2 && ratioj != 1) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in y must be 1 or 2");
697: if (ratiol != 2 && ratiol != 1) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in z must be 1 or 2");
699: DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);
700: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);
701: DMGetLocalToGlobalMapping(daf,&ltog_f);
702: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
704: DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);
705: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
706: DMGetLocalToGlobalMapping(dac,&ltog_c);
707: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
709: /*
710: Used for handling a coarse DMDA that lives on 1/4 of the processors of the fine DMDA.
711: The coarse vector is then duplicated 4 times (each copy lives on 1/4 of the
712: processors). Its effective length is hence 4 times its normal length; this is
713: why col_scale is multiplied into the interpolation matrix column sizes.
714: col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
715: copy of the coarse vector. A bit of a hack, but hard to do better.
717: In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
718: */
719: MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);
720: MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);
721: MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);
722: col_scale = size_f/size_c;
723: col_shift = Mx*My*Mz*(rank_f/size_c);
725: MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f*p_f,col_scale*m_c*n_c*p_c,dnz,onz);
726: for (l=l_start; l<l_start+p_f; l++) {
727: for (j=j_start; j<j_start+n_f; j++) {
728: for (i=i_start; i<i_start+m_f; i++) {
729: /* convert to local "natural" numbering and then to PETSc global numbering */
730: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
732: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
733: j_c = (j/ratioj); /* coarse grid node below fine grid node */
734: l_c = (l/ratiol);
736: if (l_c < l_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
737: l_start %D l_c %D l_start_ghost_c %D",l_start,l_c,l_start_ghost_c);
738: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
739: j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
740: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
741: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
743: /*
744: Only include those interpolation points that are truly
745: nonzero. Note this is very important for final grid lines
746: in x and y directions; since they have no right/top neighbors
747: */
748: nc = 0;
749: /* one left and below; or we are right on it */
750: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
751: cols[nc++] = col_shift + idx_c[col];
752: MatPreallocateSet(row,nc,cols,dnz,onz);
753: }
754: }
755: }
756: MatCreate(PetscObjectComm((PetscObject)daf),&mat);
757: #if defined(PETSC_HAVE_CUDA)
758: /*
759: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
760: we don't want the original unconverted matrix copied to the GPU
761: */
762: if (dof > 1) {
763: MatPinToCPU(mat,PETSC_TRUE);
764: }
765: #endif
766: MatSetSizes(mat,m_f*n_f*p_f,col_scale*m_c*n_c*p_c,mx*my*mz,col_scale*Mx*My*Mz);
767: ConvertToAIJ(dac->mattype,&mattype);
768: MatSetType(mat,mattype);
769: MatSeqAIJSetPreallocation(mat,0,dnz);
770: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
771: MatPreallocateFinalize(dnz,onz);
773: /* loop over local fine grid nodes setting interpolation for those*/
774: for (l=l_start; l<l_start+p_f; l++) {
775: for (j=j_start; j<j_start+n_f; j++) {
776: for (i=i_start; i<i_start+m_f; i++) {
777: /* convert to local "natural" numbering and then to PETSc global numbering */
778: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
780: i_c = (i/ratioi); /* coarse grid node to left of fine grid node */
781: j_c = (j/ratioj); /* coarse grid node below fine grid node */
782: l_c = (l/ratiol);
783: nc = 0;
784: /* one left and below; or we are right on it */
785: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
786: cols[nc] = col_shift + idx_c[col];
787: v[nc++] = 1.0;
789: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
790: }
791: }
792: }
793: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
794: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
795: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
796: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
797: MatCreateMAIJ(mat,dof,A);
798: MatDestroy(&mat);
799: PetscLogFlops(13.0*m_f*n_f*p_f);
800: return(0);
801: }
803: PetscErrorCode DMCreateInterpolation_DA_3D_Q1(DM dac,DM daf,Mat *A)
804: {
805: PetscErrorCode ierr;
806: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof,l;
807: const PetscInt *idx_c,*idx_f;
808: ISLocalToGlobalMapping ltog_f,ltog_c;
809: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,Mz,mz;
810: PetscInt row,col,i_start_ghost,j_start_ghost,cols[8],mx,m_c,my,nc,ratioi,ratioj,ratiok;
811: PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c;
812: PetscInt l_start,p_f,l_start_ghost,p_ghost,l_start_c,p_c;
813: PetscInt l_start_ghost_c,p_ghost_c,l_c,*dnz,*onz;
814: PetscScalar v[8],x,y,z;
815: Mat mat;
816: DMBoundaryType bx,by,bz;
817: MatType mattype;
820: DMDAGetInfo(dac,0,&Mx,&My,&Mz,0,0,0,0,0,&bx,&by,&bz,0);
821: DMDAGetInfo(daf,0,&mx,&my,&mz,0,0,0,&dof,0,0,0,0,0);
822: if (mx == Mx) {
823: ratioi = 1;
824: } else if (bx == DM_BOUNDARY_PERIODIC) {
825: if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
826: ratioi = mx/Mx;
827: if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
828: } else {
829: if (Mx < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx);
830: ratioi = (mx-1)/(Mx-1);
831: if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
832: }
833: if (my == My) {
834: ratioj = 1;
835: } else if (by == DM_BOUNDARY_PERIODIC) {
836: if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
837: ratioj = my/My;
838: if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
839: } else {
840: if (My < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be greater than 1",My);
841: ratioj = (my-1)/(My-1);
842: if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
843: }
844: if (mz == Mz) {
845: ratiok = 1;
846: } else if (bz == DM_BOUNDARY_PERIODIC) {
847: if (!Mz) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be positive",Mz);
848: ratiok = mz/Mz;
849: if (ratiok*Mz != mz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mz/Mz must be integer: mz %D Mz %D",mz,Mz);
850: } else {
851: if (Mz < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be greater than 1",Mz);
852: ratiok = (mz-1)/(Mz-1);
853: if (ratiok*(Mz-1) != mz-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %D Mz %D",mz,Mz);
854: }
856: DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);
857: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);
858: DMGetLocalToGlobalMapping(daf,&ltog_f);
859: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
861: DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);
862: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
863: DMGetLocalToGlobalMapping(dac,&ltog_c);
864: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
866: /* create interpolation matrix, determining exact preallocation */
867: MatPreallocateInitialize(PetscObjectComm((PetscObject)dac),m_f*n_f*p_f,m_c*n_c*p_c,dnz,onz);
868: /* loop over local fine grid nodes counting interpolating points */
869: for (l=l_start; l<l_start+p_f; l++) {
870: for (j=j_start; j<j_start+n_f; j++) {
871: for (i=i_start; i<i_start+m_f; i++) {
872: /* convert to local "natural" numbering and then to PETSc global numbering */
873: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
874: i_c = (i/ratioi);
875: j_c = (j/ratioj);
876: l_c = (l/ratiok);
877: if (l_c < l_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
878: l_start %D l_c %D l_start_ghost_c %D",l_start,l_c,l_start_ghost_c);
879: if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
880: j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
881: if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
882: i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);
884: /*
885: Only include those interpolation points that are truly
886: nonzero. Note this is very important for final grid lines
887: in x and y directions; since they have no right/top neighbors
888: */
889: nc = 0;
890: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
891: cols[nc++] = idx_c[col];
892: if (i_c*ratioi != i) {
893: cols[nc++] = idx_c[col+1];
894: }
895: if (j_c*ratioj != j) {
896: cols[nc++] = idx_c[col+m_ghost_c];
897: }
898: if (l_c*ratiok != l) {
899: cols[nc++] = idx_c[col+m_ghost_c*n_ghost_c];
900: }
901: if (j_c*ratioj != j && i_c*ratioi != i) {
902: cols[nc++] = idx_c[col+(m_ghost_c+1)];
903: }
904: if (j_c*ratioj != j && l_c*ratiok != l) {
905: cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];
906: }
907: if (i_c*ratioi != i && l_c*ratiok != l) {
908: cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+1)];
909: }
910: if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) {
911: cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)];
912: }
913: MatPreallocateSet(row,nc,cols,dnz,onz);
914: }
915: }
916: }
917: MatCreate(PetscObjectComm((PetscObject)dac),&mat);
918: #if defined(PETSC_HAVE_CUDA)
919: /*
920: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
921: we don't want the original unconverted matrix copied to the GPU
922: */
923: if (dof > 1) {
924: MatPinToCPU(mat,PETSC_TRUE);
925: }
926: #endif
927: MatSetSizes(mat,m_f*n_f*p_f,m_c*n_c*p_c,mx*my*mz,Mx*My*Mz);
928: ConvertToAIJ(dac->mattype,&mattype);
929: MatSetType(mat,mattype);
930: MatSeqAIJSetPreallocation(mat,0,dnz);
931: MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);
932: MatPreallocateFinalize(dnz,onz);
934: /* loop over local fine grid nodes setting interpolation for those*/
935: if (!NEWVERSION) {
937: for (l=l_start; l<l_start+p_f; l++) {
938: for (j=j_start; j<j_start+n_f; j++) {
939: for (i=i_start; i<i_start+m_f; i++) {
940: /* convert to local "natural" numbering and then to PETSc global numbering */
941: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
943: i_c = (i/ratioi);
944: j_c = (j/ratioj);
945: l_c = (l/ratiok);
947: /*
948: Only include those interpolation points that are truly
949: nonzero. Note this is very important for final grid lines
950: in x and y directions; since they have no right/top neighbors
951: */
952: x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi);
953: y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj);
954: z = ((PetscReal)(l - l_c*ratiok))/((PetscReal)ratiok);
956: nc = 0;
957: /* one left and below; or we are right on it */
958: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c));
960: cols[nc] = idx_c[col];
961: v[nc++] = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.));
963: if (i_c*ratioi != i) {
964: cols[nc] = idx_c[col+1];
965: v[nc++] = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.));
966: }
968: if (j_c*ratioj != j) {
969: cols[nc] = idx_c[col+m_ghost_c];
970: v[nc++] = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.));
971: }
973: if (l_c*ratiok != l) {
974: cols[nc] = idx_c[col+m_ghost_c*n_ghost_c];
975: v[nc++] = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.));
976: }
978: if (j_c*ratioj != j && i_c*ratioi != i) {
979: cols[nc] = idx_c[col+(m_ghost_c+1)];
980: v[nc++] = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.));
981: }
983: if (j_c*ratioj != j && l_c*ratiok != l) {
984: cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];
985: v[nc++] = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.));
986: }
988: if (i_c*ratioi != i && l_c*ratiok != l) {
989: cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+1)];
990: v[nc++] = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.));
991: }
993: if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) {
994: cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)];
995: v[nc++] = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.));
996: }
997: MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);
998: }
999: }
1000: }
1002: } else {
1003: PetscScalar *xi,*eta,*zeta;
1004: PetscInt li,nxi,lj,neta,lk,nzeta,n;
1005: PetscScalar Ni[8];
1007: /* compute local coordinate arrays */
1008: nxi = ratioi + 1;
1009: neta = ratioj + 1;
1010: nzeta = ratiok + 1;
1011: PetscMalloc1(nxi,&xi);
1012: PetscMalloc1(neta,&eta);
1013: PetscMalloc1(nzeta,&zeta);
1014: for (li=0; li<nxi; li++) xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
1015: for (lj=0; lj<neta; lj++) eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1));
1016: for (lk=0; lk<nzeta; lk++) zeta[lk] = -1.0 + (PetscScalar)lk*(2.0/(PetscScalar)(nzeta-1));
1018: for (l=l_start; l<l_start+p_f; l++) {
1019: for (j=j_start; j<j_start+n_f; j++) {
1020: for (i=i_start; i<i_start+m_f; i++) {
1021: /* convert to local "natural" numbering and then to PETSc global numbering */
1022: row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
1024: i_c = (i/ratioi);
1025: j_c = (j/ratioj);
1026: l_c = (l/ratiok);
1028: /* remainders */
1029: li = i - ratioi * (i/ratioi);
1030: if (i==mx-1) li = nxi-1;
1031: lj = j - ratioj * (j/ratioj);
1032: if (j==my-1) lj = neta-1;
1033: lk = l - ratiok * (l/ratiok);
1034: if (l==mz-1) lk = nzeta-1;
1036: /* corners */
1037: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c));
1038: cols[0] = idx_c[col];
1039: Ni[0] = 1.0;
1040: if ((li==0) || (li==nxi-1)) {
1041: if ((lj==0) || (lj==neta-1)) {
1042: if ((lk==0) || (lk==nzeta-1)) {
1043: MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);
1044: continue;
1045: }
1046: }
1047: }
1049: /* edges + interior */
1050: /* remainders */
1051: if (i==mx-1) i_c--;
1052: if (j==my-1) j_c--;
1053: if (l==mz-1) l_c--;
1055: col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
1056: cols[0] = idx_c[col]; /* one left and below; or we are right on it */
1057: cols[1] = idx_c[col+1]; /* one right and below */
1058: cols[2] = idx_c[col+m_ghost_c]; /* one left and above */
1059: cols[3] = idx_c[col+(m_ghost_c+1)]; /* one right and above */
1061: cols[4] = idx_c[col+m_ghost_c*n_ghost_c]; /* one left and below and front; or we are right on it */
1062: cols[5] = idx_c[col+(m_ghost_c*n_ghost_c+1)]; /* one right and below, and front */
1063: cols[6] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)]; /* one left and above and front*/
1064: cols[7] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)]; /* one right and above and front */
1066: Ni[0] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]);
1067: Ni[1] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]);
1068: Ni[2] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]);
1069: Ni[3] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]);
1071: Ni[4] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]);
1072: Ni[5] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]);
1073: Ni[6] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]);
1074: Ni[7] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]);
1076: for (n=0; n<8; n++) {
1077: if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1;
1078: }
1079: MatSetValues(mat,1,&row,8,cols,Ni,INSERT_VALUES);
1081: }
1082: }
1083: }
1084: PetscFree(xi);
1085: PetscFree(eta);
1086: PetscFree(zeta);
1087: }
1088: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1089: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
1090: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
1091: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
1093: MatCreateMAIJ(mat,dof,A);
1094: MatDestroy(&mat);
1095: return(0);
1096: }
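/*
   Editor's note (not part of the original source): since 1 - (2*x-1) = 2*(1-x) and
   1 + (2*x-1) = 2*x, each weight in the old-version branch simplifies to a standard trilinear
   shape function, e.g.

     .125*(1-(2*x-1))*(1-(2*y-1))*(1-(2*z-1)) = (1-x)*(1-y)*(1-z)
     .125*(1+(2*x-1))*(1-(2*y-1))*(1+(2*z-1)) = x*(1-y)*z

   with x,y,z the local offsets of the fine node within its coarse cell; the eight weights sum to 1.
*/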
1098: PetscErrorCode DMCreateInterpolation_DA(DM dac,DM daf,Mat *A,Vec *scale)
1099: {
1100: PetscErrorCode ierr;
1101: PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
1102: DMBoundaryType bxc,byc,bzc,bxf,byf,bzf;
1103: DMDAStencilType stc,stf;
1104: DM_DA *ddc = (DM_DA*)dac->data;
1112: DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);
1113: DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);
1114: if (dimc != dimf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf);
1115: if (dofc != doff) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff);
1116: if (sc != sf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %D %D",sc,sf);
1117: if (bxc != bxf || byc != byf || bzc != bzf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
1118: if (stc != stf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");
1119: if (Mc < 2 && Mf > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in x direction");
1120: if (dimc > 1 && Nc < 2 && Nf > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in y direction");
1121: if (dimc > 2 && Pc < 2 && Pf > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in z direction");
1123: if (ddc->interptype == DMDA_Q1) {
1124: if (dimc == 1) {
1125: DMCreateInterpolation_DA_1D_Q1(dac,daf,A);
1126: } else if (dimc == 2) {
1127: DMCreateInterpolation_DA_2D_Q1(dac,daf,A);
1128: } else if (dimc == 3) {
1129: DMCreateInterpolation_DA_3D_Q1(dac,daf,A);
1130: } else SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %D for interpolation type %d",dimc,(int)ddc->interptype);
1131: } else if (ddc->interptype == DMDA_Q0) {
1132: if (dimc == 1) {
1133: DMCreateInterpolation_DA_1D_Q0(dac,daf,A);
1134: } else if (dimc == 2) {
1135: DMCreateInterpolation_DA_2D_Q0(dac,daf,A);
1136: } else if (dimc == 3) {
1137: DMCreateInterpolation_DA_3D_Q0(dac,daf,A);
1138: } else SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %D for interpolation type %d",dimc,(int)ddc->interptype);
1139: }
1140: if (scale) {
1141: DMCreateInterpolationScale((DM)dac,(DM)daf,*A,scale);
1142: }
1143: return(0);
1144: }
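/*
   Editor's sketch (not part of the original source) of how this dispatch routine is typically
   reached; the grid sizes are arbitrary and error checking is omitted in the style of this
   listing.

     DM  dac,daf;
     Mat A;
     Vec scale;
     DMDACreate2d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,DMDA_STENCIL_BOX,
                  5,5,PETSC_DECIDE,PETSC_DECIDE,1,1,NULL,NULL,&dac);
     DMSetUp(dac);
     DMRefine(dac,PETSC_COMM_WORLD,&daf);        the refined DMDA keeps dof, stencil and boundary type
     DMCreateInterpolation(dac,daf,&A,&scale);   dispatches to DMCreateInterpolation_DA() above
*/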
1146: PetscErrorCode DMCreateInjection_DA_1D(DM dac,DM daf,VecScatter *inject)
1147: {
1148: PetscErrorCode ierr;
1149: PetscInt i,i_start,m_f,Mx,dof;
1150: const PetscInt *idx_f;
1151: ISLocalToGlobalMapping ltog_f;
1152: PetscInt m_ghost,m_ghost_c;
1153: PetscInt row,i_start_ghost,mx,m_c,nc,ratioi;
1154: PetscInt i_start_c,i_start_ghost_c;
1155: PetscInt *cols;
1156: DMBoundaryType bx;
1157: Vec vecf,vecc;
1158: IS isf;
1161: DMDAGetInfo(dac,0,&Mx,0,0,0,0,0,0,0,&bx,0,0,0);
1162: DMDAGetInfo(daf,0,&mx,0,0,0,0,0,&dof,0,0,0,0,0);
1163: if (bx == DM_BOUNDARY_PERIODIC) {
1164: ratioi = mx/Mx;
1165: if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
1166: } else {
1167: ratioi = (mx-1)/(Mx-1);
1168: if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
1169: }
1171: DMDAGetCorners(daf,&i_start,0,0,&m_f,0,0);
1172: DMDAGetGhostCorners(daf,&i_start_ghost,0,0,&m_ghost,0,0);
1173: DMGetLocalToGlobalMapping(daf,&ltog_f);
1174: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
1176: DMDAGetCorners(dac,&i_start_c,0,0,&m_c,0,0);
1177: DMDAGetGhostCorners(dac,&i_start_ghost_c,0,0,&m_ghost_c,0,0);
1180: /* loop over local coarse grid nodes, recording the fine grid node that coincides with each */
1181: nc = 0;
1182: PetscMalloc1(m_f,&cols);
1185: for (i=i_start_c; i<i_start_c+m_c; i++) {
1186: PetscInt i_f = i*ratioi;
1188: if (i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\ni_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);
1190: row = idx_f[(i_f-i_start_ghost)];
1191: cols[nc++] = row;
1192: }
1194: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1195: ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);
1196: DMGetGlobalVector(dac,&vecc);
1197: DMGetGlobalVector(daf,&vecf);
1198: VecScatterCreate(vecf,isf,vecc,NULL,inject);
1199: DMRestoreGlobalVector(dac,&vecc);
1200: DMRestoreGlobalVector(daf,&vecf);
1201: ISDestroy(&isf);
1202: return(0);
1203: }
1205: PetscErrorCode DMCreateInjection_DA_2D(DM dac,DM daf,VecScatter *inject)
1206: {
1207: PetscErrorCode ierr;
1208: PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
1209: const PetscInt *idx_c,*idx_f;
1210: ISLocalToGlobalMapping ltog_f,ltog_c;
1211: PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c;
1212: PetscInt row,i_start_ghost,j_start_ghost,mx,m_c,my,nc,ratioi,ratioj;
1213: PetscInt i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c;
1214: PetscInt *cols;
1215: DMBoundaryType bx,by;
1216: Vec vecf,vecc;
1217: IS isf;
1220: DMDAGetInfo(dac,0,&Mx,&My,0,0,0,0,0,0,&bx,&by,0,0);
1221: DMDAGetInfo(daf,0,&mx,&my,0,0,0,0,&dof,0,0,0,0,0);
1222: if (bx == DM_BOUNDARY_PERIODIC) {
1223: ratioi = mx/Mx;
1224: if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
1225: } else {
1226: ratioi = (mx-1)/(Mx-1);
1227: if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
1228: }
1229: if (by == DM_BOUNDARY_PERIODIC) {
1230: ratioj = my/My;
1231: if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
1232: } else {
1233: ratioj = (my-1)/(My-1);
1234: if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
1235: }
1237: DMDAGetCorners(daf,&i_start,&j_start,0,&m_f,&n_f,0);
1238: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,0,&m_ghost,&n_ghost,0);
1239: DMGetLocalToGlobalMapping(daf,&ltog_f);
1240: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
1242: DMDAGetCorners(dac,&i_start_c,&j_start_c,0,&m_c,&n_c,0);
1243: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,0,&m_ghost_c,&n_ghost_c,0);
1244: DMGetLocalToGlobalMapping(dac,&ltog_c);
1245: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
1247: /* loop over local coarse grid nodes, recording the fine grid node that coincides with each */
1248: nc = 0;
1249: PetscMalloc1(n_f*m_f,&cols);
1250: for (j=j_start_c; j<j_start_c+n_c; j++) {
1251: for (i=i_start_c; i<i_start_c+m_c; i++) {
1252: PetscInt i_f = i*ratioi,j_f = j*ratioj;
1253: if (j_f < j_start_ghost || j_f >= j_start_ghost+n_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
1254: j_c %D j_f %D fine ghost range [%D,%D]",j,j_f,j_start_ghost,j_start_ghost+n_ghost);
1255: if (i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
1256: i_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);
1257: row = idx_f[(m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))];
1258: cols[nc++] = row;
1259: }
1260: }
1261: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1262: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
1264: ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);
1265: DMGetGlobalVector(dac,&vecc);
1266: DMGetGlobalVector(daf,&vecf);
1267: VecScatterCreate(vecf,isf,vecc,NULL,inject);
1268: DMRestoreGlobalVector(dac,&vecc);
1269: DMRestoreGlobalVector(daf,&vecf);
1270: ISDestroy(&isf);
1271: return(0);
1272: }
1274: PetscErrorCode DMCreateInjection_DA_3D(DM dac,DM daf,VecScatter *inject)
1275: {
1276: PetscErrorCode ierr;
1277: PetscInt i,j,k,i_start,j_start,k_start,m_f,n_f,p_f,Mx,My,Mz;
1278: PetscInt m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c;
1279: PetscInt i_start_ghost,j_start_ghost,k_start_ghost;
1280: PetscInt mx,my,mz,ratioi,ratioj,ratiok;
1281: PetscInt i_start_c,j_start_c,k_start_c;
1282: PetscInt m_c,n_c,p_c;
1283: PetscInt i_start_ghost_c,j_start_ghost_c,k_start_ghost_c;
1284: PetscInt row,nc,dof;
1285: const PetscInt *idx_c,*idx_f;
1286: ISLocalToGlobalMapping ltog_f,ltog_c;
1287: PetscInt *cols;
1288: DMBoundaryType bx,by,bz;
1289: Vec vecf,vecc;
1290: IS isf;
1293: DMDAGetInfo(dac,0,&Mx,&My,&Mz,0,0,0,0,0,&bx,&by,&bz,0);
1294: DMDAGetInfo(daf,0,&mx,&my,&mz,0,0,0,&dof,0,0,0,0,0);
1296: if (bx == DM_BOUNDARY_PERIODIC) {
1297: ratioi = mx/Mx;
1298: if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
1299: } else {
1300: ratioi = (mx-1)/(Mx-1);
1301: if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
1302: }
1303: if (by == DM_BOUNDARY_PERIODIC) {
1304: ratioj = my/My;
1305: if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
1306: } else {
1307: ratioj = (my-1)/(My-1);
1308: if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
1309: }
1310: if (bz == DM_BOUNDARY_PERIODIC) {
1311: ratiok = mz/Mz;
1312: if (ratiok*Mz != mz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mz/Mz must be integer: mz %D Mz %D",mz,Mz);
1313: } else {
1314: ratiok = (mz-1)/(Mz-1);
1315: if (ratiok*(Mz-1) != mz-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %D Mz %D",mz,Mz);
1316: }
1318: DMDAGetCorners(daf,&i_start,&j_start,&k_start,&m_f,&n_f,&p_f);
1319: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&k_start_ghost,&m_ghost,&n_ghost,&p_ghost);
1320: DMGetLocalToGlobalMapping(daf,&ltog_f);
1321: ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);
1323: DMDAGetCorners(dac,&i_start_c,&j_start_c,&k_start_c,&m_c,&n_c,&p_c);
1324: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&k_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
1325: DMGetLocalToGlobalMapping(dac,&ltog_c);
1326: ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);
1329: /* loop over local coarse grid nodes, recording the fine grid node that coincides with each */
1330: nc = 0;
1331: PetscMalloc1(n_f*m_f*p_f,&cols);
1332: for (k=k_start_c; k<k_start_c+p_c; k++) {
1333: for (j=j_start_c; j<j_start_c+n_c; j++) {
1334: for (i=i_start_c; i<i_start_c+m_c; i++) {
1335: PetscInt i_f = i*ratioi,j_f = j*ratioj,k_f = k*ratiok;
1336: if (k_f < k_start_ghost || k_f >= k_start_ghost+p_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA "
1337: "k_c %D k_f %D fine ghost range [%D,%D]",k,k_f,k_start_ghost,k_start_ghost+p_ghost);
1338: if (j_f < j_start_ghost || j_f >= j_start_ghost+n_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA "
1339: "j_c %D j_f %D fine ghost range [%D,%D]",j,j_f,j_start_ghost,j_start_ghost+n_ghost);
1340: if (i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA "
1341: "i_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);
1342: row = idx_f[(m_ghost*n_ghost*(k_f-k_start_ghost) + m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))];
1343: cols[nc++] = row;
1344: }
1345: }
1346: }
1347: ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);
1348: ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);
1350: ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);
1351: DMGetGlobalVector(dac,&vecc);
1352: DMGetGlobalVector(daf,&vecf);
1353: VecScatterCreate(vecf,isf,vecc,NULL,inject);
1354: DMRestoreGlobalVector(dac,&vecc);
1355: DMRestoreGlobalVector(daf,&vecf);
1356: ISDestroy(&isf);
1357: return(0);
1358: }
1360: PetscErrorCode DMCreateInjection_DA(DM dac,DM daf,Mat *mat)
1361: {
1362: PetscErrorCode ierr;
1363: PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
1364: DMBoundaryType bxc,byc,bzc,bxf,byf,bzf;
1365: DMDAStencilType stc,stf;
1366: VecScatter inject = NULL;
1373: DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);
1374: DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);
1375: if (dimc != dimf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf);
1376: if (dofc != doff) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff);
1377:   if (sc != sf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil widths of DMDA do not match %D %D",sc,sf);
1378: if (bxc != bxf || byc != byf || bzc != bzf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
1379: if (stc != stf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");
1380: if (Mc < 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in x direction");
1381: if (dimc > 1 && Nc < 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in y direction");
1382: if (dimc > 2 && Pc < 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in z direction");
1384: if (dimc == 1) {
1385: DMCreateInjection_DA_1D(dac,daf,&inject);
1386: } else if (dimc == 2) {
1387: DMCreateInjection_DA_2D(dac,daf,&inject);
1388: } else if (dimc == 3) {
1389: DMCreateInjection_DA_3D(dac,daf,&inject);
1390: }
1391: MatCreateScatter(PetscObjectComm((PetscObject)inject), inject, mat);
1392: VecScatterDestroy(&inject);
1393: return(0);
1394: }
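
/*
   A minimal usage sketch (not taken from the PETSc examples; the grid sizes and options
   are arbitrary assumptions): for a DMDA the public DMCreateInjection() interface ends
   up in DMCreateInjection_DA() above, and the resulting matrix is applied with
   MatRestrict() to sample a fine-grid vector onto the coincident coarse-grid nodes.
   Error checking (CHKERRQ) is omitted for brevity.

     #include <petscdmda.h>

     int main(int argc,char **argv)
     {
       DM  dac,daf;
       Mat inject;
       Vec xfine,xcoarse;

       PetscInitialize(&argc,&argv,NULL,NULL);
       DMDACreate2d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,DMDA_STENCIL_STAR,
                    5,5,PETSC_DECIDE,PETSC_DECIDE,1,1,NULL,NULL,&dac);
       DMSetUp(dac);
       DMRefine(dac,PetscObjectComm((PetscObject)dac),&daf);
       DMCreateInjection(dac,daf,&inject);
       DMCreateGlobalVector(daf,&xfine);
       DMCreateGlobalVector(dac,&xcoarse);
       VecSet(xfine,2.0);
       MatRestrict(inject,xfine,xcoarse);
       VecDestroy(&xfine);
       VecDestroy(&xcoarse);
       MatDestroy(&inject);
       DMDestroy(&daf);
       DMDestroy(&dac);
       PetscFinalize();
       return 0;
     }
*/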
1396: /*@
1397: DMCreateAggregates - Deprecated, see DMDACreateAggregates.
1398: @*/
1399: PetscErrorCode DMCreateAggregates(DM dm1,DM dm2,Mat *mat)
1400: {
1401: return DMDACreateAggregates(dm1,dm2,mat);
1402: }
1404: /*@
1405: DMDACreateAggregates - Gets the aggregates that map between
1406: grids associated with two DMDAs.
1408:    Collective on dac
1410:    Input Parameters:
1411: +  dac - the coarse grid DMDA
1412: -  daf - the fine grid DMDA
1414:    Output Parameter:
1415: . rest - the restriction matrix (transpose of the projection matrix)
1417: Level: intermediate
1419: Note: This routine is not used by PETSc.
1420: It is not clear what its use case is and it may be removed in a future release.
1421: Users should contact petsc-maint@mcs.anl.gov if they plan to use it.
1423: .seealso: DMRefine(), DMCreateInjection(), DMCreateInterpolation()
1424: @*/
1425: PetscErrorCode DMDACreateAggregates(DM dac,DM daf,Mat *rest)
1426: {
1427: PetscErrorCode ierr;
1428: PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc;
1429: PetscInt dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
1430: DMBoundaryType bxc,byc,bzc,bxf,byf,bzf;
1431: DMDAStencilType stc,stf;
1432: PetscInt i,j,l;
1433: PetscInt i_start,j_start,l_start, m_f,n_f,p_f;
1434: PetscInt i_start_ghost,j_start_ghost,l_start_ghost,m_ghost,n_ghost,p_ghost;
1435: const PetscInt *idx_f;
1436: PetscInt i_c,j_c,l_c;
1437: PetscInt i_start_c,j_start_c,l_start_c, m_c,n_c,p_c;
1438: PetscInt i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,m_ghost_c,n_ghost_c,p_ghost_c;
1439: const PetscInt *idx_c;
1440: PetscInt d;
1441: PetscInt a;
1442: PetscInt max_agg_size;
1443: PetscInt *fine_nodes;
1444: PetscScalar *one_vec;
1445: PetscInt fn_idx;
1446: ISLocalToGlobalMapping ltogmf,ltogmc;
1453: DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);
1454: DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);
1455: if (dimc != dimf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf);
1456: if (dofc != doff) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff);
1457:   if (sc != sf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil widths of DMDA do not match %D %D",sc,sf);
1458: if (bxc != bxf || byc != byf || bzc != bzf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
1459: if (stc != stf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");
1461: if (Mf < Mc) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Mc %D, Mf %D", Mc, Mf);
1462: if (Nf < Nc) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Nc %D, Nf %D", Nc, Nf);
1463: if (Pf < Pc) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Pc %D, Pf %D", Pc, Pf);
1465: if (Pc < 0) Pc = 1;
1466: if (Pf < 0) Pf = 1;
1467: if (Nc < 0) Nc = 1;
1468: if (Nf < 0) Nf = 1;
1470: DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);
1471: DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);
1473:   DMGetLocalToGlobalMapping(daf,&ltogmf);
1474: ISLocalToGlobalMappingGetIndices(ltogmf,&idx_f);
1476: DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);
1477: DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);
1479:   DMGetLocalToGlobalMapping(dac,&ltogmc);
1480: ISLocalToGlobalMappingGetIndices(ltogmc,&idx_c);
1482:   /*
1483:      Basic idea: in 2D, let r_x and r_y be the refinement ratios in the first and
1484:      second dimensions respectively, and let (i,j) be a coarse grid node.
1485:      All fine grid nodes with indices in [r_x*i, r_x*(i+1)) and [r_y*j, r_y*(j+1))
1486:      are grouped into the same coarse grid aggregate.
1487:      Each specific dof on the fine grid is mapped to the matching dof on the coarse grid.
1488:   */
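  /*
     Worked example: with Mf = 9 and Mc = 3 in the x direction, Mf/Mc = 3, so coarse
     node i_c = 0 aggregates fine nodes i = 0,1,2, coarse node i_c = 1 aggregates
     i = 3,4,5, and i_c = 2 aggregates i = 6,7,8. The preallocation bound
     max_agg_size = (Mf/Mc+1)*(Nf/Nc+1)*(Pf/Pc+1) used below allows one extra fine
     node per dimension when the ratios do not divide evenly.
  */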
1490: max_agg_size = (Mf/Mc+1)*(Nf/Nc+1)*(Pf/Pc+1);
1492: /* create the matrix that will contain the restriction operator */
1493: MatCreateAIJ(PetscObjectComm((PetscObject)daf), m_c*n_c*p_c*dofc, m_f*n_f*p_f*doff, Mc*Nc*Pc*dofc, Mf*Nf*Pf*doff,
1494: max_agg_size, NULL, max_agg_size, NULL, rest);
1496: /* store nodes in the fine grid here */
1497: PetscMalloc2(max_agg_size, &one_vec,max_agg_size, &fine_nodes);
1498: for (i=0; i<max_agg_size; i++) one_vec[i] = 1.0;
1500: /* loop over all coarse nodes */
1501: for (l_c=l_start_c; l_c<l_start_c+p_c; l_c++) {
1502: for (j_c=j_start_c; j_c<j_start_c+n_c; j_c++) {
1503: for (i_c=i_start_c; i_c<i_start_c+m_c; i_c++) {
1504: for (d=0; d<dofc; d++) {
1505: /* convert to local "natural" numbering and then to PETSc global numbering */
1506: a = idx_c[dofc*(m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c))] + d;
1508: fn_idx = 0;
1509:         /* Corresponding fine points are all points (i_f, j_f, l_f) such that
1510:            i_c*Mf/Mc <= i_f < min((i_c+1)*Mf/Mc, Mf)
1511:            (same for the other dimensions)
1512:         */
1513: for (l=l_c*Pf/Pc; l<PetscMin((l_c+1)*Pf/Pc,Pf); l++) {
1514: for (j=j_c*Nf/Nc; j<PetscMin((j_c+1)*Nf/Nc,Nf); j++) {
1515: for (i=i_c*Mf/Mc; i<PetscMin((i_c+1)*Mf/Mc,Mf); i++) {
1516: fine_nodes[fn_idx] = idx_f[doff*(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))] + d;
1517: fn_idx++;
1518: }
1519: }
1520: }
1521: /* add all these points to one aggregate */
1522: MatSetValues(*rest, 1, &a, fn_idx, fine_nodes, one_vec, INSERT_VALUES);
1523: }
1524: }
1525: }
1526: }
1527: ISLocalToGlobalMappingRestoreIndices(ltogmf,&idx_f);
1528: ISLocalToGlobalMappingRestoreIndices(ltogmc,&idx_c);
1529: PetscFree2(one_vec,fine_nodes);
1530: MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY);
1531: MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY);
1532: return(0);
1533: }
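
/*
   Since the rows of the matrix assembled above are coarse dofs and the columns are
   fine dofs, applying it with MatMult() sums each coarse aggregate over its fine-grid
   members. A minimal sketch, assuming dac and daf are compatible DMDAs on the same
   communicator (e.g. daf obtained from dac with DMRefine()); error checking omitted:

     Mat agg;
     Vec xfine,xcoarse;

     DMDACreateAggregates(dac,daf,&agg);
     DMCreateGlobalVector(daf,&xfine);
     DMCreateGlobalVector(dac,&xcoarse);
     VecSet(xfine,1.0);
     MatMult(agg,xfine,xcoarse);
     VecDestroy(&xfine);
     VecDestroy(&xcoarse);
     MatDestroy(&agg);

   With xfine set to all ones, each entry of xcoarse ends up holding the number of
   fine-grid nodes in the corresponding aggregate.
*/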