Actual source code: dainterp.c
1: /*
2: Code for interpolating between grids represented by DMDAs
3: */
5: /*
6: For linear elements there are two branches of code that compute the interpolation. They should produce the same results but may not. The "new version" does
7: not handle periodic domains; the old one does. Change NEWVERSION to 1 to compile in the new version. Eventually, when we are sure the two produce identical results,
8: we will remove/merge the new version. Based on current tests, both produce the same results. We are leaving NEWVERSION in the code for now since some
9: consider it cleaner, but the old version is turned on since it handles the periodic case.
10: */
11: #define NEWVERSION 0
13: #include <petsc/private/dmdaimpl.h>
15: /*
16: Since the interpolation uses MATMAIJ for dof > 0 we convert requests for non-MATAIJ based matrices to MATAIJ.
17: This is a bit of a hack; the reason for it is partially that -dm_mat_type defines the
18: matrix type for both the operator matrices and the interpolation matrices, so that users
19: can select matrix types based on MATAIJ for accelerators
20: */
21: static PetscErrorCode ConvertToAIJ(MatType intype, MatType *outtype)
22: {
23: PetscInt i;
24: char const *types[3] = {MATAIJ, MATSEQAIJ, MATMPIAIJ};
25: PetscBool flg;
27: PetscFunctionBegin;
28: *outtype = MATAIJ;
29: for (i = 0; i < 3; i++) {
30: PetscCall(PetscStrbeginswith(intype, types[i], &flg));
31: if (flg) {
32: *outtype = intype;
33: PetscFunctionReturn(PETSC_SUCCESS);
34: }
35: }
36: PetscFunctionReturn(PETSC_SUCCESS);
37: }
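/*
  For example, a request for MATAIJCUSPARSE or MATSEQAIJKOKKOS begins with "aij" / "seqaij" and
  is kept unchanged, while a request for, say, MATBAIJ or MATSHELL falls back to plain MATAIJ.
*/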
39: static PetscErrorCode DMCreateInterpolation_DA_1D_Q1(DM dac, DM daf, Mat *A)
40: {
41: PetscInt i, i_start, m_f, Mx;
42: const PetscInt *idx_f, *idx_c;
43: PetscInt m_ghost, m_ghost_c;
44: PetscInt row, col, i_start_ghost, mx, m_c, nc, ratio;
45: PetscInt i_c, i_start_c, i_start_ghost_c, cols[2], dof;
46: PetscScalar v[2], x;
47: Mat mat;
48: DMBoundaryType bx;
49: ISLocalToGlobalMapping ltog_f, ltog_c;
50: MatType mattype;
52: PetscFunctionBegin;
53: PetscCall(DMDAGetInfo(dac, NULL, &Mx, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &bx, NULL, NULL, NULL));
54: PetscCall(DMDAGetInfo(daf, NULL, &mx, NULL, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
55: if (bx == DM_BOUNDARY_PERIODIC) {
56: ratio = mx / Mx;
57: PetscCheck(ratio * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
58: } else {
59: ratio = (mx - 1) / (Mx - 1);
60: PetscCheck(ratio * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
61: }
63: PetscCall(DMDAGetCorners(daf, &i_start, NULL, NULL, &m_f, NULL, NULL));
64: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, NULL, NULL, &m_ghost, NULL, NULL));
65: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
66: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
68: PetscCall(DMDAGetCorners(dac, &i_start_c, NULL, NULL, &m_c, NULL, NULL));
69: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, NULL, NULL, &m_ghost_c, NULL, NULL));
70: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
71: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
73: /* create interpolation matrix */
74: PetscCall(MatCreate(PetscObjectComm((PetscObject)dac), &mat));
75: #if defined(PETSC_HAVE_CUDA)
76: /*
77: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
78: we don't want the original unconverted matrix copied to the GPU
79: */
80: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
81: #endif
82: PetscCall(MatSetSizes(mat, m_f, m_c, mx, Mx));
83: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
84: PetscCall(MatSetType(mat, mattype));
85: PetscCall(MatSeqAIJSetPreallocation(mat, 2, NULL));
86: PetscCall(MatMPIAIJSetPreallocation(mat, 2, NULL, 1, NULL));
88: /* loop over local fine grid nodes setting interpolation for those*/
89: if (!NEWVERSION) {
90: for (i = i_start; i < i_start + m_f; i++) {
91: /* convert to local "natural" numbering and then to PETSc global numbering */
92: row = idx_f[i - i_start_ghost];
94: i_c = (i / ratio); /* coarse grid node to left of fine grid node */
95: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
96: i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,
97: i_start, i_c, i_start_ghost_c);
99: /*
100: Only include those interpolation points that are truly
101: nonzero. Note this is very important for final grid lines
102: in x direction; since they have no right neighbor
103: */
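/*
  Linear weights: with x = (i - i_c*ratio)/ratio in [0,1), the fine value is
  (1-x)*u(i_c) + x*u(i_c+1); when the fine node coincides with a coarse node (x == 0)
  only the single weight 1.0 is inserted.
*/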
104: x = ((PetscReal)(i - i_c * ratio)) / ((PetscReal)ratio);
105: nc = 0;
106: /* coarse node to the left; or we are right on it */
107: col = (i_c - i_start_ghost_c);
108: cols[nc] = idx_c[col];
109: v[nc++] = -x + 1.0;
110: /* one right? */
111: if (i_c * ratio != i) {
112: cols[nc] = idx_c[col + 1];
113: v[nc++] = x;
114: }
115: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
116: }
118: } else {
119: PetscScalar *xi;
120: PetscInt li, nxi, n;
121: PetscScalar Ni[2];
123: /* compute local coordinate arrays */
124: nxi = ratio + 1;
125: PetscCall(PetscMalloc1(nxi, &xi));
126: for (li = 0; li < nxi; li++) xi[li] = -1.0 + (PetscScalar)li * (2.0 / (PetscScalar)(nxi - 1));
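/* xi[] holds the reference coordinates in [-1,1] of the nxi fine nodes spanning one coarse
   element; the linear shape functions below are Ni = 0.5*(1 -+ xi) */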
128: for (i = i_start; i < i_start + m_f; i++) {
129: /* convert to local "natural" numbering and then to PETSc global numbering */
130: row = idx_f[(i - i_start_ghost)];
132: i_c = (i / ratio); /* coarse grid node to left of fine grid node */
133: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
134: i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,
135: i_start, i_c, i_start_ghost_c);
137: /* remainders */
138: li = i - ratio * (i / ratio);
139: if (i == mx - 1) li = nxi - 1;
141: /* corners */
142: col = (i_c - i_start_ghost_c);
143: cols[0] = idx_c[col];
144: Ni[0] = 1.0;
145: if ((li == 0) || (li == nxi - 1)) {
146: PetscCall(MatSetValue(mat, row, cols[0], Ni[0], INSERT_VALUES));
147: continue;
148: }
150: /* edges + interior */
151: /* remainders */
152: if (i == mx - 1) i_c--;
154: col = (i_c - i_start_ghost_c);
155: cols[0] = idx_c[col]; /* coarse node to the left; or we are right on it */
156: cols[1] = idx_c[col + 1];
158: Ni[0] = 0.5 * (1.0 - xi[li]);
159: Ni[1] = 0.5 * (1.0 + xi[li]);
160: for (n = 0; n < 2; n++) {
161: if (PetscAbsScalar(Ni[n]) < 1.0e-32) cols[n] = -1;
162: }
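/* a negative column index makes MatSetValues() ignore that entry, so (near-)zero weights are dropped */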
163: PetscCall(MatSetValues(mat, 1, &row, 2, cols, Ni, INSERT_VALUES));
164: }
165: PetscCall(PetscFree(xi));
166: }
167: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
168: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
169: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
170: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
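/* wrap the scalar interpolation in a MAIJ matrix so the same stencil is applied to each of the
   dof components per node without storing dof copies of the coefficients */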
171: PetscCall(MatCreateMAIJ(mat, dof, A));
172: PetscCall(MatDestroy(&mat));
173: PetscFunctionReturn(PETSC_SUCCESS);
174: }
176: static PetscErrorCode DMCreateInterpolation_DA_1D_Q0(DM dac, DM daf, Mat *A)
177: {
178: PetscInt i, i_start, m_f, Mx;
179: const PetscInt *idx_f, *idx_c;
180: ISLocalToGlobalMapping ltog_f, ltog_c;
181: PetscInt m_ghost, m_ghost_c;
182: PetscInt row, col, i_start_ghost, mx, m_c, nc, ratio;
183: PetscInt i_c, i_start_c, i_start_ghost_c, cols[2], dof;
184: PetscScalar v[2], x;
185: Mat mat;
186: DMBoundaryType bx;
187: MatType mattype;
189: PetscFunctionBegin;
190: PetscCall(DMDAGetInfo(dac, NULL, &Mx, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &bx, NULL, NULL, NULL));
191: PetscCall(DMDAGetInfo(daf, NULL, &mx, NULL, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
192: if (bx == DM_BOUNDARY_PERIODIC) {
193: PetscCheck(Mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be positive", Mx);
194: ratio = mx / Mx;
195: PetscCheck(ratio * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
196: } else {
197: PetscCheck(Mx >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be greater than 1", Mx);
198: ratio = (mx - 1) / (Mx - 1);
199: PetscCheck(ratio * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
200: }
202: PetscCall(DMDAGetCorners(daf, &i_start, NULL, NULL, &m_f, NULL, NULL));
203: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, NULL, NULL, &m_ghost, NULL, NULL));
204: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
205: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
207: PetscCall(DMDAGetCorners(dac, &i_start_c, NULL, NULL, &m_c, NULL, NULL));
208: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, NULL, NULL, &m_ghost_c, NULL, NULL));
209: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
210: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
212: /* create interpolation matrix */
213: PetscCall(MatCreate(PetscObjectComm((PetscObject)dac), &mat));
214: #if defined(PETSC_HAVE_CUDA)
215: /*
216: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
217: we don't want the original unconverted matrix copied to the GPU
218: */
219: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
220: #endif
221: PetscCall(MatSetSizes(mat, m_f, m_c, mx, Mx));
222: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
223: PetscCall(MatSetType(mat, mattype));
224: PetscCall(MatSeqAIJSetPreallocation(mat, 2, NULL));
225: PetscCall(MatMPIAIJSetPreallocation(mat, 2, NULL, 0, NULL));
227: /* loop over local fine grid nodes setting interpolation for those*/
228: for (i = i_start; i < i_start + m_f; i++) {
229: /* convert to local "natural" numbering and then to PETSc global numbering */
230: row = idx_f[(i - i_start_ghost)];
232: i_c = (i / ratio); /* coarse grid node to left of fine grid node */
234: /*
235: Only include those interpolation points that are truly
236: nonzero. Note this is very important for final grid lines
237: in x direction; since they have no right neighbor
238: */
239: x = ((PetscReal)(i - i_c * ratio)) / ((PetscReal)ratio);
240: nc = 0;
241: /* coarse node to the left; or we are right on it */
242: col = (i_c - i_start_ghost_c);
243: cols[nc] = idx_c[col];
244: v[nc++] = -x + 1.0;
245: /* one right? */
246: if (i_c * ratio != i) {
247: cols[nc] = idx_c[col + 1];
248: v[nc++] = x;
249: }
250: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
251: }
252: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
253: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
254: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
255: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
256: PetscCall(MatCreateMAIJ(mat, dof, A));
257: PetscCall(MatDestroy(&mat));
258: PetscCall(PetscLogFlops(5.0 * m_f));
259: PetscFunctionReturn(PETSC_SUCCESS);
260: }
262: static PetscErrorCode DMCreateInterpolation_DA_2D_Q1(DM dac, DM daf, Mat *A)
263: {
264: PetscInt i, j, i_start, j_start, m_f, n_f, Mx, My, dof;
265: const PetscInt *idx_c, *idx_f;
266: ISLocalToGlobalMapping ltog_f, ltog_c;
267: PetscInt m_ghost, n_ghost, m_ghost_c, n_ghost_c, *dnz, *onz;
268: PetscInt row, col, i_start_ghost, j_start_ghost, cols[4], mx, m_c, my, nc, ratioi, ratioj;
269: PetscInt i_c, j_c, i_start_c, j_start_c, n_c, i_start_ghost_c, j_start_ghost_c, col_shift, col_scale;
270: PetscMPIInt size_c, size_f, rank_f;
271: PetscScalar v[4], x, y;
272: Mat mat;
273: DMBoundaryType bx, by;
274: MatType mattype;
276: PetscFunctionBegin;
277: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, NULL, NULL, NULL, NULL, NULL, NULL, &bx, &by, NULL, NULL));
278: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
279: if (bx == DM_BOUNDARY_PERIODIC) {
280: PetscCheck(Mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be positive", Mx);
281: ratioi = mx / Mx;
282: PetscCheck(ratioi * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
283: } else {
284: PetscCheck(Mx >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be greater than 1", Mx);
285: ratioi = (mx - 1) / (Mx - 1);
286: PetscCheck(ratioi * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
287: }
288: if (by == DM_BOUNDARY_PERIODIC) {
289: PetscCheck(My, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be positive", My);
290: ratioj = my / My;
291: PetscCheck(ratioj * My == my, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
292: } else {
293: PetscCheck(My >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be greater than 1", My);
294: ratioj = (my - 1) / (My - 1);
295: PetscCheck(ratioj * (My - 1) == my - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
296: }
298: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, NULL, &m_f, &n_f, NULL));
299: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, NULL, &m_ghost, &n_ghost, NULL));
300: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
301: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
303: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, NULL, &m_c, &n_c, NULL));
304: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, NULL, &m_ghost_c, &n_ghost_c, NULL));
305: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
306: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
308: /*
309: Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
310: The coarse vector is then duplicated 4 times (each copy lives on 1/4 of the
311: processors). Its effective length is hence 4 times its normal length, which is
312: why col_scale multiplies the interpolation matrix column sizes.
313: col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
314: copy of the coarse vector. A bit of a hack, but it works.
316: In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
317: */
318: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac), &size_c));
319: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf), &size_f));
320: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf), &rank_f));
321: col_scale = size_f / size_c;
322: col_shift = Mx * My * (rank_f / size_c);
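/*
  For instance, with size_f = 4 and size_c = 1, col_scale = 4 and ranks 0..3 use
  col_shift = 0, Mx*My, 2*Mx*My and 3*Mx*My respectively, so each group of size_c fine ranks
  addresses its own copy of the duplicated coarse vector.
*/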
324: MatPreallocateBegin(PetscObjectComm((PetscObject)daf), m_f * n_f, col_scale * m_c * n_c, dnz, onz);
325: for (j = j_start; j < j_start + n_f; j++) {
326: for (i = i_start; i < i_start + m_f; i++) {
327: /* convert to local "natural" numbering and then to PETSc global numbering */
328: row = idx_f[(m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
330: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
331: j_c = (j / ratioj); /* coarse grid node below fine grid node */
333: PetscCheck(j_c >= j_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
334: j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT,
335: j_start, j_c, j_start_ghost_c);
336: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
337: i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,
338: i_start, i_c, i_start_ghost_c);
340: /*
341: Only include those interpolation points that are truly
342: nonzero. Note this is very important for final grid lines
343: in x and y directions; since they have no right/top neighbors
344: */
345: nc = 0;
346: /* one left and below; or we are right on it */
347: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
348: cols[nc++] = col_shift + idx_c[col];
349: /* one right and below */
350: if (i_c * ratioi != i) cols[nc++] = col_shift + idx_c[col + 1];
351: /* one left and above */
352: if (j_c * ratioj != j) cols[nc++] = col_shift + idx_c[col + m_ghost_c];
353: /* one right and above */
354: if (i_c * ratioi != i && j_c * ratioj != j) cols[nc++] = col_shift + idx_c[col + (m_ghost_c + 1)];
355: PetscCall(MatPreallocateSet(row, nc, cols, dnz, onz));
356: }
357: }
358: PetscCall(MatCreate(PetscObjectComm((PetscObject)daf), &mat));
359: #if defined(PETSC_HAVE_CUDA)
360: /*
361: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
362: we don't want the original unconverted matrix copied to the GPU
363: */
364: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
365: #endif
366: PetscCall(MatSetSizes(mat, m_f * n_f, col_scale * m_c * n_c, mx * my, col_scale * Mx * My));
367: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
368: PetscCall(MatSetType(mat, mattype));
369: PetscCall(MatSeqAIJSetPreallocation(mat, 0, dnz));
370: PetscCall(MatMPIAIJSetPreallocation(mat, 0, dnz, 0, onz));
371: MatPreallocateEnd(dnz, onz);
373: /* loop over local fine grid nodes setting interpolation for those*/
374: if (!NEWVERSION) {
375: for (j = j_start; j < j_start + n_f; j++) {
376: for (i = i_start; i < i_start + m_f; i++) {
377: /* convert to local "natural" numbering and then to PETSc global numbering */
378: row = idx_f[(m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
380: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
381: j_c = (j / ratioj); /* coarse grid node below fine grid node */
383: /*
384: Only include those interpolation points that are truly
385: nonzero. Note this is very important for final grid lines
386: in x and y directions; since they have no right/top neighbors
387: */
388: x = ((PetscReal)(i - i_c * ratioi)) / ((PetscReal)ratioi);
389: y = ((PetscReal)(j - j_c * ratioj)) / ((PetscReal)ratioj);
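/*
  The four weights below are the bilinear products written out:
  (1-x)*(1-y) = x*y - x - y + 1, x*(1-y) = -x*y + x, (1-x)*y = -x*y + y, and x*y,
  attached to the lower-left, lower-right, upper-left and upper-right coarse nodes respectively.
*/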
391: nc = 0;
392: /* one left and below; or we are right on it */
393: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
394: cols[nc] = col_shift + idx_c[col];
395: v[nc++] = x * y - x - y + 1.0;
396: /* one right and below */
397: if (i_c * ratioi != i) {
398: cols[nc] = col_shift + idx_c[col + 1];
399: v[nc++] = -x * y + x;
400: }
401: /* one left and above */
402: if (j_c * ratioj != j) {
403: cols[nc] = col_shift + idx_c[col + m_ghost_c];
404: v[nc++] = -x * y + y;
405: }
406: /* one right and above */
407: if (j_c * ratioj != j && i_c * ratioi != i) {
408: cols[nc] = col_shift + idx_c[col + (m_ghost_c + 1)];
409: v[nc++] = x * y;
410: }
411: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
412: }
413: }
415: } else {
416: PetscScalar Ni[4];
417: PetscScalar *xi, *eta;
418: PetscInt li, nxi, lj, neta;
420: /* compute local coordinate arrays */
421: nxi = ratioi + 1;
422: neta = ratioj + 1;
423: PetscCall(PetscMalloc1(nxi, &xi));
424: PetscCall(PetscMalloc1(neta, &eta));
425: for (li = 0; li < nxi; li++) xi[li] = -1.0 + (PetscScalar)li * (2.0 / (PetscScalar)(nxi - 1));
426: for (lj = 0; lj < neta; lj++) eta[lj] = -1.0 + (PetscScalar)lj * (2.0 / (PetscScalar)(neta - 1));
428: /* loop over local fine grid nodes setting interpolation for those*/
429: for (j = j_start; j < j_start + n_f; j++) {
430: for (i = i_start; i < i_start + m_f; i++) {
431: /* convert to local "natural" numbering and then to PETSc global numbering */
432: row = idx_f[(m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
434: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
435: j_c = (j / ratioj); /* coarse grid node below fine grid node */
437: /* remainders */
438: li = i - ratioi * (i / ratioi);
439: if (i == mx - 1) li = nxi - 1;
440: lj = j - ratioj * (j / ratioj);
441: if (j == my - 1) lj = neta - 1;
443: /* corners */
444: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
445: cols[0] = col_shift + idx_c[col]; /* left, below */
446: Ni[0] = 1.0;
447: if ((li == 0) || (li == nxi - 1)) {
448: if ((lj == 0) || (lj == neta - 1)) {
449: PetscCall(MatSetValue(mat, row, cols[0], Ni[0], INSERT_VALUES));
450: continue;
451: }
452: }
454: /* edges + interior */
455: /* remainders */
456: if (i == mx - 1) i_c--;
457: if (j == my - 1) j_c--;
459: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
460: cols[0] = col_shift + idx_c[col]; /* left, below */
461: cols[1] = col_shift + idx_c[col + 1]; /* right, below */
462: cols[2] = col_shift + idx_c[col + m_ghost_c]; /* left, above */
463: cols[3] = col_shift + idx_c[col + (m_ghost_c + 1)]; /* right, above */
465: Ni[0] = 0.25 * (1.0 - xi[li]) * (1.0 - eta[lj]);
466: Ni[1] = 0.25 * (1.0 + xi[li]) * (1.0 - eta[lj]);
467: Ni[2] = 0.25 * (1.0 - xi[li]) * (1.0 + eta[lj]);
468: Ni[3] = 0.25 * (1.0 + xi[li]) * (1.0 + eta[lj]);
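/* standard Q1 shape functions on the reference square [-1,1] x [-1,1]; they sum to 1 at every (xi,eta) */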
470: nc = 0;
471: if (PetscAbsScalar(Ni[0]) < 1.0e-32) cols[0] = -1;
472: if (PetscAbsScalar(Ni[1]) < 1.0e-32) cols[1] = -1;
473: if (PetscAbsScalar(Ni[2]) < 1.0e-32) cols[2] = -1;
474: if (PetscAbsScalar(Ni[3]) < 1.0e-32) cols[3] = -1;
476: PetscCall(MatSetValues(mat, 1, &row, 4, cols, Ni, INSERT_VALUES));
477: }
478: }
479: PetscCall(PetscFree(xi));
480: PetscCall(PetscFree(eta));
481: }
482: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
483: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
484: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
485: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
486: PetscCall(MatCreateMAIJ(mat, dof, A));
487: PetscCall(MatDestroy(&mat));
488: PetscFunctionReturn(PETSC_SUCCESS);
489: }
491: /*
492: Contributed by Andrei Draganescu <aidraga@sandia.gov>
493: */
494: static PetscErrorCode DMCreateInterpolation_DA_2D_Q0(DM dac, DM daf, Mat *A)
495: {
496: PetscInt i, j, i_start, j_start, m_f, n_f, Mx, My, dof;
497: const PetscInt *idx_c, *idx_f;
498: ISLocalToGlobalMapping ltog_f, ltog_c;
499: PetscInt m_ghost, n_ghost, m_ghost_c, n_ghost_c, *dnz, *onz;
500: PetscInt row, col, i_start_ghost, j_start_ghost, cols[4], mx, m_c, my, nc, ratioi, ratioj;
501: PetscInt i_c, j_c, i_start_c, j_start_c, n_c, i_start_ghost_c, j_start_ghost_c, col_shift, col_scale;
502: PetscMPIInt size_c, size_f, rank_f;
503: PetscScalar v[4];
504: Mat mat;
505: DMBoundaryType bx, by;
506: MatType mattype;
508: PetscFunctionBegin;
509: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, NULL, NULL, NULL, NULL, NULL, NULL, &bx, &by, NULL, NULL));
510: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
511: PetscCheck(Mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be positive", Mx);
512: PetscCheck(My, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be positive", My);
513: ratioi = mx / Mx;
514: ratioj = my / My;
515: PetscCheck(ratioi * Mx == mx, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Fine grid points must be multiple of coarse grid points in x");
516: PetscCheck(ratioj * My == my, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Fine grid points must be multiple of coarse grid points in y");
517: PetscCheck(ratioi == 2, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Coarsening factor in x must be 2");
518: PetscCheck(ratioj == 2, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Coarsening factor in y must be 2");
520: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, NULL, &m_f, &n_f, NULL));
521: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, NULL, &m_ghost, &n_ghost, NULL));
522: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
523: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
525: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, NULL, &m_c, &n_c, NULL));
526: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, NULL, &m_ghost_c, &n_ghost_c, NULL));
527: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
528: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
530: /*
531: Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
532: The coarse vector is then duplicated 4 times (each copy lives on 1/4 of the
533: processors). Its effective length is hence 4 times its normal length, which is
534: why col_scale multiplies the interpolation matrix column sizes.
535: col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
536: copy of the coarse vector. A bit of a hack, but it works.
538: In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
539: */
540: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac), &size_c));
541: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf), &size_f));
542: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf), &rank_f));
543: col_scale = size_f / size_c;
544: col_shift = Mx * My * (rank_f / size_c);
546: MatPreallocateBegin(PetscObjectComm((PetscObject)daf), m_f * n_f, col_scale * m_c * n_c, dnz, onz);
547: for (j = j_start; j < j_start + n_f; j++) {
548: for (i = i_start; i < i_start + m_f; i++) {
549: /* convert to local "natural" numbering and then to PETSc global numbering */
550: row = idx_f[(m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
552: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
553: j_c = (j / ratioj); /* coarse grid node below fine grid node */
555: PetscCheck(j_c >= j_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
556: j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT,
557: j_start, j_c, j_start_ghost_c);
558: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
559: i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,
560: i_start, i_c, i_start_ghost_c);
562: /*
563: Only include those interpolation points that are truly
564: nonzero. Note this is very important for final grid lines
565: in x and y directions; since they have no right/top neighbors
566: */
567: nc = 0;
568: /* one left and below; or we are right on it */
569: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
570: cols[nc++] = col_shift + idx_c[col];
571: PetscCall(MatPreallocateSet(row, nc, cols, dnz, onz));
572: }
573: }
574: PetscCall(MatCreate(PetscObjectComm((PetscObject)daf), &mat));
575: #if defined(PETSC_HAVE_CUDA)
576: /*
577: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
578: we don't want the original unconverted matrix copied to the GPU
579: */
580: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
581: #endif
582: PetscCall(MatSetSizes(mat, m_f * n_f, col_scale * m_c * n_c, mx * my, col_scale * Mx * My));
583: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
584: PetscCall(MatSetType(mat, mattype));
585: PetscCall(MatSeqAIJSetPreallocation(mat, 0, dnz));
586: PetscCall(MatMPIAIJSetPreallocation(mat, 0, dnz, 0, onz));
587: MatPreallocateEnd(dnz, onz);
589: /* loop over local fine grid nodes setting interpolation for those*/
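/* piecewise constant (Q0) interpolation: each fine node takes the value of the coarse node at the
   lower-left of the coarse cell containing it, so every row of the matrix has the single entry 1.0 */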
590: for (j = j_start; j < j_start + n_f; j++) {
591: for (i = i_start; i < i_start + m_f; i++) {
592: /* convert to local "natural" numbering and then to PETSc global numbering */
593: row = idx_f[(m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
595: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
596: j_c = (j / ratioj); /* coarse grid node below fine grid node */
597: nc = 0;
598: /* one left and below; or we are right on it */
599: col = (m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
600: cols[nc] = col_shift + idx_c[col];
601: v[nc++] = 1.0;
603: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
604: }
605: }
606: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
607: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
608: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
609: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
610: PetscCall(MatCreateMAIJ(mat, dof, A));
611: PetscCall(MatDestroy(&mat));
612: PetscCall(PetscLogFlops(13.0 * m_f * n_f));
613: PetscFunctionReturn(PETSC_SUCCESS);
614: }
616: /*
617: Contributed by Jianming Yang <jianming-yang@uiowa.edu>
618: */
619: static PetscErrorCode DMCreateInterpolation_DA_3D_Q0(DM dac, DM daf, Mat *A)
620: {
621: PetscInt i, j, l, i_start, j_start, l_start, m_f, n_f, p_f, Mx, My, Mz, dof;
622: const PetscInt *idx_c, *idx_f;
623: ISLocalToGlobalMapping ltog_f, ltog_c;
624: PetscInt m_ghost, n_ghost, p_ghost, m_ghost_c, n_ghost_c, p_ghost_c, nc, *dnz, *onz;
625: PetscInt row, col, i_start_ghost, j_start_ghost, l_start_ghost, cols[8], mx, m_c, my, n_c, mz, p_c, ratioi, ratioj, ratiol;
626: PetscInt i_c, j_c, l_c, i_start_c, j_start_c, l_start_c, i_start_ghost_c, j_start_ghost_c, l_start_ghost_c, col_shift, col_scale;
627: PetscMPIInt size_c, size_f, rank_f;
628: PetscScalar v[8];
629: Mat mat;
630: DMBoundaryType bx, by, bz;
631: MatType mattype;
633: PetscFunctionBegin;
634: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, &Mz, NULL, NULL, NULL, NULL, NULL, &bx, &by, &bz, NULL));
635: PetscCheck(Mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be positive", Mx);
636: PetscCheck(My, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be positive", My);
637: PetscCheck(Mz, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of z coarse grid points %" PetscInt_FMT " must be positive", Mz);
638: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, &mz, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
639: ratioi = mx / Mx;
640: ratioj = my / My;
641: ratiol = mz / Mz;
642: PetscCheck(ratioi * Mx == mx, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Fine grid points must be multiple of coarse grid points in x");
643: PetscCheck(ratioj * My == my, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Fine grid points must be multiple of coarse grid points in y");
644: PetscCheck(ratiol * Mz == mz, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Fine grid points must be multiple of coarse grid points in z");
645: PetscCheck(ratioi == 2 || ratioi == 1, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Coarsening factor in x must be 1 or 2");
646: PetscCheck(ratioj == 2 || ratioj == 1, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Coarsening factor in y must be 1 or 2");
647: PetscCheck(ratiol == 2 || ratiol == 1, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_WRONG, "Coarsening factor in z must be 1 or 2");
649: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, &l_start, &m_f, &n_f, &p_f));
650: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, &l_start_ghost, &m_ghost, &n_ghost, &p_ghost));
651: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
652: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
654: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, &l_start_c, &m_c, &n_c, &p_c));
655: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, &l_start_ghost_c, &m_ghost_c, &n_ghost_c, &p_ghost_c));
656: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
657: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
659: /*
660: Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
661: The coarse vector is then duplicated 4 times (each copy lives on 1/4 of the
662: processors). Its effective length is hence 4 times its normal length, which is
663: why col_scale multiplies the interpolation matrix column sizes.
664: col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
665: copy of the coarse vector. A bit of a hack, but it works.
667: In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
668: */
669: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac), &size_c));
670: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf), &size_f));
671: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf), &rank_f));
672: col_scale = size_f / size_c;
673: col_shift = Mx * My * Mz * (rank_f / size_c);
675: MatPreallocateBegin(PetscObjectComm((PetscObject)daf), m_f * n_f * p_f, col_scale * m_c * n_c * p_c, dnz, onz);
676: for (l = l_start; l < l_start + p_f; l++) {
677: for (j = j_start; j < j_start + n_f; j++) {
678: for (i = i_start; i < i_start + m_f; i++) {
679: /* convert to local "natural" numbering and then to PETSc global numbering */
680: row = idx_f[(m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
682: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
683: j_c = (j / ratioj); /* coarse grid node below fine grid node */
684: l_c = (l / ratiol);
686: PetscCheck(l_c >= l_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
687: l_start %" PetscInt_FMT " l_c %" PetscInt_FMT " l_start_ghost_c %" PetscInt_FMT,
688: l_start, l_c, l_start_ghost_c);
689: PetscCheck(j_c >= j_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
690: j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT,
691: j_start, j_c, j_start_ghost_c);
692: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
693: i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,
694: i_start, i_c, i_start_ghost_c);
696: /*
697: Only include those interpolation points that are truly
698: nonzero. Note this is very important for final grid lines
699: in x and y directions; since they have no right/top neighbors
700: */
701: nc = 0;
702: /* one left and below; or we are right on it */
703: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
704: cols[nc++] = col_shift + idx_c[col];
705: PetscCall(MatPreallocateSet(row, nc, cols, dnz, onz));
706: }
707: }
708: }
709: PetscCall(MatCreate(PetscObjectComm((PetscObject)daf), &mat));
710: #if defined(PETSC_HAVE_CUDA)
711: /*
712: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
713: we don't want the original unconverted matrix copied to the GPU
714: */
715: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
716: #endif
717: PetscCall(MatSetSizes(mat, m_f * n_f * p_f, col_scale * m_c * n_c * p_c, mx * my * mz, col_scale * Mx * My * Mz));
718: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
719: PetscCall(MatSetType(mat, mattype));
720: PetscCall(MatSeqAIJSetPreallocation(mat, 0, dnz));
721: PetscCall(MatMPIAIJSetPreallocation(mat, 0, dnz, 0, onz));
722: MatPreallocateEnd(dnz, onz);
724: /* loop over local fine grid nodes setting interpolation for those*/
725: for (l = l_start; l < l_start + p_f; l++) {
726: for (j = j_start; j < j_start + n_f; j++) {
727: for (i = i_start; i < i_start + m_f; i++) {
728: /* convert to local "natural" numbering and then to PETSc global numbering */
729: row = idx_f[(m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
731: i_c = (i / ratioi); /* coarse grid node to left of fine grid node */
732: j_c = (j / ratioj); /* coarse grid node below fine grid node */
733: l_c = (l / ratiol);
734: nc = 0;
735: /* one left and below; or we are right on it */
736: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
737: cols[nc] = col_shift + idx_c[col];
738: v[nc++] = 1.0;
740: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
741: }
742: }
743: }
744: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
745: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
746: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
747: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
748: PetscCall(MatCreateMAIJ(mat, dof, A));
749: PetscCall(MatDestroy(&mat));
750: PetscCall(PetscLogFlops(13.0 * m_f * n_f * p_f));
751: PetscFunctionReturn(PETSC_SUCCESS);
752: }
754: static PetscErrorCode DMCreateInterpolation_DA_3D_Q1(DM dac, DM daf, Mat *A)
755: {
756: PetscInt i, j, i_start, j_start, m_f, n_f, Mx, My, dof, l;
757: const PetscInt *idx_c, *idx_f;
758: ISLocalToGlobalMapping ltog_f, ltog_c;
759: PetscInt m_ghost, n_ghost, m_ghost_c, n_ghost_c, Mz, mz;
760: PetscInt row, col, i_start_ghost, j_start_ghost, cols[8], mx, m_c, my, nc, ratioi, ratioj, ratiok;
761: PetscInt i_c, j_c, i_start_c, j_start_c, n_c, i_start_ghost_c, j_start_ghost_c;
762: PetscInt l_start, p_f, l_start_ghost, p_ghost, l_start_c, p_c;
763: PetscInt l_start_ghost_c, p_ghost_c, l_c, *dnz, *onz;
764: PetscScalar v[8], x, y, z;
765: Mat mat;
766: DMBoundaryType bx, by, bz;
767: MatType mattype;
769: PetscFunctionBegin;
770: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, &Mz, NULL, NULL, NULL, NULL, NULL, &bx, &by, &bz, NULL));
771: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, &mz, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
772: if (mx == Mx) {
773: ratioi = 1;
774: } else if (bx == DM_BOUNDARY_PERIODIC) {
775: PetscCheck(Mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be positive", Mx);
776: ratioi = mx / Mx;
777: PetscCheck(ratioi * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
778: } else {
779: PetscCheck(Mx >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of x coarse grid points %" PetscInt_FMT " must be greater than 1", Mx);
780: ratioi = (mx - 1) / (Mx - 1);
781: PetscCheck(ratioi * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
782: }
783: if (my == My) {
784: ratioj = 1;
785: } else if (by == DM_BOUNDARY_PERIODIC) {
786: PetscCheck(My, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be positive", My);
787: ratioj = my / My;
788: PetscCheck(ratioj * My == my, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
789: } else {
790: PetscCheck(My >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of y coarse grid points %" PetscInt_FMT " must be greater than 1", My);
791: ratioj = (my - 1) / (My - 1);
792: PetscCheck(ratioj * (My - 1) == my - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
793: }
794: if (mz == Mz) {
795: ratiok = 1;
796: } else if (bz == DM_BOUNDARY_PERIODIC) {
797: PetscCheck(Mz, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of z coarse grid points %" PetscInt_FMT " must be positive", Mz);
798: ratiok = mz / Mz;
799: PetscCheck(ratiok * Mz == mz, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mz/Mz must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT, mz, Mz);
800: } else {
801: PetscCheck(Mz >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Number of z coarse grid points %" PetscInt_FMT " must be greater than 1", Mz);
802: ratiok = (mz - 1) / (Mz - 1);
803: PetscCheck(ratiok * (Mz - 1) == mz - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT, mz, Mz);
804: }
806: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, &l_start, &m_f, &n_f, &p_f));
807: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, &l_start_ghost, &m_ghost, &n_ghost, &p_ghost));
808: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
809: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
811: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, &l_start_c, &m_c, &n_c, &p_c));
812: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, &l_start_ghost_c, &m_ghost_c, &n_ghost_c, &p_ghost_c));
813: PetscCall(DMGetLocalToGlobalMapping(dac, &ltog_c));
814: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
816: /* create interpolation matrix, determining exact preallocation */
817: MatPreallocateBegin(PetscObjectComm((PetscObject)dac), m_f * n_f * p_f, m_c * n_c * p_c, dnz, onz);
818: /* loop over local fine grid nodes counting interpolating points */
819: for (l = l_start; l < l_start + p_f; l++) {
820: for (j = j_start; j < j_start + n_f; j++) {
821: for (i = i_start; i < i_start + m_f; i++) {
822: /* convert to local "natural" numbering and then to PETSc global numbering */
823: row = idx_f[(m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
824: i_c = (i / ratioi);
825: j_c = (j / ratioj);
826: l_c = (l / ratiok);
827: PetscCheck(l_c >= l_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
828: l_start %" PetscInt_FMT " l_c %" PetscInt_FMT " l_start_ghost_c %" PetscInt_FMT,
829: l_start, l_c, l_start_ghost_c);
830: PetscCheck(j_c >= j_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
831: j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT,
832: j_start, j_c, j_start_ghost_c);
833: PetscCheck(i_c >= i_start_ghost_c, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
834: i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,
835: i_start, i_c, i_start_ghost_c);
837: /*
838: Only include those interpolation points that are truly
839: nonzero. Note this is very important for final grid lines
840: in x and y directions; since they have no right/top neighbors
841: */
842: nc = 0;
843: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
844: cols[nc++] = idx_c[col];
845: if (i_c * ratioi != i) cols[nc++] = idx_c[col + 1];
846: if (j_c * ratioj != j) cols[nc++] = idx_c[col + m_ghost_c];
847: if (l_c * ratiok != l) cols[nc++] = idx_c[col + m_ghost_c * n_ghost_c];
848: if (j_c * ratioj != j && i_c * ratioi != i) cols[nc++] = idx_c[col + (m_ghost_c + 1)];
849: if (j_c * ratioj != j && l_c * ratiok != l) cols[nc++] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c)];
850: if (i_c * ratioi != i && l_c * ratiok != l) cols[nc++] = idx_c[col + (m_ghost_c * n_ghost_c + 1)];
851: if (i_c * ratioi != i && l_c * ratiok != l && j_c * ratioj != j) cols[nc++] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c + 1)];
852: PetscCall(MatPreallocateSet(row, nc, cols, dnz, onz));
853: }
854: }
855: }
856: PetscCall(MatCreate(PetscObjectComm((PetscObject)dac), &mat));
857: #if defined(PETSC_HAVE_CUDA)
858: /*
859: Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
860: we don't want the original unconverted matrix copied to the GPU
861: */
862: if (dof > 1) PetscCall(MatBindToCPU(mat, PETSC_TRUE));
863: #endif
864: PetscCall(MatSetSizes(mat, m_f * n_f * p_f, m_c * n_c * p_c, mx * my * mz, Mx * My * Mz));
865: PetscCall(ConvertToAIJ(dac->mattype, &mattype));
866: PetscCall(MatSetType(mat, mattype));
867: PetscCall(MatSeqAIJSetPreallocation(mat, 0, dnz));
868: PetscCall(MatMPIAIJSetPreallocation(mat, 0, dnz, 0, onz));
869: MatPreallocateEnd(dnz, onz);
871: /* loop over local fine grid nodes setting interpolation for those*/
872: if (!NEWVERSION) {
873: for (l = l_start; l < l_start + p_f; l++) {
874: for (j = j_start; j < j_start + n_f; j++) {
875: for (i = i_start; i < i_start + m_f; i++) {
876: /* convert to local "natural" numbering and then to PETSc global numbering */
877: row = idx_f[(m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
879: i_c = (i / ratioi);
880: j_c = (j / ratioj);
881: l_c = (l / ratiok);
883: /*
884: Only include those interpolation points that are truly
885: nonzero. Note this is very important for final grid lines
886: in x and y directions; since they have no right/top neighbors
887: */
888: x = ((PetscReal)(i - i_c * ratioi)) / ((PetscReal)ratioi);
889: y = ((PetscReal)(j - j_c * ratioj)) / ((PetscReal)ratioj);
890: z = ((PetscReal)(l - l_c * ratiok)) / ((PetscReal)ratiok);
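/*
  In the weights below each factor (1 -+ (2*t - 1)) equals 2*(1-t) or 2*t, so together with the
  leading .125 the eight values reduce to the trilinear products (1-x or x)*(1-y or y)*(1-z or z)
  for the corners of the surrounding coarse cell.
*/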
892: nc = 0;
893: /* one left and below; or we are right on it */
894: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
896: cols[nc] = idx_c[col];
897: v[nc++] = .125 * (1. - (2.0 * x - 1.)) * (1. - (2.0 * y - 1.)) * (1. - (2.0 * z - 1.));
899: if (i_c * ratioi != i) {
900: cols[nc] = idx_c[col + 1];
901: v[nc++] = .125 * (1. + (2.0 * x - 1.)) * (1. - (2.0 * y - 1.)) * (1. - (2.0 * z - 1.));
902: }
904: if (j_c * ratioj != j) {
905: cols[nc] = idx_c[col + m_ghost_c];
906: v[nc++] = .125 * (1. - (2.0 * x - 1.)) * (1. + (2.0 * y - 1.)) * (1. - (2.0 * z - 1.));
907: }
909: if (l_c * ratiok != l) {
910: cols[nc] = idx_c[col + m_ghost_c * n_ghost_c];
911: v[nc++] = .125 * (1. - (2.0 * x - 1.)) * (1. - (2.0 * y - 1.)) * (1. + (2.0 * z - 1.));
912: }
914: if (j_c * ratioj != j && i_c * ratioi != i) {
915: cols[nc] = idx_c[col + (m_ghost_c + 1)];
916: v[nc++] = .125 * (1. + (2.0 * x - 1.)) * (1. + (2.0 * y - 1.)) * (1. - (2.0 * z - 1.));
917: }
919: if (j_c * ratioj != j && l_c * ratiok != l) {
920: cols[nc] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c)];
921: v[nc++] = .125 * (1. - (2.0 * x - 1.)) * (1. + (2.0 * y - 1.)) * (1. + (2.0 * z - 1.));
922: }
924: if (i_c * ratioi != i && l_c * ratiok != l) {
925: cols[nc] = idx_c[col + (m_ghost_c * n_ghost_c + 1)];
926: v[nc++] = .125 * (1. + (2.0 * x - 1.)) * (1. - (2.0 * y - 1.)) * (1. + (2.0 * z - 1.));
927: }
929: if (i_c * ratioi != i && l_c * ratiok != l && j_c * ratioj != j) {
930: cols[nc] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c + 1)];
931: v[nc++] = .125 * (1. + (2.0 * x - 1.)) * (1. + (2.0 * y - 1.)) * (1. + (2.0 * z - 1.));
932: }
933: PetscCall(MatSetValues(mat, 1, &row, nc, cols, v, INSERT_VALUES));
934: }
935: }
936: }
938: } else {
939: PetscScalar *xi, *eta, *zeta;
940: PetscInt li, nxi, lj, neta, lk, nzeta, n;
941: PetscScalar Ni[8];
943: /* compute local coordinate arrays */
944: nxi = ratioi + 1;
945: neta = ratioj + 1;
946: nzeta = ratiok + 1;
947: PetscCall(PetscMalloc1(nxi, &xi));
948: PetscCall(PetscMalloc1(neta, &eta));
949: PetscCall(PetscMalloc1(nzeta, &zeta));
950: for (li = 0; li < nxi; li++) xi[li] = -1.0 + (PetscScalar)li * (2.0 / (PetscScalar)(nxi - 1));
951: for (lj = 0; lj < neta; lj++) eta[lj] = -1.0 + (PetscScalar)lj * (2.0 / (PetscScalar)(neta - 1));
952: for (lk = 0; lk < nzeta; lk++) zeta[lk] = -1.0 + (PetscScalar)lk * (2.0 / (PetscScalar)(nzeta - 1));
954: for (l = l_start; l < l_start + p_f; l++) {
955: for (j = j_start; j < j_start + n_f; j++) {
956: for (i = i_start; i < i_start + m_f; i++) {
957: /* convert to local "natural" numbering and then to PETSc global numbering */
958: row = idx_f[(m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))];
960: i_c = (i / ratioi);
961: j_c = (j / ratioj);
962: l_c = (l / ratiok);
964: /* remainders */
965: li = i - ratioi * (i / ratioi);
966: if (i == mx - 1) li = nxi - 1;
967: lj = j - ratioj * (j / ratioj);
968: if (j == my - 1) lj = neta - 1;
969: lk = l - ratiok * (l / ratiok);
970: if (l == mz - 1) lk = nzeta - 1;
972: /* corners */
973: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
974: cols[0] = idx_c[col];
975: Ni[0] = 1.0;
976: if ((li == 0) || (li == nxi - 1)) {
977: if ((lj == 0) || (lj == neta - 1)) {
978: if ((lk == 0) || (lk == nzeta - 1)) {
979: PetscCall(MatSetValue(mat, row, cols[0], Ni[0], INSERT_VALUES));
980: continue;
981: }
982: }
983: }
985: /* edges + interior */
986: /* remainders */
987: if (i == mx - 1) i_c--;
988: if (j == my - 1) j_c--;
989: if (l == mz - 1) l_c--;
991: col = (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c));
992: cols[0] = idx_c[col]; /* one left and below; or we are right on it */
993: cols[1] = idx_c[col + 1]; /* one right and below */
994: cols[2] = idx_c[col + m_ghost_c]; /* one left and above */
995: cols[3] = idx_c[col + (m_ghost_c + 1)]; /* one right and above */
997: cols[4] = idx_c[col + m_ghost_c * n_ghost_c]; /* one left and below and front; or we are right on it */
998: cols[5] = idx_c[col + (m_ghost_c * n_ghost_c + 1)]; /* one right and below, and front */
999: cols[6] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c)]; /* one left and above and front*/
1000: cols[7] = idx_c[col + (m_ghost_c * n_ghost_c + m_ghost_c + 1)]; /* one right and above and front */
1002: Ni[0] = 0.125 * (1.0 - xi[li]) * (1.0 - eta[lj]) * (1.0 - zeta[lk]);
1003: Ni[1] = 0.125 * (1.0 + xi[li]) * (1.0 - eta[lj]) * (1.0 - zeta[lk]);
1004: Ni[2] = 0.125 * (1.0 - xi[li]) * (1.0 + eta[lj]) * (1.0 - zeta[lk]);
1005: Ni[3] = 0.125 * (1.0 + xi[li]) * (1.0 + eta[lj]) * (1.0 - zeta[lk]);
1007: Ni[4] = 0.125 * (1.0 - xi[li]) * (1.0 - eta[lj]) * (1.0 + zeta[lk]);
1008: Ni[5] = 0.125 * (1.0 + xi[li]) * (1.0 - eta[lj]) * (1.0 + zeta[lk]);
1009: Ni[6] = 0.125 * (1.0 - xi[li]) * (1.0 + eta[lj]) * (1.0 + zeta[lk]);
1010: Ni[7] = 0.125 * (1.0 + xi[li]) * (1.0 + eta[lj]) * (1.0 + zeta[lk]);
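/* standard trilinear Q1 shape functions on the reference cube [-1,1]^3; they form a partition of unity */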
1012: for (n = 0; n < 8; n++) {
1013: if (PetscAbsScalar(Ni[n]) < 1.0e-32) cols[n] = -1;
1014: }
1015: PetscCall(MatSetValues(mat, 1, &row, 8, cols, Ni, INSERT_VALUES));
1016: }
1017: }
1018: }
1019: PetscCall(PetscFree(xi));
1020: PetscCall(PetscFree(eta));
1021: PetscCall(PetscFree(zeta));
1022: }
1023: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
1024: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
1025: PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
1026: PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
1028: PetscCall(MatCreateMAIJ(mat, dof, A));
1029: PetscCall(MatDestroy(&mat));
1030: PetscFunctionReturn(PETSC_SUCCESS);
1031: }
1033: PetscErrorCode DMCreateInterpolation_DA(DM dac, DM daf, Mat *A, Vec *scale)
1034: {
1035: PetscInt dimc, Mc, Nc, Pc, mc, nc, pc, dofc, sc, dimf, Mf, Nf, Pf, mf, nf, pf, doff, sf;
1036: DMBoundaryType bxc, byc, bzc, bxf, byf, bzf;
1037: DMDAStencilType stc, stf;
1038: DM_DA *ddc = (DM_DA *)dac->data;
1040: PetscFunctionBegin;
1043: PetscAssertPointer(A, 3);
1044: if (scale) PetscAssertPointer(scale, 4);
1046: PetscCall(DMDAGetInfo(dac, &dimc, &Mc, &Nc, &Pc, &mc, &nc, &pc, &dofc, &sc, &bxc, &byc, &bzc, &stc));
1047: PetscCall(DMDAGetInfo(daf, &dimf, &Mf, &Nf, &Pf, &mf, &nf, &pf, &doff, &sf, &bxf, &byf, &bzf, &stf));
1048: PetscCheck(dimc == dimf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Dimensions of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dimc, dimf);
1049: PetscCheck(dofc == doff, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "DOF of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dofc, doff);
1050: PetscCheck(sc == sf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil width of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, sc, sf);
1051: PetscCheck(bxc == bxf && byc == byf && bzc == bzf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Boundary type different in two DMDAs");
1052: PetscCheck(stc == stf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil type different in two DMDAs");
1053: PetscCheck(Mc >= 2 || Mf <= 1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in x direction");
1054: PetscCheck(dimc <= 1 || Nc >= 2 || Nf <= 1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in y direction");
1055: PetscCheck(dimc <= 2 || Pc >= 2 || Pf <= 1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in z direction");
1057: if (ddc->interptype == DMDA_Q1) {
1058: if (dimc == 1) {
1059: PetscCall(DMCreateInterpolation_DA_1D_Q1(dac, daf, A));
1060: } else if (dimc == 2) {
1061: PetscCall(DMCreateInterpolation_DA_2D_Q1(dac, daf, A));
1062: } else if (dimc == 3) {
1063: PetscCall(DMCreateInterpolation_DA_3D_Q1(dac, daf, A));
1064: } else SETERRQ(PetscObjectComm((PetscObject)daf), PETSC_ERR_SUP, "No support for this DMDA dimension %" PetscInt_FMT " for interpolation type %d", dimc, (int)ddc->interptype);
1065: } else if (ddc->interptype == DMDA_Q0) {
1066: if (dimc == 1) {
1067: PetscCall(DMCreateInterpolation_DA_1D_Q0(dac, daf, A));
1068: } else if (dimc == 2) {
1069: PetscCall(DMCreateInterpolation_DA_2D_Q0(dac, daf, A));
1070: } else if (dimc == 3) {
1071: PetscCall(DMCreateInterpolation_DA_3D_Q0(dac, daf, A));
1072: } else SETERRQ(PetscObjectComm((PetscObject)daf), PETSC_ERR_SUP, "No support for this DMDA dimension %" PetscInt_FMT " for interpolation type %d", dimc, (int)ddc->interptype);
1073: }
1074: if (scale) PetscCall(DMCreateInterpolationScale((DM)dac, (DM)daf, *A, scale));
1075: PetscFunctionReturn(PETSC_SUCCESS);
1076: }
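/*
  Illustrative sketch of typical usage (assumes a coarse DMDA dac has already been created):

     DM  daf;
     Mat A;
     Vec scale;
     PetscCall(DMRefine(dac, PetscObjectComm((PetscObject)dac), &daf));
     PetscCall(DMCreateInterpolation(dac, daf, &A, &scale));
     // A maps coarse-grid vectors to fine-grid vectors; pass NULL instead of &scale if it is not needed

  Whether the Q1 or Q0 branch above is taken follows the interpolation type stored in the coarse
  DMDA, which can be set with DMDASetInterpolationType().
*/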
1078: static PetscErrorCode DMCreateInjection_DA_1D(DM dac, DM daf, VecScatter *inject)
1079: {
1080: PetscInt i, i_start, m_f, Mx, dof;
1081: const PetscInt *idx_f;
1082: ISLocalToGlobalMapping ltog_f;
1083: PetscInt m_ghost, m_ghost_c;
1084: PetscInt row, i_start_ghost, mx, m_c, nc, ratioi;
1085: PetscInt i_start_c, i_start_ghost_c;
1086: PetscInt *cols;
1087: DMBoundaryType bx;
1088: Vec vecf, vecc;
1089: IS isf;
1091: PetscFunctionBegin;
1092: PetscCall(DMDAGetInfo(dac, NULL, &Mx, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &bx, NULL, NULL, NULL));
1093: PetscCall(DMDAGetInfo(daf, NULL, &mx, NULL, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
1094: if (bx == DM_BOUNDARY_PERIODIC) {
1095: ratioi = mx / Mx;
1096: PetscCheck(ratioi * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1097: } else {
1098: ratioi = (mx - 1) / (Mx - 1);
1099: PetscCheck(ratioi * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1100: }
1102: PetscCall(DMDAGetCorners(daf, &i_start, NULL, NULL, &m_f, NULL, NULL));
1103: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, NULL, NULL, &m_ghost, NULL, NULL));
1104: PetscCall(DMGetLocalToGlobalMapping(daf, &ltog_f));
1105: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
1107: PetscCall(DMDAGetCorners(dac, &i_start_c, NULL, NULL, &m_c, NULL, NULL));
1108: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, NULL, NULL, &m_ghost_c, NULL, NULL));
1110: /* loop over local fine grid nodes setting interpolation for those*/
1111: nc = 0;
1112: PetscCall(PetscMalloc1(m_f, &cols));
1114: for (i = i_start_c; i < i_start_c + m_c; i++) {
1115: PetscInt i_f = i * ratioi;
1117: PetscCheck(i_f >= i_start_ghost && i_f < i_start_ghost + m_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\ni_c %" PetscInt_FMT " i_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]", i, i_f, i_start_ghost, i_start_ghost + m_ghost);
1119: row = idx_f[(i_f - i_start_ghost)];
1120: cols[nc++] = row;
1121: }
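/* cols[] now lists, in coarse-grid order, the global block indices of the fine-grid nodes that
   coincide with coarse-grid nodes; the block IS and scatter built below copy exactly those values
   from a fine vector into a coarse vector (injection rather than interpolation/averaging) */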
1123: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
1124: PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf), dof, nc, cols, PETSC_OWN_POINTER, &isf));
1125: PetscCall(DMGetGlobalVector(dac, &vecc));
1126: PetscCall(DMGetGlobalVector(daf, &vecf));
1127: PetscCall(VecScatterCreate(vecf, isf, vecc, NULL, inject));
1128: PetscCall(DMRestoreGlobalVector(dac, &vecc));
1129: PetscCall(DMRestoreGlobalVector(daf, &vecf));
1130: PetscCall(ISDestroy(&isf));
1131: PetscFunctionReturn(PETSC_SUCCESS);
1132: }
1134: static PetscErrorCode DMCreateInjection_DA_2D(DM dac, DM daf, VecScatter *inject)
1135: {
1136: PetscInt i, j, i_start, j_start, m_f, n_f, Mx, My, dof;
1137: const PetscInt *idx_c, *idx_f;
1138: ISLocalToGlobalMapping ltog_f, ltog_c;
1139: PetscInt m_ghost, n_ghost, m_ghost_c, n_ghost_c;
1140: PetscInt row, i_start_ghost, j_start_ghost, mx, m_c, my, nc, ratioi, ratioj;
1141: PetscInt i_start_c, j_start_c, n_c, i_start_ghost_c, j_start_ghost_c;
1142: PetscInt *cols;
1143: DMBoundaryType bx, by;
1144: Vec vecf, vecc;
1145: IS isf;
1147: PetscFunctionBegin;
1148: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, NULL, NULL, NULL, NULL, NULL, NULL, &bx, &by, NULL, NULL));
1149: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, NULL, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
1150: if (bx == DM_BOUNDARY_PERIODIC) {
1151: ratioi = mx / Mx;
1152: PetscCheck(ratioi * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1153: } else {
1154: ratioi = (mx - 1) / (Mx - 1);
1155: PetscCheck(ratioi * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1156: }
1157: if (by == DM_BOUNDARY_PERIODIC) {
1158: ratioj = my / My;
1159: PetscCheck(ratioj * My == my, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
1160: } else {
1161: ratioj = (my - 1) / (My - 1);
1162: PetscCheck(ratioj * (My - 1) == my - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
1163: }
1165: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, NULL, &m_f, &n_f, NULL));
1166: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, NULL, &m_ghost, &n_ghost, NULL));
1167: PetscCall(DMGetLocalToGlobalMapping(daf, <og_f));
1168: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
1170: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, NULL, &m_c, &n_c, NULL));
1171: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, NULL, &m_ghost_c, &n_ghost_c, NULL));
1172: PetscCall(DMGetLocalToGlobalMapping(dac, <og_c));
1173: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
1175:   /* loop over the local coarse grid nodes, recording the fine grid node that coincides with each */
1176: nc = 0;
1177: PetscCall(PetscMalloc1(n_f * m_f, &cols));
1178: for (j = j_start_c; j < j_start_c + n_c; j++) {
1179: for (i = i_start_c; i < i_start_c + m_c; i++) {
1180: PetscInt i_f = i * ratioi, j_f = j * ratioj;
1181: PetscCheck(j_f >= j_start_ghost && j_f < j_start_ghost + n_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
1182: j_c %" PetscInt_FMT " j_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",
1183: j, j_f, j_start_ghost, j_start_ghost + n_ghost);
1184: PetscCheck(i_f >= i_start_ghost && i_f < i_start_ghost + m_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Processor's coarse DMDA must lie over fine DMDA\n\
1185: i_c %" PetscInt_FMT " i_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",
1186: i, i_f, i_start_ghost, i_start_ghost + m_ghost);
1187: row = idx_f[(m_ghost * (j_f - j_start_ghost) + (i_f - i_start_ghost))];
1188: cols[nc++] = row;
1189: }
1190: }
1191: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
1192: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
1194: PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf), dof, nc, cols, PETSC_OWN_POINTER, &isf));
1195: PetscCall(DMGetGlobalVector(dac, &vecc));
1196: PetscCall(DMGetGlobalVector(daf, &vecf));
1197: PetscCall(VecScatterCreate(vecf, isf, vecc, NULL, inject));
1198: PetscCall(DMRestoreGlobalVector(dac, &vecc));
1199: PetscCall(DMRestoreGlobalVector(daf, &vecf));
1200: PetscCall(ISDestroy(&isf));
1201: PetscFunctionReturn(PETSC_SUCCESS);
1202: }
1204: static PetscErrorCode DMCreateInjection_DA_3D(DM dac, DM daf, VecScatter *inject)
1205: {
1206: PetscInt i, j, k, i_start, j_start, k_start, m_f, n_f, p_f, Mx, My, Mz;
1207: PetscInt m_ghost, n_ghost, p_ghost, m_ghost_c, n_ghost_c, p_ghost_c;
1208: PetscInt i_start_ghost, j_start_ghost, k_start_ghost;
1209: PetscInt mx, my, mz, ratioi, ratioj, ratiok;
1210: PetscInt i_start_c, j_start_c, k_start_c;
1211: PetscInt m_c, n_c, p_c;
1212: PetscInt i_start_ghost_c, j_start_ghost_c, k_start_ghost_c;
1213: PetscInt row, nc, dof;
1214: const PetscInt *idx_c, *idx_f;
1215: ISLocalToGlobalMapping ltog_f, ltog_c;
1216: PetscInt *cols;
1217: DMBoundaryType bx, by, bz;
1218: Vec vecf, vecc;
1219: IS isf;
1221: PetscFunctionBegin;
1222: PetscCall(DMDAGetInfo(dac, NULL, &Mx, &My, &Mz, NULL, NULL, NULL, NULL, NULL, &bx, &by, &bz, NULL));
1223: PetscCall(DMDAGetInfo(daf, NULL, &mx, &my, &mz, NULL, NULL, NULL, &dof, NULL, NULL, NULL, NULL, NULL));
1225: if (bx == DM_BOUNDARY_PERIODIC) {
1226: ratioi = mx / Mx;
1227: PetscCheck(ratioi * Mx == mx, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1228: } else {
1229: ratioi = (mx - 1) / (Mx - 1);
1230: PetscCheck(ratioi * (Mx - 1) == mx - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT, mx, Mx);
1231: }
1232: if (by == DM_BOUNDARY_PERIODIC) {
1233: ratioj = my / My;
1234: PetscCheck(ratioj * My == my, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
1235: } else {
1236: ratioj = (my - 1) / (My - 1);
1237: PetscCheck(ratioj * (My - 1) == my - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT, my, My);
1238: }
1239: if (bz == DM_BOUNDARY_PERIODIC) {
1240: ratiok = mz / Mz;
1241:     PetscCheck(ratiok * Mz == mz, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: mz/Mz must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT, mz, Mz);
1242: } else {
1243: ratiok = (mz - 1) / (Mz - 1);
1244: PetscCheck(ratiok * (Mz - 1) == mz - 1, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT, mz, Mz);
1245: }
1247: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, &k_start, &m_f, &n_f, &p_f));
1248: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, &k_start_ghost, &m_ghost, &n_ghost, &p_ghost));
1249: PetscCall(DMGetLocalToGlobalMapping(daf, <og_f));
1250: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f, &idx_f));
1252: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, &k_start_c, &m_c, &n_c, &p_c));
1253: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, &k_start_ghost_c, &m_ghost_c, &n_ghost_c, &p_ghost_c));
1254: PetscCall(DMGetLocalToGlobalMapping(dac, <og_c));
1255: PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c, &idx_c));
1257:   /* loop over the local coarse grid nodes, recording the fine grid node that coincides with each */
1258: nc = 0;
1259: PetscCall(PetscMalloc1(n_f * m_f * p_f, &cols));
1260: for (k = k_start_c; k < k_start_c + p_c; k++) {
1261: for (j = j_start_c; j < j_start_c + n_c; j++) {
1262: for (i = i_start_c; i < i_start_c + m_c; i++) {
1263: PetscInt i_f = i * ratioi, j_f = j * ratioj, k_f = k * ratiok;
1264: PetscCheck(k_f >= k_start_ghost && k_f < k_start_ghost + p_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP,
1265: "Processor's coarse DMDA must lie over fine DMDA "
1266: "k_c %" PetscInt_FMT " k_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",
1267: k, k_f, k_start_ghost, k_start_ghost + p_ghost);
1268: PetscCheck(j_f >= j_start_ghost && j_f < j_start_ghost + n_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP,
1269: "Processor's coarse DMDA must lie over fine DMDA "
1270: "j_c %" PetscInt_FMT " j_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",
1271: j, j_f, j_start_ghost, j_start_ghost + n_ghost);
1272: PetscCheck(i_f >= i_start_ghost && i_f < i_start_ghost + m_ghost, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP,
1273: "Processor's coarse DMDA must lie over fine DMDA "
1274: "i_c %" PetscInt_FMT " i_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",
1275: i, i_f, i_start_ghost, i_start_ghost + m_ghost);
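/* map the ghosted-local offset of (i_f, j_f, k_f) to its global block index through the fine grid's local-to-global map */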
1276: row = idx_f[(m_ghost * n_ghost * (k_f - k_start_ghost) + m_ghost * (j_f - j_start_ghost) + (i_f - i_start_ghost))];
1277: cols[nc++] = row;
1278: }
1279: }
1280: }
1281: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f, &idx_f));
1282: PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c, &idx_c));
1284: PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf), dof, nc, cols, PETSC_OWN_POINTER, &isf));
1285: PetscCall(DMGetGlobalVector(dac, &vecc));
1286: PetscCall(DMGetGlobalVector(daf, &vecf));
1287: PetscCall(VecScatterCreate(vecf, isf, vecc, NULL, inject));
1288: PetscCall(DMRestoreGlobalVector(dac, &vecc));
1289: PetscCall(DMRestoreGlobalVector(daf, &vecf));
1290: PetscCall(ISDestroy(&isf));
1291: PetscFunctionReturn(PETSC_SUCCESS);
1292: }
1294: PetscErrorCode DMCreateInjection_DA(DM dac, DM daf, Mat *mat)
1295: {
1296: PetscInt dimc, Mc, Nc, Pc, mc, nc, pc, dofc, sc, dimf, Mf, Nf, Pf, mf, nf, pf, doff, sf;
1297: DMBoundaryType bxc, byc, bzc, bxf, byf, bzf;
1298: DMDAStencilType stc, stf;
1299: VecScatter inject = NULL;
1301: PetscFunctionBegin;
1304: PetscAssertPointer(mat, 3);
1306: PetscCall(DMDAGetInfo(dac, &dimc, &Mc, &Nc, &Pc, &mc, &nc, &pc, &dofc, &sc, &bxc, &byc, &bzc, &stc));
1307: PetscCall(DMDAGetInfo(daf, &dimf, &Mf, &Nf, &Pf, &mf, &nf, &pf, &doff, &sf, &bxf, &byf, &bzf, &stf));
1308: PetscCheck(dimc == dimf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Dimensions of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dimc, dimf);
1309: PetscCheck(dofc == doff, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "DOF of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dofc, doff);
1310: PetscCheck(sc == sf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil width of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, sc, sf);
1311: PetscCheck(bxc == bxf && byc == byf && bzc == bzf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Boundary type different in two DMDAs");
1312: PetscCheck(stc == stf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil type different in two DMDAs");
1313: PetscCheck(Mc >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in x direction");
1314: PetscCheck(dimc <= 1 || Nc >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in y direction");
1315: PetscCheck(dimc <= 2 || Pc >= 2, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Coarse grid requires at least 2 points in z direction");
1317: if (dimc == 1) {
1318: PetscCall(DMCreateInjection_DA_1D(dac, daf, &inject));
1319: } else if (dimc == 2) {
1320: PetscCall(DMCreateInjection_DA_2D(dac, daf, &inject));
1321: } else if (dimc == 3) {
1322: PetscCall(DMCreateInjection_DA_3D(dac, daf, &inject));
1323: }
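/* wrap the VecScatter as a MATSCATTER matrix so the injection is returned in Mat form; MatMult() of the result applies the scatter */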
1324: PetscCall(MatCreateScatter(PetscObjectComm((PetscObject)inject), inject, mat));
1325: PetscCall(VecScatterDestroy(&inject));
1326: PetscFunctionReturn(PETSC_SUCCESS);
1327: }
1329: // PetscClangLinter pragma ignore: -fdoc-*
1330: /*@
1331: DMCreateAggregates - Deprecated, see `DMDACreateAggregates()`.
1333: Level: intermediate
1334: @*/
1335: PetscErrorCode DMCreateAggregates(DM dac, DM daf, Mat *mat)
1336: {
1337: return DMDACreateAggregates(dac, daf, mat);
1338: }
1340: /*@
1341: DMDACreateAggregates - Gets the aggregates that map between
1342: the grids associated with two `DMDA`s
1344: Collective
1346: Input Parameters:
1347: + dac - the coarse grid `DMDA`
1348: - daf - the fine grid `DMDA`
1350: Output Parameter:
1351: . rest - the restriction matrix (transpose of the projection matrix)
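Example Usage:
A minimal sketch (the grid sizes, boundary types, and stencil choice below are illustrative only) that
builds a coarse/fine `DMDA` pair by refinement and requests the aggregates.
.vb
  DM  dac, daf;
  Mat R;

  PetscCall(DMDACreate2d(PETSC_COMM_WORLD, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DMDA_STENCIL_STAR, 5, 5, PETSC_DECIDE, PETSC_DECIDE, 1, 1, NULL, NULL, &dac));
  PetscCall(DMSetUp(dac));
  PetscCall(DMRefine(dac, PETSC_COMM_WORLD, &daf));
  PetscCall(DMDACreateAggregates(dac, daf, &R));
  /* use R, e.g. with MatMult(), then clean up */
  PetscCall(MatDestroy(&R));
  PetscCall(DMDestroy(&daf));
  PetscCall(DMDestroy(&dac));
.ve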
1353: Level: intermediate
1355: Note:
1356: This routine is not used elsewhere in PETSc.
1357: Its intended use case is unclear and it may be removed in a future release.
1358: Users who plan to use it should contact petsc-maint@mcs.anl.gov.
1360: .seealso: [](sec_struct), `DMRefine()`, `DMCreateInjection()`, `DMCreateInterpolation()`
1361: @*/
1362: PetscErrorCode DMDACreateAggregates(DM dac, DM daf, Mat *rest)
1363: {
1364: PetscInt dimc, Mc, Nc, Pc, mc, nc, pc, dofc, sc;
1365: PetscInt dimf, Mf, Nf, Pf, mf, nf, pf, doff, sf;
1366: DMBoundaryType bxc, byc, bzc, bxf, byf, bzf;
1367: DMDAStencilType stc, stf;
1368: PetscInt i, j, l;
1369: PetscInt i_start, j_start, l_start, m_f, n_f, p_f;
1370: PetscInt i_start_ghost, j_start_ghost, l_start_ghost, m_ghost, n_ghost, p_ghost;
1371: const PetscInt *idx_f;
1372: PetscInt i_c, j_c, l_c;
1373: PetscInt i_start_c, j_start_c, l_start_c, m_c, n_c, p_c;
1374: PetscInt i_start_ghost_c, j_start_ghost_c, l_start_ghost_c, m_ghost_c, n_ghost_c, p_ghost_c;
1375: const PetscInt *idx_c;
1376: PetscInt d;
1377: PetscInt a;
1378: PetscInt max_agg_size;
1379: PetscInt *fine_nodes;
1380: PetscScalar *one_vec;
1381: PetscInt fn_idx;
1382: ISLocalToGlobalMapping ltogmf, ltogmc;
1384: PetscFunctionBegin;
1387: PetscAssertPointer(rest, 3);
1389: PetscCall(DMDAGetInfo(dac, &dimc, &Mc, &Nc, &Pc, &mc, &nc, &pc, &dofc, &sc, &bxc, &byc, &bzc, &stc));
1390: PetscCall(DMDAGetInfo(daf, &dimf, &Mf, &Nf, &Pf, &mf, &nf, &pf, &doff, &sf, &bxf, &byf, &bzf, &stf));
1391: PetscCheck(dimc == dimf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Dimensions of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dimc, dimf);
1392: PetscCheck(dofc == doff, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "DOF of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, dofc, doff);
1393: PetscCheck(sc == sf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil width of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT, sc, sf);
1394: PetscCheck(bxc == bxf && byc == byf && bzc == bzf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Boundary type different in two DMDAs");
1395: PetscCheck(stc == stf, PetscObjectComm((PetscObject)daf), PETSC_ERR_ARG_INCOMP, "Stencil type different in two DMDAs");
1397: PetscCheck(Mf >= Mc, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Coarse grid has more points than fine grid, Mc %" PetscInt_FMT ", Mf %" PetscInt_FMT, Mc, Mf);
1398: PetscCheck(Nf >= Nc, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Coarse grid has more points than fine grid, Nc %" PetscInt_FMT ", Nf %" PetscInt_FMT, Nc, Nf);
1399: PetscCheck(Pf >= Pc, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Coarse grid has more points than fine grid, Pc %" PetscInt_FMT ", Pf %" PetscInt_FMT, Pc, Pf);
1401: if (Pc < 0) Pc = 1;
1402: if (Pf < 0) Pf = 1;
1403: if (Nc < 0) Nc = 1;
1404: if (Nf < 0) Nf = 1;
1406: PetscCall(DMDAGetCorners(daf, &i_start, &j_start, &l_start, &m_f, &n_f, &p_f));
1407: PetscCall(DMDAGetGhostCorners(daf, &i_start_ghost, &j_start_ghost, &l_start_ghost, &m_ghost, &n_ghost, &p_ghost));
1409: PetscCall(DMGetLocalToGlobalMapping(daf, <ogmf));
1410: PetscCall(ISLocalToGlobalMappingGetIndices(ltogmf, &idx_f));
1412: PetscCall(DMDAGetCorners(dac, &i_start_c, &j_start_c, &l_start_c, &m_c, &n_c, &p_c));
1413: PetscCall(DMDAGetGhostCorners(dac, &i_start_ghost_c, &j_start_ghost_c, &l_start_ghost_c, &m_ghost_c, &n_ghost_c, &p_ghost_c));
1415: PetscCall(DMGetLocalToGlobalMapping(dac, <ogmc));
1416: PetscCall(ISLocalToGlobalMappingGetIndices(ltogmc, &idx_c));
1418: /*
1419: Basic idea is as follows. Here's a 2D example, suppose r_x, r_y are the ratios
1420: for dimension 1 and 2 respectively.
1421: Let (i,j) be a coarse grid node. All the fine grid nodes between r_x*i and r_x*(i+1)
1422: and r_y*j and r_y*(j+1) will be grouped into the same coarse grid aggregate.
1423: Each specific dof on the fine grid is mapped to one dof on the coarse grid.
1424: */
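/*
   Concretely (illustrative sizes only): with Mf = 8 and Mc = 4 the ratio is r_x = Mf/Mc = 2, so coarse node
   i_c = 1 collects the fine nodes in the half-open range [i_c*r_x, (i_c+1)*r_x), i.e. i_f = 2 and 3; in 2D
   the aggregate for (i_c, j_c) is the tensor product of the x and y ranges.
*/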
1426: max_agg_size = (Mf / Mc + 1) * (Nf / Nc + 1) * (Pf / Pc + 1);
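/* max_agg_size bounds the number of fine nodes that can fall in a single aggregate and is used below to
   preallocate each row of the restriction matrix */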
1428: /* create the matrix that will contain the restriction operator */
1429: PetscCall(MatCreateAIJ(PetscObjectComm((PetscObject)daf), m_c * n_c * p_c * dofc, m_f * n_f * p_f * doff, Mc * Nc * Pc * dofc, Mf * Nf * Pf * doff, max_agg_size, NULL, max_agg_size, NULL, rest));
1431: /* store nodes in the fine grid here */
1432: PetscCall(PetscMalloc2(max_agg_size, &one_vec, max_agg_size, &fine_nodes));
1433: for (i = 0; i < max_agg_size; i++) one_vec[i] = 1.0;
1435: /* loop over all coarse nodes */
1436: for (l_c = l_start_c; l_c < l_start_c + p_c; l_c++) {
1437: for (j_c = j_start_c; j_c < j_start_c + n_c; j_c++) {
1438: for (i_c = i_start_c; i_c < i_start_c + m_c; i_c++) {
1439: for (d = 0; d < dofc; d++) {
1440:         /* compute the coarse node's ghosted-local index and map it to the PETSc global index of dof component d */
1441: a = idx_c[dofc * (m_ghost_c * n_ghost_c * (l_c - l_start_ghost_c) + m_ghost_c * (j_c - j_start_ghost_c) + (i_c - i_start_ghost_c))] + d;
1443: fn_idx = 0;
1444: /* Corresponding fine points are all points (i_f, j_f, l_f) such that
1445: i_c*Mf/Mc <= i_f < (i_c+1)*Mf/Mc
1446: (same for other dimensions)
1447: */
1448: for (l = l_c * Pf / Pc; l < PetscMin((l_c + 1) * Pf / Pc, Pf); l++) {
1449: for (j = j_c * Nf / Nc; j < PetscMin((j_c + 1) * Nf / Nc, Nf); j++) {
1450: for (i = i_c * Mf / Mc; i < PetscMin((i_c + 1) * Mf / Mc, Mf); i++) {
1451: fine_nodes[fn_idx] = idx_f[doff * (m_ghost * n_ghost * (l - l_start_ghost) + m_ghost * (j - j_start_ghost) + (i - i_start_ghost))] + d;
1452: fn_idx++;
1453: }
1454: }
1455: }
1456: /* add all these points to one aggregate */
1457: PetscCall(MatSetValues(*rest, 1, &a, fn_idx, fine_nodes, one_vec, INSERT_VALUES));
1458: }
1459: }
1460: }
1461: }
1462: PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogmf, &idx_f));
1463: PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogmc, &idx_c));
1464: PetscCall(PetscFree2(one_vec, fine_nodes));
1465: PetscCall(MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY));
1466: PetscCall(MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY));
1467: PetscFunctionReturn(PETSC_SUCCESS);
1468: }